file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
full-site.js | var failCount = 0;
var onLogoutRemoveIds = [];
var reoloadPageForChat = false;
/**
* @author Gehad Mohamed
*/
function showLoginPopUp(){
if(!isLoggedIn){
$('#login-popup').lightcase('start',{
href: "#login-popup",
liveResize:true,
maxHeight:1000,
onClose: {
qux:function(){
// afterLoginPerformAction = null;
}
},
onFinish : {
baz: function() {
initFormValidation('#ajax-form-login-resp');
}
}
});
}
}
function showNotification(type, txt) {
toastr.options.rtl = true;
toastr.options.positionClass= 'toast-top-right';
toastr.options.progressBar = true;
if(type == 'error') {
toastr.error(txt, "")
}
if(type == 'success') {
toastr.success(txt, "")
}
}
/**
* @author Mahmoud Mostafa <mahmoud.mostafa@ibtikar.net.sa>
* @param {boolean} switchToSameUrl if set the redirect will be to the same page url
*/
function switchView(switchToSameUrl) {
var switchUrl = homeUrl;
if (switchToSameUrl) {
switchUrl = window.location.href;
}
window.location = switchViewUrl + '?value=mobile&redirectToUrl=' + encodeURIComponent(switchUrl.replace(window.location.protocol + '//' + site_domain, window.location.protocol + '//' + mobile_site_domain));
}
function scrollToElm(selector, extraOffset) {
var offset = $(selector).offset();
extraOffset = typeof extraOffset != "undefined" ? extraOffset : 0;
if (typeof offset != "undefined" && extraOffset == 0) {
$(document).scrollTop(offset.top)
} else if ($(document).scrollTop() > offset.top + extraOffset) {
$(document).scrollTop(offset.top + extraOffset);
}
}
function clickElm(selector) {
if ($(selector).length > 0) {
$(selector).trigger('click');
}
}
/**
*
* @param {type} choicesModalTitle
* @param {type} choicesModalMessage
* @param {type} buttons like --> [ { textValue: "Ok", clickAction: "function"},{ text: "Ok", click: "function"},{ text: "Ok", click: "function"} ]
* @param {type} onCancelFunction [optional]
*/
function showChoicesModal(choicesModalTitle, choicesModalMessage, buttons, onCancelFunction) {
var $choicesModal = $('#choices-modal');
$choicesModal.find('.modal-title').html(choicesModalTitle);
$choicesModal.find('.modal-body').html(choicesModalMessage);
$choicesModal.find('.modal-footer').html("");
var $firstButton;
for (var i in buttons) {
var btn = buttons[i];
var attrsString = "";
for (var key in btn.attrs) {
var value = btn.attrs[key];
attrsString += key + '="' + value + '" ';
}
var $button = $('<a target="_self" ' + attrsString + ' onclick="' + btn.clickAction + '">' + btn.textValue + '</a>');
if (!$firstButton) {
$firstButton = $button;
}
$choicesModal.find('.modal-footer').append($button);
}
$choicesModal.modal({keyboard: true});
$choicesModal.on('shown.bs.modal', function () {
if ($firstButton && window.location == window.parent.location) {
$firstButton.focus();
}
});
$choicesModal.modal('show');
$(".btnPrint").printPage();
$choicesModal.off('hidden.bs.modal');
$choicesModal.on('hidden.bs.modal', function (e) {
if (onCancelFunction)
onCancelFunction();
});
}
function htmlEncode(str) {
return str.replace(/</g, '<').replace(/>/g, '>').replace(/'/g, ''').replace(/"/g, '"');
}
function | () {
$('#choices-modal').modal('hide');
}
function userStateChange(data, triggerLoginEvent) {
var data = typeof data == "undefined" ? null : data;
// $('.alert-danger').remove();
$('.login-slid-div').slideUp(300);
if (data) {
if(data.user.avatar){
$(".userImage").html('<i><img src="/'+data.user.avatar+'" /></i> ' + data.user.username + '<span class="caret"></span>');// responsive
$('.dev-user-profile').html('<i><img class="img-circle dev-profile-image" src="/'+data.user.avatar+'"/></i> '+data.user.username+'<span class="caret"></span>')
}else{
$(".userImage").html('<i class="fas fa-user-circle" ></i> ' + data.user.username + '<span class="caret"></span>');// responsive
$('.dev-user-profile').html('<i class="fas fa-user-circle fa-2x" style="margin-top: 5px;"></i> '+data.user.username+'<span class="caret"></span>')
}
$('.dev-anon-container').addClass('hide');
$('.dev-login-in').removeClass('hide');
// responsive
$('.userNotLogged').addClass('hide');
$('.userLogged').removeClass('hide');
if (data.user.materialCreate) {
$('.dev-material-create').removeClass('hide');
}
if (data.user.backend) {
$('.dev-backend-control').removeClass('hide');
}
if (data.user.comicsCreate) {
$('.dev-comics-create').removeClass('hide');
}
isLoggedIn = true;
if (triggerLoginEvent) {
$(window).trigger('user.loggedin');
}
$('.top-note').addClass('hidden');
for (var variableName in data.injectJSVariables) {
window[variableName] = data.injectJSVariables[variableName];
}
for (var fileId in data.injectFiles) {
loadScript(data.injectFiles[fileId], null, fileId);
onLogoutRemoveIds.push(fileId);
}
if (typeof afterLoginPerformAction === 'function') {
afterLoginPerformAction();
afterLoginPerformAction = null;
}
// if($('#login-popup').is(':visible')){
// lightcase.close();
// }
} else {
$('.dev-user-profile').html("");
// $('[type="password"]').val("");
$('.dev-anon-container').removeClass('hide');
$('.dev-login-in').addClass('hide');
$('#dev-material-create').addClass('hide');
$('#dev-backend-control').addClass('hide');
$('#dev-comics-create').addClass('hide');
if (typeof timerNotificationsInterval !== 'undefined' && timerNotificationsInterval) {
clearInterval(timerNotificationsInterval);
}
var userStatusLognout = isLoggedIn;
isLoggedIn = false;
if (userStatusLognout) {
$(window).trigger('user.loggedout');
}
$('.top-note').removeClass('hidden');
for (var fileIdIndex in onLogoutRemoveIds) {
$('#' + onLogoutRemoveIds[fileIdIndex]).remove();
}
}
}
function showAuthError(error) {
if (++failCount >= 3 || error.indexOf("Captcha") != -1) {
location.href = loginUrl;
} else {
showNotification('error',error);
// $('.dev-login-li').find('.alert').remove();
// $('.dev-login-li').prepend('<div class="alert alert-danger remove-5s">'
// + error + '</div>');
// if($('#ajax-form-login-resp').is(':visible')) $('#login-popup').lightcase('resize');
}
}
function SocialNetworkConnect(element) {
newWindow = window.open($(element).attr("data-url"), '', 'height=800, width=1000');
if (window.focus) {
newWindow.focus();
}
timer = setInterval(checkChild, 500);
}
function checkChild() {
if (errorMessage != false) {
if (newWindow.closed) {
msg = '<div class="alert alert-danger remove-5s">' + socialNetworkErrorMessage + '</div>';
if ($('.dev-login-li .alert').length > 0) {
$('.dev-login-li .alert').remove();
}
$('.dev-login-li').prepend(msg);
clearInterval(timer);
}
}
}
function show_email_modal() {
document.getElementById('form_email').value = "";
// $('#form_email').css('text-indent', '35px');
$('#form-modal .help-error').remove();
$('#form-modal .form-group').removeClass('is-invalid');
$('#form-modal').modal('show');
}
function getprayerTimeData() {
$.ajax({
url: getPrayerInfoUrl,
success: preparePrayerTimeWidget
});
}
// increaseFontSize and decreaseFontSize
var min = 16;
var max = 20;
function increaseFontSize() {
var p = $('.details-text');
for (i = 0; i < p.length; i++) {
if (p[i].style.fontSize) {
var s = parseInt(p[i].style.fontSize.replace("px", ""));
} else {
var s = 18;
}
if (s != max) {
s += 1;
}
p[i].style.fontSize = s + "px"
}
}
function decreaseFontSize() {
var p = $('.details-text');
for (i = 0; i < p.length; i++) {
if (p[i].style.fontSize) {
var s = parseInt(p[i].style.fontSize.replace("px", ""));
} else {
var s = 18;
}
if (s != min) {
s -= 1;
}
p[i].style.fontSize = s + "px"
}
}
function resetFontSize() {
var p = $('.details-text');
for (i = 0; i < p.length; i++) {
p[i].style.fontSize = "18px"
}
}
$('body').on('click','.largeFont',function () {
increaseFontSize();
});
$('body').on('click','.smallFont',function () {
decreaseFontSize();
});
$('body').on('click','.normalFont',function () {
resetFontSize();
});
function sharePopup(url, w, h) {
var left = (screen.width / 2) - (w / 2);
var top = (screen.height / 2) - (h / 2);
return window.open(url, "share window", 'toolbar=no, location=no, directories=no, status=no, menubar=no, scrollbars=yes, copyhistory=no, width=' + w + ', height=' + h + ', top=' + top + ', left=' + left);
}
function loginToChat() {
$.ajax({
url: chatLoginUrl,
success: function (data) {
if (reoloadPageForChat && data.loggedIn) {
window.location.reload(true);
return;
}
loadScript('https://repository.chatwee.com/scripts/72e4b84d2ef104b50494d305ab4bde88.js', null, 'chatwee-js-tag');
}
});
}
function logoutFromChat() {
$.ajax({
url: chatLogoutUrl,
success: function() {
$('#chatwee-js-tag').remove();
}
});
}
$(document).on('shown.bs.tab', 'a[data-toggle="tab"]',function (e) {
var target = $(e.target).attr("href") // activated tab
if(target=='#tab_default_2'){
setTimeout(function(){
initFormValidation() ;
},200)
}
});
jQuery(document).ready(function ($) {
// $(window).on('user.loggedin', loginToChat);
// $(window).on('user.loggedout', logoutFromChat);
$('form[name=searchForm]').submit(function (e) {
if (typeof inAngularLayout === 'undefined') {
e.preventDefault();
$(this).data('submitted', true);
var searchString = $(this).find('input[name=searchString]').val().trim();
if (!searchString || searchString.length < 3) {
$(this).find('.form-group').addClass('input-is-invalid');
$(this).find('span').show();
} else {
window.location = searchPageUrl + '?searchString=' + encodeURIComponent(searchString);
}
}
});
// the quick search forms inputs validation action
$('input[name=searchString]').keyup(function () {
if (typeof inAngularLayout === 'undefined') {
if ($(this).closest('form').data('submitted')) {
var searchString = $(this).val().trim();
if (!searchString || searchString.length < 3) {
$(this).parent().addClass('input-is-invalid');
$(this).parent().find('span').show();
} else {
$(this).parent().removeClass('input-is-invalid');
$(this).parent().find('span').hide();
}
}
}
});
// hijri date switcher
$(function () {
$ds = $('#dev-date-container span');
$ds.hide().eq(0).show();
setInterval(function () {
$ds.filter(':visible').fadeOut(function () {
var $div = $(this).next('span');
if ($div.length == 0) {
$ds.eq(0).fadeIn();
} else {
$div.fadeIn();
}
});
}, 5000);
});
//submenu
$('.menu-item-section').on('click', function (x, y, z) {
var menuIndex = $('.menu-item-section').index(this);
$('.submenu-dropzone').slideUp(300);
$('.submenu-dropzone').eq(menuIndex).stop(true, false, true).slideToggle(300);
return false;
});
// var currentTime = new Date();
// var dateInHailTimezone = new Date((currentTime.getTime() + (currentTime.getTimezoneOffset() * 60000)) + (3600000 * 3));
// var currentDayDate = dateInHailTimezone.toLocaleDateString('ar-SA', {weekday: 'long', year: 'numeric', month: 'long', day: 'numeric'});
// var todayDate = currentTime.getFullYear() + '-' + (currentTime.getMonth() + 1) + '-' + currentTime.getDate();
// var prayerNames = {'Fajr': 'الفجر', 'Dhuhr': 'الظهر', 'Asr': 'العصر', 'Maghrib': 'المغرب', 'Isha': 'العشاء'};
//
// function getTimeStringFromDateObject(status) {
// var timeStatus = 'AM';
// if (status == "مساءًا") {
// timeStatus = 'PM';
// }
//
// return timeStatus;
// }
// $(window).on('activePrayerTime', function () {
// $('.dev-active-prayer-label').text(activePrayerObject.activePrayerLabel)
// $('.dev-active-prayer-time').text(activePrayerObject.activePrayer)
// $('.dev-asr').text(prayerTimeDate.asr);
// $('.dev-fajr').text(prayerTimeDate.fajr);
// $('.dev-isha').text(prayerTimeDate.isha);
// $('.dev-maghrib').text(prayerTimeDate.maghrib);
// $('.dev-zuhr').text(prayerTimeDate.zuhr);
// $('.dev-prayer-city').text(prayerCity);
// $('.dev-remaing-label').text('يحين ' + activePrayerObject.activePrayerLabel + ' بعد');
// displayCount(remaining, 'dev-remaining-time', getNextActivePrayer);
// });
//
// // show the ads in the pages that does not include the angular files
//// setTimeout(function () {
//// if (typeof inAngularLayout === 'undefined') {
//// addGeneralAds('General');
//// }
//// }, 1000);
//
$.ajax({
url: getUserStatsUrl,
success: function (data) {
if (data) {
userStateChange(data.data, true);
}
}
});
//
$('body').on('submit', '#ajax-form-login,#ajax-form-login-resp,#ajax-form-login-comment', function (e) {
e.preventDefault();
// $('.alert-danger').remove();
var $this = $(e.currentTarget),
inputs = {};
// Send all form's inputs
$.each($this.find('input'), function (i, item) {
var $item = $(item);
if ($item.is(':checkbox')) {
if ($item.is(':checked'))
inputs[$item.attr('name')] = 1;
} else {
inputs[$item.attr('name')] = $item.val();
}
});
// Send form into ajax
if ($(this).valid()) {
$.ajax({
url: $this.attr('action'),
type: 'POST',
dataType: 'json',
data: inputs,
success: function (data) {
if (data.has_error) {
showAuthError(data.error);
} else {
window.location.reload();
}
}
});
}
});
// $('.ajax-logout').click(function (e) {
// e.preventDefault();
// $.ajax({
// url: $(this).attr('href'),
// complete: function () {
// userStateChange();
// if (typeof angular == "undefined") {
// location.href = homePageUrl;
// }
// }
// });
// });
//
// $('body').on('click','.collapse-btn',function () {
// $(this).toggleClass("on");
// $($(this).attr('data-ctrl')).slideToggle();
// });
//
// jQuery(function() {
// jQuery.scrollDepth({
// nonInteraction:false
// });
// });
// Ready Methods
getBreakingNews();
//---------(Functions)------------>
function getBreakingNews(){
$.ajax({
url: $("#breakingNewsUrl").val(),
method: 'GET',
success: function (responseData) {
$("#breakingNews").html(responseData);
}
});
}
});
| closeDialog | identifier_name |
full-site.js | var failCount = 0;
var onLogoutRemoveIds = [];
var reoloadPageForChat = false;
/**
* @author Gehad Mohamed
*/
function showLoginPopUp(){
if(!isLoggedIn){
$('#login-popup').lightcase('start',{
href: "#login-popup",
liveResize:true,
maxHeight:1000,
onClose: {
qux:function(){
// afterLoginPerformAction = null;
}
},
onFinish : {
baz: function() {
initFormValidation('#ajax-form-login-resp');
}
}
});
}
}
function showNotification(type, txt) {
toastr.options.rtl = true;
toastr.options.positionClass= 'toast-top-right';
toastr.options.progressBar = true;
if(type == 'error') {
toastr.error(txt, "")
}
if(type == 'success') {
toastr.success(txt, "")
}
}
/**
* @author Mahmoud Mostafa <mahmoud.mostafa@ibtikar.net.sa>
* @param {boolean} switchToSameUrl if set the redirect will be to the same page url
*/
function switchView(switchToSameUrl) {
var switchUrl = homeUrl;
if (switchToSameUrl) {
switchUrl = window.location.href;
}
window.location = switchViewUrl + '?value=mobile&redirectToUrl=' + encodeURIComponent(switchUrl.replace(window.location.protocol + '//' + site_domain, window.location.protocol + '//' + mobile_site_domain));
}
function scrollToElm(selector, extraOffset) {
var offset = $(selector).offset();
extraOffset = typeof extraOffset != "undefined" ? extraOffset : 0;
if (typeof offset != "undefined" && extraOffset == 0) {
$(document).scrollTop(offset.top)
} else if ($(document).scrollTop() > offset.top + extraOffset) {
$(document).scrollTop(offset.top + extraOffset);
}
}
function clickElm(selector) {
if ($(selector).length > 0) {
$(selector).trigger('click');
}
}
/**
*
* @param {type} choicesModalTitle
* @param {type} choicesModalMessage
* @param {type} buttons like --> [ { textValue: "Ok", clickAction: "function"},{ text: "Ok", click: "function"},{ text: "Ok", click: "function"} ]
* @param {type} onCancelFunction [optional]
*/
function showChoicesModal(choicesModalTitle, choicesModalMessage, buttons, onCancelFunction) {
var $choicesModal = $('#choices-modal');
$choicesModal.find('.modal-title').html(choicesModalTitle);
$choicesModal.find('.modal-body').html(choicesModalMessage);
$choicesModal.find('.modal-footer').html("");
var $firstButton;
for (var i in buttons) {
var btn = buttons[i];
var attrsString = "";
for (var key in btn.attrs) {
var value = btn.attrs[key];
attrsString += key + '="' + value + '" ';
}
var $button = $('<a target="_self" ' + attrsString + ' onclick="' + btn.clickAction + '">' + btn.textValue + '</a>');
if (!$firstButton) {
$firstButton = $button;
}
$choicesModal.find('.modal-footer').append($button);
}
$choicesModal.modal({keyboard: true});
$choicesModal.on('shown.bs.modal', function () {
if ($firstButton && window.location == window.parent.location) {
$firstButton.focus();
}
});
$choicesModal.modal('show');
$(".btnPrint").printPage();
$choicesModal.off('hidden.bs.modal');
$choicesModal.on('hidden.bs.modal', function (e) {
if (onCancelFunction)
onCancelFunction();
});
}
function htmlEncode(str) {
return str.replace(/</g, '<').replace(/>/g, '>').replace(/'/g, ''').replace(/"/g, '"');
}
function closeDialog() {
$('#choices-modal').modal('hide');
}
function userStateChange(data, triggerLoginEvent) {
var data = typeof data == "undefined" ? null : data;
// $('.alert-danger').remove();
$('.login-slid-div').slideUp(300);
if (data) {
if(data.user.avatar){
$(".userImage").html('<i><img src="/'+data.user.avatar+'" /></i> ' + data.user.username + '<span class="caret"></span>');// responsive
$('.dev-user-profile').html('<i><img class="img-circle dev-profile-image" src="/'+data.user.avatar+'"/></i> '+data.user.username+'<span class="caret"></span>')
}else{
$(".userImage").html('<i class="fas fa-user-circle" ></i> ' + data.user.username + '<span class="caret"></span>');// responsive
$('.dev-user-profile').html('<i class="fas fa-user-circle fa-2x" style="margin-top: 5px;"></i> '+data.user.username+'<span class="caret"></span>')
}
$('.dev-anon-container').addClass('hide');
$('.dev-login-in').removeClass('hide');
// responsive
$('.userNotLogged').addClass('hide');
$('.userLogged').removeClass('hide');
if (data.user.materialCreate) {
$('.dev-material-create').removeClass('hide');
}
if (data.user.backend) {
$('.dev-backend-control').removeClass('hide');
}
if (data.user.comicsCreate) {
$('.dev-comics-create').removeClass('hide');
}
isLoggedIn = true;
if (triggerLoginEvent) {
$(window).trigger('user.loggedin');
}
$('.top-note').addClass('hidden');
for (var variableName in data.injectJSVariables) {
window[variableName] = data.injectJSVariables[variableName];
}
for (var fileId in data.injectFiles) {
loadScript(data.injectFiles[fileId], null, fileId);
onLogoutRemoveIds.push(fileId);
}
if (typeof afterLoginPerformAction === 'function') {
afterLoginPerformAction();
afterLoginPerformAction = null;
}
// if($('#login-popup').is(':visible')){
// lightcase.close();
// }
} else |
}
function showAuthError(error) {
if (++failCount >= 3 || error.indexOf("Captcha") != -1) {
location.href = loginUrl;
} else {
showNotification('error',error);
// $('.dev-login-li').find('.alert').remove();
// $('.dev-login-li').prepend('<div class="alert alert-danger remove-5s">'
// + error + '</div>');
// if($('#ajax-form-login-resp').is(':visible')) $('#login-popup').lightcase('resize');
}
}
function SocialNetworkConnect(element) {
newWindow = window.open($(element).attr("data-url"), '', 'height=800, width=1000');
if (window.focus) {
newWindow.focus();
}
timer = setInterval(checkChild, 500);
}
function checkChild() {
if (errorMessage != false) {
if (newWindow.closed) {
msg = '<div class="alert alert-danger remove-5s">' + socialNetworkErrorMessage + '</div>';
if ($('.dev-login-li .alert').length > 0) {
$('.dev-login-li .alert').remove();
}
$('.dev-login-li').prepend(msg);
clearInterval(timer);
}
}
}
function show_email_modal() {
document.getElementById('form_email').value = "";
// $('#form_email').css('text-indent', '35px');
$('#form-modal .help-error').remove();
$('#form-modal .form-group').removeClass('is-invalid');
$('#form-modal').modal('show');
}
function getprayerTimeData() {
$.ajax({
url: getPrayerInfoUrl,
success: preparePrayerTimeWidget
});
}
// increaseFontSize and decreaseFontSize
var min = 16;
var max = 20;
function increaseFontSize() {
var p = $('.details-text');
for (i = 0; i < p.length; i++) {
if (p[i].style.fontSize) {
var s = parseInt(p[i].style.fontSize.replace("px", ""));
} else {
var s = 18;
}
if (s != max) {
s += 1;
}
p[i].style.fontSize = s + "px"
}
}
function decreaseFontSize() {
var p = $('.details-text');
for (i = 0; i < p.length; i++) {
if (p[i].style.fontSize) {
var s = parseInt(p[i].style.fontSize.replace("px", ""));
} else {
var s = 18;
}
if (s != min) {
s -= 1;
}
p[i].style.fontSize = s + "px"
}
}
function resetFontSize() {
var p = $('.details-text');
for (i = 0; i < p.length; i++) {
p[i].style.fontSize = "18px"
}
}
$('body').on('click','.largeFont',function () {
increaseFontSize();
});
$('body').on('click','.smallFont',function () {
decreaseFontSize();
});
$('body').on('click','.normalFont',function () {
resetFontSize();
});
function sharePopup(url, w, h) {
var left = (screen.width / 2) - (w / 2);
var top = (screen.height / 2) - (h / 2);
return window.open(url, "share window", 'toolbar=no, location=no, directories=no, status=no, menubar=no, scrollbars=yes, copyhistory=no, width=' + w + ', height=' + h + ', top=' + top + ', left=' + left);
}
function loginToChat() {
$.ajax({
url: chatLoginUrl,
success: function (data) {
if (reoloadPageForChat && data.loggedIn) {
window.location.reload(true);
return;
}
loadScript('https://repository.chatwee.com/scripts/72e4b84d2ef104b50494d305ab4bde88.js', null, 'chatwee-js-tag');
}
});
}
function logoutFromChat() {
$.ajax({
url: chatLogoutUrl,
success: function() {
$('#chatwee-js-tag').remove();
}
});
}
$(document).on('shown.bs.tab', 'a[data-toggle="tab"]',function (e) {
var target = $(e.target).attr("href") // activated tab
if(target=='#tab_default_2'){
setTimeout(function(){
initFormValidation() ;
},200)
}
});
jQuery(document).ready(function ($) {
// $(window).on('user.loggedin', loginToChat);
// $(window).on('user.loggedout', logoutFromChat);
$('form[name=searchForm]').submit(function (e) {
if (typeof inAngularLayout === 'undefined') {
e.preventDefault();
$(this).data('submitted', true);
var searchString = $(this).find('input[name=searchString]').val().trim();
if (!searchString || searchString.length < 3) {
$(this).find('.form-group').addClass('input-is-invalid');
$(this).find('span').show();
} else {
window.location = searchPageUrl + '?searchString=' + encodeURIComponent(searchString);
}
}
});
// the quick search forms inputs validation action
$('input[name=searchString]').keyup(function () {
if (typeof inAngularLayout === 'undefined') {
if ($(this).closest('form').data('submitted')) {
var searchString = $(this).val().trim();
if (!searchString || searchString.length < 3) {
$(this).parent().addClass('input-is-invalid');
$(this).parent().find('span').show();
} else {
$(this).parent().removeClass('input-is-invalid');
$(this).parent().find('span').hide();
}
}
}
});
// hijri date switcher
$(function () {
$ds = $('#dev-date-container span');
$ds.hide().eq(0).show();
setInterval(function () {
$ds.filter(':visible').fadeOut(function () {
var $div = $(this).next('span');
if ($div.length == 0) {
$ds.eq(0).fadeIn();
} else {
$div.fadeIn();
}
});
}, 5000);
});
//submenu
$('.menu-item-section').on('click', function (x, y, z) {
var menuIndex = $('.menu-item-section').index(this);
$('.submenu-dropzone').slideUp(300);
$('.submenu-dropzone').eq(menuIndex).stop(true, false, true).slideToggle(300);
return false;
});
// var currentTime = new Date();
// var dateInHailTimezone = new Date((currentTime.getTime() + (currentTime.getTimezoneOffset() * 60000)) + (3600000 * 3));
// var currentDayDate = dateInHailTimezone.toLocaleDateString('ar-SA', {weekday: 'long', year: 'numeric', month: 'long', day: 'numeric'});
// var todayDate = currentTime.getFullYear() + '-' + (currentTime.getMonth() + 1) + '-' + currentTime.getDate();
// var prayerNames = {'Fajr': 'الفجر', 'Dhuhr': 'الظهر', 'Asr': 'العصر', 'Maghrib': 'المغرب', 'Isha': 'العشاء'};
//
// function getTimeStringFromDateObject(status) {
// var timeStatus = 'AM';
// if (status == "مساءًا") {
// timeStatus = 'PM';
// }
//
// return timeStatus;
// }
// $(window).on('activePrayerTime', function () {
// $('.dev-active-prayer-label').text(activePrayerObject.activePrayerLabel)
// $('.dev-active-prayer-time').text(activePrayerObject.activePrayer)
// $('.dev-asr').text(prayerTimeDate.asr);
// $('.dev-fajr').text(prayerTimeDate.fajr);
// $('.dev-isha').text(prayerTimeDate.isha);
// $('.dev-maghrib').text(prayerTimeDate.maghrib);
// $('.dev-zuhr').text(prayerTimeDate.zuhr);
// $('.dev-prayer-city').text(prayerCity);
// $('.dev-remaing-label').text('يحين ' + activePrayerObject.activePrayerLabel + ' بعد');
// displayCount(remaining, 'dev-remaining-time', getNextActivePrayer);
// });
//
// // show the ads in the pages that does not include the angular files
//// setTimeout(function () {
//// if (typeof inAngularLayout === 'undefined') {
//// addGeneralAds('General');
//// }
//// }, 1000);
//
$.ajax({
url: getUserStatsUrl,
success: function (data) {
if (data) {
userStateChange(data.data, true);
}
}
});
//
$('body').on('submit', '#ajax-form-login,#ajax-form-login-resp,#ajax-form-login-comment', function (e) {
e.preventDefault();
// $('.alert-danger').remove();
var $this = $(e.currentTarget),
inputs = {};
// Send all form's inputs
$.each($this.find('input'), function (i, item) {
var $item = $(item);
if ($item.is(':checkbox')) {
if ($item.is(':checked'))
inputs[$item.attr('name')] = 1;
} else {
inputs[$item.attr('name')] = $item.val();
}
});
// Send form into ajax
if ($(this).valid()) {
$.ajax({
url: $this.attr('action'),
type: 'POST',
dataType: 'json',
data: inputs,
success: function (data) {
if (data.has_error) {
showAuthError(data.error);
} else {
window.location.reload();
}
}
});
}
});
// $('.ajax-logout').click(function (e) {
// e.preventDefault();
// $.ajax({
// url: $(this).attr('href'),
// complete: function () {
// userStateChange();
// if (typeof angular == "undefined") {
// location.href = homePageUrl;
// }
// }
// });
// });
//
// $('body').on('click','.collapse-btn',function () {
// $(this).toggleClass("on");
// $($(this).attr('data-ctrl')).slideToggle();
// });
//
// jQuery(function() {
// jQuery.scrollDepth({
// nonInteraction:false
// });
// });
// Ready Methods
getBreakingNews();
//---------(Functions)------------>
function getBreakingNews(){
$.ajax({
url: $("#breakingNewsUrl").val(),
method: 'GET',
success: function (responseData) {
$("#breakingNews").html(responseData);
}
});
}
});
| {
$('.dev-user-profile').html("");
// $('[type="password"]').val("");
$('.dev-anon-container').removeClass('hide');
$('.dev-login-in').addClass('hide');
$('#dev-material-create').addClass('hide');
$('#dev-backend-control').addClass('hide');
$('#dev-comics-create').addClass('hide');
if (typeof timerNotificationsInterval !== 'undefined' && timerNotificationsInterval) {
clearInterval(timerNotificationsInterval);
}
var userStatusLognout = isLoggedIn;
isLoggedIn = false;
if (userStatusLognout) {
$(window).trigger('user.loggedout');
}
$('.top-note').removeClass('hidden');
for (var fileIdIndex in onLogoutRemoveIds) {
$('#' + onLogoutRemoveIds[fileIdIndex]).remove();
}
} | conditional_block |
embeddings.rs | use std::cmp::Ordering;
use std::collections::hash_map::Entry;
use std::collections::{BinaryHeap, HashMap, HashSet};
use std::iter::Enumerate;
use std::slice;
use failure::Error;
use ndarray::{Array1, Array2, ArrayView1, ArrayView2, Axis};
/// A word similarity.
///
/// This data structure is used to store a pair consisting of a word and
/// its similarity to a query word.
#[derive(Debug)]
pub struct WordSimilarity<'a> {
pub word: &'a str,
pub similarity: f32,
}
impl<'a> Ord for WordSimilarity<'a> {
fn cmp(&self, other: &Self) -> Ordering {
if self.similarity > other.similarity {
Ordering::Less
} else if self.similarity < other.similarity {
Ordering::Greater
} else {
self.word.cmp(other.word)
}
}
}
impl<'a> PartialOrd for WordSimilarity<'a> {
fn partial_cmp(&self, other: &WordSimilarity) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl<'a> Eq for WordSimilarity<'a> {}
impl<'a> PartialEq for WordSimilarity<'a> {
fn eq(&self, other: &WordSimilarity) -> bool {
self.cmp(other) == Ordering::Equal
}
}
#[derive(Debug, Fail)]
pub enum BuilderError {
#[fail(
display = "invalid embedding shape, expected: {}, got: {}",
expected_len, len
)]
InvalidEmbeddingLength { expected_len: usize, len: usize },
#[fail(display = "word not unique: {}", word)]
DuplicateWord { word: String },
}
/// Builder for word embedding matrices.
///
/// This builder can be used to construct an embedding matrix. The builder
/// does not assume that the number of embeddings is known ahead of time.
/// The embedding size is determined by the size of the first embedding that
/// is added. All subsequently added embeddings should have the same size.
pub struct Builder {
words: Vec<String>,
indices: HashMap<String, usize>,
embeddings: Vec<Array1<f32>>,
}
impl Builder {
/// Create a builder.
pub fn new() -> Self {
Builder {
words: Vec::new(),
indices: HashMap::new(),
embeddings: Vec::new(),
}
}
/// Construct the final embeddin matrix.
///
/// The `None` is returned when no embedding was added to the builder.
pub fn build(self) -> Option<Embeddings> {
let embed_len = self.embeddings.first()?.shape()[0];
let mut matrix = Array2::zeros((self.embeddings.len(), embed_len));
for (idx, embed) in self.embeddings.into_iter().enumerate() {
matrix.index_axis_mut(Axis(0), idx).assign(&embed);
}
Some(Embeddings {
embed_len,
indices: self.indices,
words: self.words,
matrix,
})
}
/// Add a new embedding to the builder.
///
/// An `Err` value is returned when the word has been inserted in the
/// builder before or when the embedding has a different size than
/// previously inserted embeddings.
pub fn push<S, E>(&mut self, word: S, embedding: E) -> Result<(), Error>
where
S: Into<String>,
E: Into<Array1<f32>>,
{
let word = word.into();
let embedding = embedding.into();
// Check that the embedding has the same length as embeddings that
// were inserted before.
if let Some(first) = self.embeddings.first() {
ensure!(
embedding.shape() == first.shape(),
BuilderError::InvalidEmbeddingLength {
expected_len: first.shape()[0],
len: embedding.shape()[0],
}
);
}
// Insert the word if it was not inserted before.
match self.indices.entry(word.to_owned()) { | self.words.push(word.clone());
self.embeddings.push(embedding);
Ok(())
}
}
/// Word embeddings.
///
/// This data structure stores word embeddings (also known as *word vectors*)
/// and provides some useful methods on the embeddings, such as similarity
/// and analogy queries.
pub struct Embeddings {
matrix: Array2<f32>,
embed_len: usize,
indices: HashMap<String, usize>,
words: Vec<String>,
}
impl Embeddings {
/// Perform an analogy query.
///
/// This method returns words that are close in vector space the analogy
/// query `word1` is to `word2` as `word3` is to `?`. More concretely,
/// it searches embeddings that are similar to:
///
/// *embedding(word2) - embedding(word1) + embedding(word3)*
///
/// At most, `limit` results are returned.
pub fn analogy(
&self,
word1: &str,
word2: &str,
word3: &str,
limit: usize,
) -> Option<Vec<WordSimilarity>> {
self.analogy_by(word1, word2, word3, limit, |embeds, embed| {
embeds.dot(&embed)
})
}
/// Perform an analogy query using the given similarity function.
///
/// This method returns words that are close in vector space the analogy
/// query `word1` is to `word2` as `word3` is to `?`. More concretely,
/// it searches embeddings that are similar to:
///
/// *embedding(word2) - embedding(word1) + embedding(word3)*
///
/// At most, `limit` results are returned.
pub fn analogy_by<F>(
&self,
word1: &str,
word2: &str,
word3: &str,
limit: usize,
similarity: F,
) -> Option<Vec<WordSimilarity>>
where
F: FnMut(ArrayView2<f32>, ArrayView1<f32>) -> Array1<f32>,
{
let embedding1 = self
.indices
.get(word1)
.map(|idx| self.matrix.index_axis(Axis(0), *idx).to_owned())?;
let embedding2 = self
.indices
.get(word2)
.map(|idx| self.matrix.index_axis(Axis(0), *idx).to_owned())?;
let embedding3 = self
.indices
.get(word3)
.map(|idx| self.matrix.index_axis(Axis(0), *idx).to_owned())?;
let embedding = (embedding2 - embedding1) + embedding3;
let skip = [word1, word2, word3].iter().cloned().collect();
Some(self.similarity_(embedding.view(), &skip, limit, similarity))
}
/// Get (a view of) the raw embedding matrix.
pub fn data(&self) -> ArrayView2<f32> {
self.matrix.view()
}
/// Return the length (in vector components) of the word embeddings.
pub fn embed_len(&self) -> usize {
self.embed_len
}
/// Get the embedding of a word.
pub fn embedding(&self, word: &str) -> Option<ArrayView1<f32>> {
self.indices
.get(word)
.map(|idx| self.matrix.index_axis(Axis(0), *idx))
}
/// Get the mapping from words to row indices of the embedding matrix.
pub fn indices(&self) -> &HashMap<String, usize> {
&self.indices
}
/// Get an iterator over pairs of words and the corresponding embeddings.
pub fn iter(&self) -> Iter {
Iter {
embeddings: self,
inner: self.words.iter().enumerate(),
}
}
/// Normalize the embeddings using their L2 (euclidean) norms.
///
/// **Note:** when you are using the output of e.g. word2vec, you should
/// normalize the embeddings to get good query results.
pub fn normalize(&mut self) {
for mut embedding in self.matrix.outer_iter_mut() {
let l2norm = embedding.dot(&embedding).sqrt();
if l2norm != 0f32 {
embedding /= l2norm;
}
}
}
/// Find words that are similar to the query word.
///
/// The similarity between two words is defined by the dot product of
/// the embeddings. If the vectors are unit vectors (e.g. by virtue of
/// calling `normalize`), this is the cosine similarity. At most, `limit`
/// results are returned.
pub fn similarity(&self, word: &str, limit: usize) -> Option<Vec<WordSimilarity>> {
self.similarity_by(word, limit, |embeds, embed| embeds.dot(&embed))
}
/// Find words that are similar to the query word using the given similarity
/// function.
///
/// The similarity function should return, given the embeddings matrix and
/// the word vector a vector of similarity scores. At most, `limit` results
/// are returned.
pub fn similarity_by<F>(
&self,
word: &str,
limit: usize,
similarity: F,
) -> Option<Vec<WordSimilarity>>
where
F: FnMut(ArrayView2<f32>, ArrayView1<f32>) -> Array1<f32>,
{
self.indices.get(word).map(|idx| {
let embedding = self.matrix.index_axis(Axis(0), *idx);
let mut skip = HashSet::new();
skip.insert(word);
self.similarity_(embedding, &skip, limit, similarity)
})
}
fn similarity_<F>(
&self,
embed: ArrayView1<f32>,
skip: &HashSet<&str>,
limit: usize,
mut similarity: F,
) -> Vec<WordSimilarity>
where
F: FnMut(ArrayView2<f32>, ArrayView1<f32>) -> Array1<f32>,
{
let sims = similarity(self.matrix.view(), embed);
let mut results: BinaryHeap<WordSimilarity> = BinaryHeap::new();
for (idx, sim) in sims.iter().enumerate() {
let word = self.words[idx].as_ref();
// Don't add words that we are explicitly asked to skip.
if skip.contains(word) {
continue;
}
let word_distance = WordSimilarity {
word: word,
similarity: *sim,
};
if results.len() == limit {
if let Some(mut min_distance) = results.peek_mut() {
if word_distance.similarity > min_distance.similarity {
*min_distance = word_distance
}
}
} else {
results.push(word_distance);
}
}
results.into_sorted_vec()
}
/// Get the number of words for which embeddings are stored.
pub fn len(&self) -> usize {
self.words.len()
}
/// Get the words for which embeddings are stored. The words line up with
/// the rows in the matrix returned by `data`.
pub fn words(&self) -> &[String] {
&self.words
}
}
impl<'a> IntoIterator for &'a Embeddings {
type Item = (&'a str, ArrayView1<'a, f32>);
type IntoIter = Iter<'a>;
fn into_iter(self) -> Self::IntoIter {
self.iter()
}
}
/// Iterator over embeddings.
pub struct Iter<'a> {
embeddings: &'a Embeddings,
inner: Enumerate<slice::Iter<'a, String>>,
}
impl<'a> Iterator for Iter<'a> {
type Item = (&'a str, ArrayView1<'a, f32>);
fn next(&mut self) -> Option<Self::Item> {
self.inner.next().map(|(idx, word)| {
(
word.as_str(),
self.embeddings.matrix.index_axis(Axis(0), idx),
)
})
}
}
pub fn new_embeddings(
matrix: Array2<f32>,
embed_len: usize,
indices: HashMap<String, usize>,
words: Vec<String>,
) -> Embeddings {
Embeddings {
matrix: matrix,
embed_len: embed_len,
indices: indices,
words: words,
}
} | Entry::Vacant(vacant) => vacant.insert(self.words.len()),
Entry::Occupied(_) => bail!(BuilderError::DuplicateWord { word: word }),
};
| random_line_split |
embeddings.rs | use std::cmp::Ordering;
use std::collections::hash_map::Entry;
use std::collections::{BinaryHeap, HashMap, HashSet};
use std::iter::Enumerate;
use std::slice;
use failure::Error;
use ndarray::{Array1, Array2, ArrayView1, ArrayView2, Axis};
/// A word similarity.
///
/// This data structure is used to store a pair consisting of a word and
/// its similarity to a query word.
#[derive(Debug)]
pub struct WordSimilarity<'a> {
pub word: &'a str,
pub similarity: f32,
}
impl<'a> Ord for WordSimilarity<'a> {
fn cmp(&self, other: &Self) -> Ordering {
if self.similarity > other.similarity {
Ordering::Less
} else if self.similarity < other.similarity {
Ordering::Greater
} else {
self.word.cmp(other.word)
}
}
}
impl<'a> PartialOrd for WordSimilarity<'a> {
fn partial_cmp(&self, other: &WordSimilarity) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl<'a> Eq for WordSimilarity<'a> {}
impl<'a> PartialEq for WordSimilarity<'a> {
fn eq(&self, other: &WordSimilarity) -> bool {
self.cmp(other) == Ordering::Equal
}
}
#[derive(Debug, Fail)]
pub enum BuilderError {
#[fail(
display = "invalid embedding shape, expected: {}, got: {}",
expected_len, len
)]
InvalidEmbeddingLength { expected_len: usize, len: usize },
#[fail(display = "word not unique: {}", word)]
DuplicateWord { word: String },
}
/// Builder for word embedding matrices.
///
/// This builder can be used to construct an embedding matrix. The builder
/// does not assume that the number of embeddings is known ahead of time.
/// The embedding size is determined by the size of the first embedding that
/// is added. All subsequently added embeddings should have the same size.
pub struct Builder {
words: Vec<String>,
indices: HashMap<String, usize>,
embeddings: Vec<Array1<f32>>,
}
impl Builder {
/// Create a builder.
pub fn new() -> Self {
Builder {
words: Vec::new(),
indices: HashMap::new(),
embeddings: Vec::new(),
}
}
/// Construct the final embeddin matrix.
///
/// The `None` is returned when no embedding was added to the builder.
pub fn build(self) -> Option<Embeddings> {
let embed_len = self.embeddings.first()?.shape()[0];
let mut matrix = Array2::zeros((self.embeddings.len(), embed_len));
for (idx, embed) in self.embeddings.into_iter().enumerate() {
matrix.index_axis_mut(Axis(0), idx).assign(&embed);
}
Some(Embeddings {
embed_len,
indices: self.indices,
words: self.words,
matrix,
})
}
/// Add a new embedding to the builder.
///
/// An `Err` value is returned when the word has been inserted in the
/// builder before or when the embedding has a different size than
/// previously inserted embeddings.
pub fn push<S, E>(&mut self, word: S, embedding: E) -> Result<(), Error>
where
S: Into<String>,
E: Into<Array1<f32>>,
{
let word = word.into();
let embedding = embedding.into();
// Check that the embedding has the same length as embeddings that
// were inserted before.
if let Some(first) = self.embeddings.first() {
ensure!(
embedding.shape() == first.shape(),
BuilderError::InvalidEmbeddingLength {
expected_len: first.shape()[0],
len: embedding.shape()[0],
}
);
}
// Insert the word if it was not inserted before.
match self.indices.entry(word.to_owned()) {
Entry::Vacant(vacant) => vacant.insert(self.words.len()),
Entry::Occupied(_) => bail!(BuilderError::DuplicateWord { word: word }),
};
self.words.push(word.clone());
self.embeddings.push(embedding);
Ok(())
}
}
/// Word embeddings.
///
/// This data structure stores word embeddings (also known as *word vectors*)
/// and provides some useful methods on the embeddings, such as similarity
/// and analogy queries.
pub struct Embeddings {
matrix: Array2<f32>,
embed_len: usize,
indices: HashMap<String, usize>,
words: Vec<String>,
}
impl Embeddings {
/// Perform an analogy query.
///
/// This method returns words that are close in vector space the analogy
/// query `word1` is to `word2` as `word3` is to `?`. More concretely,
/// it searches embeddings that are similar to:
///
/// *embedding(word2) - embedding(word1) + embedding(word3)*
///
/// At most, `limit` results are returned.
pub fn analogy(
&self,
word1: &str,
word2: &str,
word3: &str,
limit: usize,
) -> Option<Vec<WordSimilarity>> {
self.analogy_by(word1, word2, word3, limit, |embeds, embed| {
embeds.dot(&embed)
})
}
/// Perform an analogy query using the given similarity function.
///
/// This method returns words that are close in vector space the analogy
/// query `word1` is to `word2` as `word3` is to `?`. More concretely,
/// it searches embeddings that are similar to:
///
/// *embedding(word2) - embedding(word1) + embedding(word3)*
///
/// At most, `limit` results are returned.
pub fn analogy_by<F>(
&self,
word1: &str,
word2: &str,
word3: &str,
limit: usize,
similarity: F,
) -> Option<Vec<WordSimilarity>>
where
F: FnMut(ArrayView2<f32>, ArrayView1<f32>) -> Array1<f32>,
{
let embedding1 = self
.indices
.get(word1)
.map(|idx| self.matrix.index_axis(Axis(0), *idx).to_owned())?;
let embedding2 = self
.indices
.get(word2)
.map(|idx| self.matrix.index_axis(Axis(0), *idx).to_owned())?;
let embedding3 = self
.indices
.get(word3)
.map(|idx| self.matrix.index_axis(Axis(0), *idx).to_owned())?;
let embedding = (embedding2 - embedding1) + embedding3;
let skip = [word1, word2, word3].iter().cloned().collect();
Some(self.similarity_(embedding.view(), &skip, limit, similarity))
}
/// Get (a view of) the raw embedding matrix.
pub fn data(&self) -> ArrayView2<f32> {
self.matrix.view()
}
/// Return the length (in vector components) of the word embeddings.
pub fn embed_len(&self) -> usize {
self.embed_len
}
/// Get the embedding of a word.
pub fn embedding(&self, word: &str) -> Option<ArrayView1<f32>> {
self.indices
.get(word)
.map(|idx| self.matrix.index_axis(Axis(0), *idx))
}
/// Get the mapping from words to row indices of the embedding matrix.
pub fn indices(&self) -> &HashMap<String, usize> |
/// Get an iterator over pairs of words and the corresponding embeddings.
pub fn iter(&self) -> Iter {
Iter {
embeddings: self,
inner: self.words.iter().enumerate(),
}
}
/// Normalize the embeddings using their L2 (euclidean) norms.
///
/// **Note:** when you are using the output of e.g. word2vec, you should
/// normalize the embeddings to get good query results.
pub fn normalize(&mut self) {
for mut embedding in self.matrix.outer_iter_mut() {
let l2norm = embedding.dot(&embedding).sqrt();
if l2norm != 0f32 {
embedding /= l2norm;
}
}
}
/// Find words that are similar to the query word.
///
/// The similarity between two words is defined by the dot product of
/// the embeddings. If the vectors are unit vectors (e.g. by virtue of
/// calling `normalize`), this is the cosine similarity. At most, `limit`
/// results are returned.
pub fn similarity(&self, word: &str, limit: usize) -> Option<Vec<WordSimilarity>> {
self.similarity_by(word, limit, |embeds, embed| embeds.dot(&embed))
}
/// Find words that are similar to the query word using the given similarity
/// function.
///
/// The similarity function should return, given the embeddings matrix and
/// the word vector a vector of similarity scores. At most, `limit` results
/// are returned.
pub fn similarity_by<F>(
&self,
word: &str,
limit: usize,
similarity: F,
) -> Option<Vec<WordSimilarity>>
where
F: FnMut(ArrayView2<f32>, ArrayView1<f32>) -> Array1<f32>,
{
self.indices.get(word).map(|idx| {
let embedding = self.matrix.index_axis(Axis(0), *idx);
let mut skip = HashSet::new();
skip.insert(word);
self.similarity_(embedding, &skip, limit, similarity)
})
}
fn similarity_<F>(
&self,
embed: ArrayView1<f32>,
skip: &HashSet<&str>,
limit: usize,
mut similarity: F,
) -> Vec<WordSimilarity>
where
F: FnMut(ArrayView2<f32>, ArrayView1<f32>) -> Array1<f32>,
{
let sims = similarity(self.matrix.view(), embed);
let mut results: BinaryHeap<WordSimilarity> = BinaryHeap::new();
for (idx, sim) in sims.iter().enumerate() {
let word = self.words[idx].as_ref();
// Don't add words that we are explicitly asked to skip.
if skip.contains(word) {
continue;
}
let word_distance = WordSimilarity {
word: word,
similarity: *sim,
};
if results.len() == limit {
if let Some(mut min_distance) = results.peek_mut() {
if word_distance.similarity > min_distance.similarity {
*min_distance = word_distance
}
}
} else {
results.push(word_distance);
}
}
results.into_sorted_vec()
}
/// Get the number of words for which embeddings are stored.
pub fn len(&self) -> usize {
self.words.len()
}
/// Get the words for which embeddings are stored. The words line up with
/// the rows in the matrix returned by `data`.
pub fn words(&self) -> &[String] {
&self.words
}
}
impl<'a> IntoIterator for &'a Embeddings {
type Item = (&'a str, ArrayView1<'a, f32>);
type IntoIter = Iter<'a>;
fn into_iter(self) -> Self::IntoIter {
self.iter()
}
}
/// Iterator over embeddings.
pub struct Iter<'a> {
embeddings: &'a Embeddings,
inner: Enumerate<slice::Iter<'a, String>>,
}
impl<'a> Iterator for Iter<'a> {
type Item = (&'a str, ArrayView1<'a, f32>);
fn next(&mut self) -> Option<Self::Item> {
self.inner.next().map(|(idx, word)| {
(
word.as_str(),
self.embeddings.matrix.index_axis(Axis(0), idx),
)
})
}
}
pub fn new_embeddings(
matrix: Array2<f32>,
embed_len: usize,
indices: HashMap<String, usize>,
words: Vec<String>,
) -> Embeddings {
Embeddings {
matrix: matrix,
embed_len: embed_len,
indices: indices,
words: words,
}
}
| {
&self.indices
} | identifier_body |
embeddings.rs | use std::cmp::Ordering;
use std::collections::hash_map::Entry;
use std::collections::{BinaryHeap, HashMap, HashSet};
use std::iter::Enumerate;
use std::slice;
use failure::Error;
use ndarray::{Array1, Array2, ArrayView1, ArrayView2, Axis};
/// A word similarity.
///
/// This data structure is used to store a pair consisting of a word and
/// its similarity to a query word.
#[derive(Debug)]
pub struct WordSimilarity<'a> {
pub word: &'a str,
pub similarity: f32,
}
impl<'a> Ord for WordSimilarity<'a> {
fn cmp(&self, other: &Self) -> Ordering {
if self.similarity > other.similarity {
Ordering::Less
} else if self.similarity < other.similarity {
Ordering::Greater
} else |
}
}
impl<'a> PartialOrd for WordSimilarity<'a> {
fn partial_cmp(&self, other: &WordSimilarity) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl<'a> Eq for WordSimilarity<'a> {}
impl<'a> PartialEq for WordSimilarity<'a> {
fn eq(&self, other: &WordSimilarity) -> bool {
self.cmp(other) == Ordering::Equal
}
}
#[derive(Debug, Fail)]
pub enum BuilderError {
#[fail(
display = "invalid embedding shape, expected: {}, got: {}",
expected_len, len
)]
InvalidEmbeddingLength { expected_len: usize, len: usize },
#[fail(display = "word not unique: {}", word)]
DuplicateWord { word: String },
}
/// Builder for word embedding matrices.
///
/// This builder can be used to construct an embedding matrix. The builder
/// does not assume that the number of embeddings is known ahead of time.
/// The embedding size is determined by the size of the first embedding that
/// is added. All subsequently added embeddings should have the same size.
pub struct Builder {
words: Vec<String>,
indices: HashMap<String, usize>,
embeddings: Vec<Array1<f32>>,
}
impl Builder {
/// Create a builder.
pub fn new() -> Self {
Builder {
words: Vec::new(),
indices: HashMap::new(),
embeddings: Vec::new(),
}
}
/// Construct the final embeddin matrix.
///
/// The `None` is returned when no embedding was added to the builder.
pub fn build(self) -> Option<Embeddings> {
let embed_len = self.embeddings.first()?.shape()[0];
let mut matrix = Array2::zeros((self.embeddings.len(), embed_len));
for (idx, embed) in self.embeddings.into_iter().enumerate() {
matrix.index_axis_mut(Axis(0), idx).assign(&embed);
}
Some(Embeddings {
embed_len,
indices: self.indices,
words: self.words,
matrix,
})
}
/// Add a new embedding to the builder.
///
/// An `Err` value is returned when the word has been inserted in the
/// builder before or when the embedding has a different size than
/// previously inserted embeddings.
pub fn push<S, E>(&mut self, word: S, embedding: E) -> Result<(), Error>
where
S: Into<String>,
E: Into<Array1<f32>>,
{
let word = word.into();
let embedding = embedding.into();
// Check that the embedding has the same length as embeddings that
// were inserted before.
if let Some(first) = self.embeddings.first() {
ensure!(
embedding.shape() == first.shape(),
BuilderError::InvalidEmbeddingLength {
expected_len: first.shape()[0],
len: embedding.shape()[0],
}
);
}
// Insert the word if it was not inserted before.
match self.indices.entry(word.to_owned()) {
Entry::Vacant(vacant) => vacant.insert(self.words.len()),
Entry::Occupied(_) => bail!(BuilderError::DuplicateWord { word: word }),
};
self.words.push(word.clone());
self.embeddings.push(embedding);
Ok(())
}
}
/// Word embeddings.
///
/// This data structure stores word embeddings (also known as *word vectors*)
/// and provides some useful methods on the embeddings, such as similarity
/// and analogy queries.
pub struct Embeddings {
matrix: Array2<f32>,
embed_len: usize,
indices: HashMap<String, usize>,
words: Vec<String>,
}
impl Embeddings {
/// Perform an analogy query.
///
/// This method returns words that are close in vector space the analogy
/// query `word1` is to `word2` as `word3` is to `?`. More concretely,
/// it searches embeddings that are similar to:
///
/// *embedding(word2) - embedding(word1) + embedding(word3)*
///
/// At most, `limit` results are returned.
pub fn analogy(
&self,
word1: &str,
word2: &str,
word3: &str,
limit: usize,
) -> Option<Vec<WordSimilarity>> {
self.analogy_by(word1, word2, word3, limit, |embeds, embed| {
embeds.dot(&embed)
})
}
/// Perform an analogy query using the given similarity function.
///
/// This method returns words that are close in vector space the analogy
/// query `word1` is to `word2` as `word3` is to `?`. More concretely,
/// it searches embeddings that are similar to:
///
/// *embedding(word2) - embedding(word1) + embedding(word3)*
///
/// At most, `limit` results are returned.
pub fn analogy_by<F>(
&self,
word1: &str,
word2: &str,
word3: &str,
limit: usize,
similarity: F,
) -> Option<Vec<WordSimilarity>>
where
F: FnMut(ArrayView2<f32>, ArrayView1<f32>) -> Array1<f32>,
{
let embedding1 = self
.indices
.get(word1)
.map(|idx| self.matrix.index_axis(Axis(0), *idx).to_owned())?;
let embedding2 = self
.indices
.get(word2)
.map(|idx| self.matrix.index_axis(Axis(0), *idx).to_owned())?;
let embedding3 = self
.indices
.get(word3)
.map(|idx| self.matrix.index_axis(Axis(0), *idx).to_owned())?;
let embedding = (embedding2 - embedding1) + embedding3;
let skip = [word1, word2, word3].iter().cloned().collect();
Some(self.similarity_(embedding.view(), &skip, limit, similarity))
}
/// Get (a view of) the raw embedding matrix.
pub fn data(&self) -> ArrayView2<f32> {
self.matrix.view()
}
/// Return the length (in vector components) of the word embeddings.
pub fn embed_len(&self) -> usize {
self.embed_len
}
/// Get the embedding of a word.
pub fn embedding(&self, word: &str) -> Option<ArrayView1<f32>> {
self.indices
.get(word)
.map(|idx| self.matrix.index_axis(Axis(0), *idx))
}
/// Get the mapping from words to row indices of the embedding matrix.
pub fn indices(&self) -> &HashMap<String, usize> {
&self.indices
}
/// Get an iterator over pairs of words and the corresponding embeddings.
pub fn iter(&self) -> Iter {
Iter {
embeddings: self,
inner: self.words.iter().enumerate(),
}
}
/// Normalize the embeddings using their L2 (euclidean) norms.
///
/// **Note:** when you are using the output of e.g. word2vec, you should
/// normalize the embeddings to get good query results.
pub fn normalize(&mut self) {
for mut embedding in self.matrix.outer_iter_mut() {
let l2norm = embedding.dot(&embedding).sqrt();
if l2norm != 0f32 {
embedding /= l2norm;
}
}
}
/// Find words that are similar to the query word.
///
/// The similarity between two words is defined by the dot product of
/// the embeddings. If the vectors are unit vectors (e.g. by virtue of
/// calling `normalize`), this is the cosine similarity. At most, `limit`
/// results are returned.
pub fn similarity(&self, word: &str, limit: usize) -> Option<Vec<WordSimilarity>> {
self.similarity_by(word, limit, |embeds, embed| embeds.dot(&embed))
}
/// Find words that are similar to the query word using the given similarity
/// function.
///
/// The similarity function should return, given the embeddings matrix and
/// the word vector a vector of similarity scores. At most, `limit` results
/// are returned.
pub fn similarity_by<F>(
&self,
word: &str,
limit: usize,
similarity: F,
) -> Option<Vec<WordSimilarity>>
where
F: FnMut(ArrayView2<f32>, ArrayView1<f32>) -> Array1<f32>,
{
self.indices.get(word).map(|idx| {
let embedding = self.matrix.index_axis(Axis(0), *idx);
let mut skip = HashSet::new();
skip.insert(word);
self.similarity_(embedding, &skip, limit, similarity)
})
}
fn similarity_<F>(
&self,
embed: ArrayView1<f32>,
skip: &HashSet<&str>,
limit: usize,
mut similarity: F,
) -> Vec<WordSimilarity>
where
F: FnMut(ArrayView2<f32>, ArrayView1<f32>) -> Array1<f32>,
{
let sims = similarity(self.matrix.view(), embed);
let mut results: BinaryHeap<WordSimilarity> = BinaryHeap::new();
for (idx, sim) in sims.iter().enumerate() {
let word = self.words[idx].as_ref();
// Don't add words that we are explicitly asked to skip.
if skip.contains(word) {
continue;
}
let word_distance = WordSimilarity {
word: word,
similarity: *sim,
};
if results.len() == limit {
if let Some(mut min_distance) = results.peek_mut() {
if word_distance.similarity > min_distance.similarity {
*min_distance = word_distance
}
}
} else {
results.push(word_distance);
}
}
results.into_sorted_vec()
}
/// Get the number of words for which embeddings are stored.
pub fn len(&self) -> usize {
self.words.len()
}
/// Get the words for which embeddings are stored. The words line up with
/// the rows in the matrix returned by `data`.
pub fn words(&self) -> &[String] {
&self.words
}
}
impl<'a> IntoIterator for &'a Embeddings {
type Item = (&'a str, ArrayView1<'a, f32>);
type IntoIter = Iter<'a>;
fn into_iter(self) -> Self::IntoIter {
self.iter()
}
}
/// Iterator over embeddings.
pub struct Iter<'a> {
embeddings: &'a Embeddings,
inner: Enumerate<slice::Iter<'a, String>>,
}
impl<'a> Iterator for Iter<'a> {
type Item = (&'a str, ArrayView1<'a, f32>);
fn next(&mut self) -> Option<Self::Item> {
self.inner.next().map(|(idx, word)| {
(
word.as_str(),
self.embeddings.matrix.index_axis(Axis(0), idx),
)
})
}
}
pub fn new_embeddings(
matrix: Array2<f32>,
embed_len: usize,
indices: HashMap<String, usize>,
words: Vec<String>,
) -> Embeddings {
Embeddings {
matrix: matrix,
embed_len: embed_len,
indices: indices,
words: words,
}
}
| {
self.word.cmp(other.word)
} | conditional_block |
embeddings.rs | use std::cmp::Ordering;
use std::collections::hash_map::Entry;
use std::collections::{BinaryHeap, HashMap, HashSet};
use std::iter::Enumerate;
use std::slice;
use failure::Error;
use ndarray::{Array1, Array2, ArrayView1, ArrayView2, Axis};
/// A word similarity.
///
/// This data structure is used to store a pair consisting of a word and
/// its similarity to a query word.
#[derive(Debug)]
pub struct WordSimilarity<'a> {
pub word: &'a str,
pub similarity: f32,
}
impl<'a> Ord for WordSimilarity<'a> {
fn cmp(&self, other: &Self) -> Ordering {
if self.similarity > other.similarity {
Ordering::Less
} else if self.similarity < other.similarity {
Ordering::Greater
} else {
self.word.cmp(other.word)
}
}
}
impl<'a> PartialOrd for WordSimilarity<'a> {
fn partial_cmp(&self, other: &WordSimilarity) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl<'a> Eq for WordSimilarity<'a> {}
impl<'a> PartialEq for WordSimilarity<'a> {
fn eq(&self, other: &WordSimilarity) -> bool {
self.cmp(other) == Ordering::Equal
}
}
#[derive(Debug, Fail)]
pub enum | {
#[fail(
display = "invalid embedding shape, expected: {}, got: {}",
expected_len, len
)]
InvalidEmbeddingLength { expected_len: usize, len: usize },
#[fail(display = "word not unique: {}", word)]
DuplicateWord { word: String },
}
/// Builder for word embedding matrices.
///
/// This builder can be used to construct an embedding matrix. The builder
/// does not assume that the number of embeddings is known ahead of time.
/// The embedding size is determined by the size of the first embedding that
/// is added. All subsequently added embeddings should have the same size.
pub struct Builder {
words: Vec<String>,
indices: HashMap<String, usize>,
embeddings: Vec<Array1<f32>>,
}
impl Builder {
/// Create a builder.
pub fn new() -> Self {
Builder {
words: Vec::new(),
indices: HashMap::new(),
embeddings: Vec::new(),
}
}
/// Construct the final embeddin matrix.
///
/// The `None` is returned when no embedding was added to the builder.
pub fn build(self) -> Option<Embeddings> {
let embed_len = self.embeddings.first()?.shape()[0];
let mut matrix = Array2::zeros((self.embeddings.len(), embed_len));
for (idx, embed) in self.embeddings.into_iter().enumerate() {
matrix.index_axis_mut(Axis(0), idx).assign(&embed);
}
Some(Embeddings {
embed_len,
indices: self.indices,
words: self.words,
matrix,
})
}
/// Add a new embedding to the builder.
///
/// An `Err` value is returned when the word has been inserted in the
/// builder before or when the embedding has a different size than
/// previously inserted embeddings.
pub fn push<S, E>(&mut self, word: S, embedding: E) -> Result<(), Error>
where
S: Into<String>,
E: Into<Array1<f32>>,
{
let word = word.into();
let embedding = embedding.into();
// Check that the embedding has the same length as embeddings that
// were inserted before.
if let Some(first) = self.embeddings.first() {
ensure!(
embedding.shape() == first.shape(),
BuilderError::InvalidEmbeddingLength {
expected_len: first.shape()[0],
len: embedding.shape()[0],
}
);
}
// Insert the word if it was not inserted before.
match self.indices.entry(word.to_owned()) {
Entry::Vacant(vacant) => vacant.insert(self.words.len()),
Entry::Occupied(_) => bail!(BuilderError::DuplicateWord { word: word }),
};
self.words.push(word.clone());
self.embeddings.push(embedding);
Ok(())
}
}
/// Word embeddings.
///
/// This data structure stores word embeddings (also known as *word vectors*)
/// and provides some useful methods on the embeddings, such as similarity
/// and analogy queries.
pub struct Embeddings {
matrix: Array2<f32>,
embed_len: usize,
indices: HashMap<String, usize>,
words: Vec<String>,
}
impl Embeddings {
/// Perform an analogy query.
///
/// This method returns words that are close in vector space the analogy
/// query `word1` is to `word2` as `word3` is to `?`. More concretely,
/// it searches embeddings that are similar to:
///
/// *embedding(word2) - embedding(word1) + embedding(word3)*
///
/// At most, `limit` results are returned.
pub fn analogy(
&self,
word1: &str,
word2: &str,
word3: &str,
limit: usize,
) -> Option<Vec<WordSimilarity>> {
self.analogy_by(word1, word2, word3, limit, |embeds, embed| {
embeds.dot(&embed)
})
}
/// Perform an analogy query using the given similarity function.
///
/// This method returns words that are close in vector space the analogy
/// query `word1` is to `word2` as `word3` is to `?`. More concretely,
/// it searches embeddings that are similar to:
///
/// *embedding(word2) - embedding(word1) + embedding(word3)*
///
/// At most, `limit` results are returned.
pub fn analogy_by<F>(
&self,
word1: &str,
word2: &str,
word3: &str,
limit: usize,
similarity: F,
) -> Option<Vec<WordSimilarity>>
where
F: FnMut(ArrayView2<f32>, ArrayView1<f32>) -> Array1<f32>,
{
let embedding1 = self
.indices
.get(word1)
.map(|idx| self.matrix.index_axis(Axis(0), *idx).to_owned())?;
let embedding2 = self
.indices
.get(word2)
.map(|idx| self.matrix.index_axis(Axis(0), *idx).to_owned())?;
let embedding3 = self
.indices
.get(word3)
.map(|idx| self.matrix.index_axis(Axis(0), *idx).to_owned())?;
let embedding = (embedding2 - embedding1) + embedding3;
let skip = [word1, word2, word3].iter().cloned().collect();
Some(self.similarity_(embedding.view(), &skip, limit, similarity))
}
/// Get (a view of) the raw embedding matrix.
pub fn data(&self) -> ArrayView2<f32> {
self.matrix.view()
}
/// Return the length (in vector components) of the word embeddings.
pub fn embed_len(&self) -> usize {
self.embed_len
}
/// Get the embedding of a word.
pub fn embedding(&self, word: &str) -> Option<ArrayView1<f32>> {
self.indices
.get(word)
.map(|idx| self.matrix.index_axis(Axis(0), *idx))
}
/// Get the mapping from words to row indices of the embedding matrix.
pub fn indices(&self) -> &HashMap<String, usize> {
&self.indices
}
/// Get an iterator over pairs of words and the corresponding embeddings.
pub fn iter(&self) -> Iter {
Iter {
embeddings: self,
inner: self.words.iter().enumerate(),
}
}
/// Normalize the embeddings using their L2 (euclidean) norms.
///
/// **Note:** when you are using the output of e.g. word2vec, you should
/// normalize the embeddings to get good query results.
pub fn normalize(&mut self) {
for mut embedding in self.matrix.outer_iter_mut() {
let l2norm = embedding.dot(&embedding).sqrt();
if l2norm != 0f32 {
embedding /= l2norm;
}
}
}
/// Find words that are similar to the query word.
///
/// The similarity between two words is defined by the dot product of
/// the embeddings. If the vectors are unit vectors (e.g. by virtue of
/// calling `normalize`), this is the cosine similarity. At most, `limit`
/// results are returned.
pub fn similarity(&self, word: &str, limit: usize) -> Option<Vec<WordSimilarity>> {
self.similarity_by(word, limit, |embeds, embed| embeds.dot(&embed))
}
/// Find words that are similar to the query word using the given similarity
/// function.
///
/// The similarity function should return, given the embeddings matrix and
/// the word vector a vector of similarity scores. At most, `limit` results
/// are returned.
pub fn similarity_by<F>(
&self,
word: &str,
limit: usize,
similarity: F,
) -> Option<Vec<WordSimilarity>>
where
F: FnMut(ArrayView2<f32>, ArrayView1<f32>) -> Array1<f32>,
{
self.indices.get(word).map(|idx| {
let embedding = self.matrix.index_axis(Axis(0), *idx);
let mut skip = HashSet::new();
skip.insert(word);
self.similarity_(embedding, &skip, limit, similarity)
})
}
fn similarity_<F>(
&self,
embed: ArrayView1<f32>,
skip: &HashSet<&str>,
limit: usize,
mut similarity: F,
) -> Vec<WordSimilarity>
where
F: FnMut(ArrayView2<f32>, ArrayView1<f32>) -> Array1<f32>,
{
let sims = similarity(self.matrix.view(), embed);
let mut results: BinaryHeap<WordSimilarity> = BinaryHeap::new();
for (idx, sim) in sims.iter().enumerate() {
let word = self.words[idx].as_ref();
// Don't add words that we are explicitly asked to skip.
if skip.contains(word) {
continue;
}
let word_distance = WordSimilarity {
word: word,
similarity: *sim,
};
if results.len() == limit {
if let Some(mut min_distance) = results.peek_mut() {
if word_distance.similarity > min_distance.similarity {
*min_distance = word_distance
}
}
} else {
results.push(word_distance);
}
}
results.into_sorted_vec()
}
/// Get the number of words for which embeddings are stored.
pub fn len(&self) -> usize {
self.words.len()
}
/// Get the words for which embeddings are stored. The words line up with
/// the rows in the matrix returned by `data`.
pub fn words(&self) -> &[String] {
&self.words
}
}
impl<'a> IntoIterator for &'a Embeddings {
type Item = (&'a str, ArrayView1<'a, f32>);
type IntoIter = Iter<'a>;
fn into_iter(self) -> Self::IntoIter {
self.iter()
}
}
/// Iterator over embeddings.
pub struct Iter<'a> {
embeddings: &'a Embeddings,
inner: Enumerate<slice::Iter<'a, String>>,
}
impl<'a> Iterator for Iter<'a> {
type Item = (&'a str, ArrayView1<'a, f32>);
fn next(&mut self) -> Option<Self::Item> {
self.inner.next().map(|(idx, word)| {
(
word.as_str(),
self.embeddings.matrix.index_axis(Axis(0), idx),
)
})
}
}
pub fn new_embeddings(
matrix: Array2<f32>,
embed_len: usize,
indices: HashMap<String, usize>,
words: Vec<String>,
) -> Embeddings {
Embeddings {
matrix: matrix,
embed_len: embed_len,
indices: indices,
words: words,
}
}
| BuilderError | identifier_name |
utils.rs | use std::cmp::Ordering;
use std::collections::HashSet;
use crate::id;
use serde::{Deserialize, Serialize};
use swc_core::common::{Mark, Span, SyntaxContext, DUMMY_SP};
use swc_core::ecma::ast::{self, Id};
use swc_core::ecma::atoms::{js_word, JsWord};
pub fn match_member_expr(expr: &ast::MemberExpr, idents: Vec<&str>, decls: &HashSet<Id>) -> bool {
use ast::{Expr, Ident, Lit, MemberProp, Str};
let mut member = expr;
let mut idents = idents;
while idents.len() > 1 {
let expected = idents.pop().unwrap();
let prop = match &member.prop {
MemberProp::Computed(comp) => {
if let Expr::Lit(Lit::Str(Str { value: ref sym, .. })) = *comp.expr {
sym
} else {
return false;
}
}
MemberProp::Ident(Ident { ref sym, .. }) => sym,
_ => return false,
};
if prop != expected {
return false;
}
match &*member.obj {
Expr::Member(m) => member = m,
Expr::Ident(id) => {
return idents.len() == 1 && &id.sym == idents.pop().unwrap() && !decls.contains(&id!(id));
}
_ => return false,
}
}
false
}
pub fn create_require(specifier: swc_core::ecma::atoms::JsWord) -> ast::CallExpr {
let mut normalized_specifier = specifier;
if normalized_specifier.starts_with("node:") {
normalized_specifier = normalized_specifier.replace("node:", "").into();
}
ast::CallExpr {
callee: ast::Callee::Expr(Box::new(ast::Expr::Ident(ast::Ident::new(
"require".into(),
DUMMY_SP,
)))),
args: vec![ast::ExprOrSpread {
expr: Box::new(ast::Expr::Lit(ast::Lit::Str(normalized_specifier.into()))),
spread: None,
}],
span: DUMMY_SP,
type_args: None,
}
}
fn is_marked(span: Span, mark: Mark) -> bool {
let mut ctxt = span.ctxt();
loop {
let m = ctxt.remove_mark();
if m == Mark::root() {
return false;
}
if m == mark {
return true;
}
}
}
pub fn match_str(node: &ast::Expr) -> Option<(JsWord, Span)> {
use ast::*;
match node {
// "string" or 'string'
Expr::Lit(Lit::Str(s)) => Some((s.value.clone(), s.span)),
// `string`
Expr::Tpl(tpl) if tpl.quasis.len() == 1 && tpl.exprs.is_empty() => {
Some(((*tpl.quasis[0].raw).into(), tpl.span))
}
_ => None,
}
}
pub fn match_property_name(node: &ast::MemberExpr) -> Option<(JsWord, Span)> {
match &node.prop {
ast::MemberProp::Computed(s) => match_str(&s.expr),
ast::MemberProp::Ident(id) => Some((id.sym.clone(), id.span)),
ast::MemberProp::PrivateName(_) => None,
}
}
pub fn match_export_name(name: &ast::ModuleExportName) -> (JsWord, Span) {
match name {
ast::ModuleExportName::Ident(id) => (id.sym.clone(), id.span),
ast::ModuleExportName::Str(s) => (s.value.clone(), s.span),
}
}
/// Properties like `ExportNamedSpecifier::orig` have to be an Ident if `src` is `None`
pub fn match_export_name_ident(name: &ast::ModuleExportName) -> &ast::Ident {
match name {
ast::ModuleExportName::Ident(id) => id,
ast::ModuleExportName::Str(_) => unreachable!(),
}
}
pub fn match_require(node: &ast::Expr, decls: &HashSet<Id>, ignore_mark: Mark) -> Option<JsWord> {
use ast::*;
match node {
Expr::Call(call) => match &call.callee {
Callee::Expr(expr) => match &**expr {
Expr::Ident(ident) => {
if ident.sym == js_word!("require")
&& !decls.contains(&(ident.sym.clone(), ident.span.ctxt))
&& !is_marked(ident.span, ignore_mark)
{
if let Some(arg) = call.args.get(0) {
return match_str(&arg.expr).map(|(name, _)| name);
}
}
None
}
Expr::Member(member) => {
if match_member_expr(member, vec!["module", "require"], decls) {
if let Some(arg) = call.args.get(0) {
return match_str(&arg.expr).map(|(name, _)| name);
}
}
None
}
_ => None,
},
_ => None,
},
_ => None,
}
}
pub fn match_import(node: &ast::Expr, ignore_mark: Mark) -> Option<JsWord> {
use ast::*;
match node {
Expr::Call(call) => match &call.callee {
Callee::Import(ident) if !is_marked(ident.span, ignore_mark) => {
if let Some(arg) = call.args.get(0) {
return match_str(&arg.expr).map(|(name, _)| name);
}
None
}
_ => None,
},
_ => None,
}
}
// `name` must not be an existing binding.
pub fn create_global_decl_stmt(
name: swc_core::ecma::atoms::JsWord,
init: ast::Expr,
global_mark: Mark,
) -> (ast::Stmt, SyntaxContext) {
// The correct value would actually be `DUMMY_SP.apply_mark(Mark::fresh(Mark::root()))`.
// But this saves us from running the resolver again in some cases.
let span = DUMMY_SP.apply_mark(global_mark);
(
ast::Stmt::Decl(ast::Decl::Var(Box::new(ast::VarDecl {
kind: ast::VarDeclKind::Var,
declare: false,
span: DUMMY_SP,
decls: vec![ast::VarDeclarator {
name: ast::Pat::Ident(ast::BindingIdent::from(ast::Ident::new(name, span))),
span: DUMMY_SP,
definite: false,
init: Some(Box::new(init)),
}],
}))),
span.ctxt,
)
}
pub fn get_undefined_ident(unresolved_mark: Mark) -> ast::Ident {
ast::Ident::new(js_word!("undefined"), DUMMY_SP.apply_mark(unresolved_mark))
}
#[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq)]
/// Corresponds to the JS SourceLocation type (1-based, end exclusive)
pub struct SourceLocation {
pub start_line: usize,
pub start_col: usize,
pub end_line: usize,
pub end_col: usize,
}
impl SourceLocation {
pub fn from(source_map: &swc_core::common::SourceMap, span: swc_core::common::Span) -> Self {
if span.lo.is_dummy() || span.hi.is_dummy() {
return SourceLocation {
start_line: 1,
start_col: 1,
end_line: 1,
end_col: 2,
};
}
let start = source_map.lookup_char_pos(span.lo);
let end = source_map.lookup_char_pos(span.hi);
// SWC's columns are exclusive, ours are exclusive
// SWC has 0-based columns, ours are 1-based (column + 1)
SourceLocation {
start_line: start.line,
start_col: start.col_display + 1,
end_line: end.line,
end_col: end.col_display + 1,
}
}
}
impl PartialOrd for SourceLocation {
fn partial_cmp(&self, other: &SourceLocation) -> Option<Ordering> {
match self.start_line.cmp(&other.start_line) {
Ordering::Equal => self.start_col.partial_cmp(&other.start_col),
o => Some(o),
}
}
}
#[derive(Serialize, Deserialize, Debug)]
pub struct CodeHighlight {
pub message: Option<String>,
pub loc: SourceLocation,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct Diagnostic {
pub message: String,
pub code_highlights: Option<Vec<CodeHighlight>>,
pub hints: Option<Vec<String>>,
pub show_environment: bool,
pub severity: DiagnosticSeverity,
pub documentation_url: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
pub enum DiagnosticSeverity {
/// Fails the build with an error.
Error,
/// Logs a warning, but the build does not fail.
Warning,
/// An error if this is source code in the project, or a warning if in node_modules.
SourceError,
}
#[derive(Serialize, Debug, Deserialize, Eq, PartialEq, Clone, Copy)]
pub enum SourceType {
Script,
Module,
}
#[derive(Debug)]
pub struct Bailout {
pub loc: SourceLocation,
pub reason: BailoutReason,
}
impl Bailout {
pub fn to_diagnostic(&self) -> Diagnostic {
let (message, documentation_url) = self.reason.info();
Diagnostic {
message: message.into(),
documentation_url: Some(documentation_url.into()),
code_highlights: Some(vec![CodeHighlight {
loc: self.loc.clone(),
message: None,
}]),
show_environment: false,
severity: DiagnosticSeverity::Warning,
hints: None,
}
}
}
#[derive(Debug, Eq, PartialEq)]
pub enum BailoutReason {
NonTopLevelRequire,
NonStaticDestructuring,
TopLevelReturn,
Eval,
NonStaticExports,
FreeModule,
FreeExports,
ExportsReassignment,
ModuleReassignment,
NonStaticDynamicImport,
NonStaticAccess,
}
impl BailoutReason {
fn info(&self) -> (&str, &str) {
match self {
BailoutReason::NonTopLevelRequire => (
"Conditional or non-top-level `require()` call. This causes the resolved module and all dependencies to be wrapped.",
"https://parceljs.org/features/scope-hoisting/#avoid-conditional-require()"
),
BailoutReason::NonStaticDestructuring => (
"Non-static destructuring of `require` or dynamic `import()`. This causes all exports of the resolved module to be included.",
"https://parceljs.org/features/scope-hoisting/#commonjs"
),
BailoutReason::TopLevelReturn => (
"Module contains a top-level `return` statement. This causes the module to be wrapped in a function and tree shaking to be disabled.",
"https://parceljs.org/features/scope-hoisting/#avoid-top-level-return"
),
BailoutReason::Eval => (
"Module contains usage of `eval`. This causes the module to be wrapped in a function and minification to be disabled.",
"https://parceljs.org/features/scope-hoisting/#avoid-eval"
),
BailoutReason::NonStaticExports => (
"Non-static access of CommonJS `exports` object. This causes tree shaking to be disabled for the module.",
"https://parceljs.org/features/scope-hoisting/#commonjs"
),
BailoutReason::FreeModule => (
"Unknown usage of CommonJS `module` object. This causes the module to be wrapped, and tree shaking to be disabled.",
"https://parceljs.org/features/scope-hoisting/#commonjs"
),
BailoutReason::FreeExports => (
"Unknown usage of CommonJS `exports` object. This causes tree shaking to be disabled.",
"https://parceljs.org/features/scope-hoisting/#commonjs"
),
BailoutReason::ExportsReassignment => (
"Module contains a reassignment of the CommonJS `exports` object. This causes the module to be wrapped and tree-shaking to be disabled.",
"https://parceljs.org/features/scope-hoisting/#avoid-module-and-exports-re-assignment"
),
BailoutReason::ModuleReassignment => (
"Module contains a reassignment of the CommonJS `module` object. This causes the module to be wrapped and tree-shaking to be disabled.",
"https://parceljs.org/features/scope-hoisting/#avoid-module-and-exports-re-assignment"
),
BailoutReason::NonStaticDynamicImport => (
"Unknown dynamic import usage. This causes tree shaking to be disabled for the resolved module.", | BailoutReason::NonStaticAccess => (
"Non-static access of an `import` or `require`. This causes tree shaking to be disabled for the resolved module.",
"https://parceljs.org/features/scope-hoisting/#dynamic-member-accesses"
),
}
}
}
#[macro_export]
macro_rules! fold_member_expr_skip_prop {
() => {
fn fold_member_expr(
&mut self,
mut node: swc_core::ecma::ast::MemberExpr,
) -> swc_core::ecma::ast::MemberExpr {
node.obj = node.obj.fold_with(self);
if let swc_core::ecma::ast::MemberProp::Computed(_) = node.prop {
node.prop = node.prop.fold_with(self);
}
node
}
};
}
#[macro_export]
macro_rules! id {
($ident: expr) => {
$ident.to_id()
};
} | "https://parceljs.org/features/scope-hoisting/#dynamic-imports"
), | random_line_split |
utils.rs | use std::cmp::Ordering;
use std::collections::HashSet;
use crate::id;
use serde::{Deserialize, Serialize};
use swc_core::common::{Mark, Span, SyntaxContext, DUMMY_SP};
use swc_core::ecma::ast::{self, Id};
use swc_core::ecma::atoms::{js_word, JsWord};
pub fn match_member_expr(expr: &ast::MemberExpr, idents: Vec<&str>, decls: &HashSet<Id>) -> bool {
use ast::{Expr, Ident, Lit, MemberProp, Str};
let mut member = expr;
let mut idents = idents;
while idents.len() > 1 {
let expected = idents.pop().unwrap();
let prop = match &member.prop {
MemberProp::Computed(comp) => {
if let Expr::Lit(Lit::Str(Str { value: ref sym, .. })) = *comp.expr {
sym
} else {
return false;
}
}
MemberProp::Ident(Ident { ref sym, .. }) => sym,
_ => return false,
};
if prop != expected {
return false;
}
match &*member.obj {
Expr::Member(m) => member = m,
Expr::Ident(id) => {
return idents.len() == 1 && &id.sym == idents.pop().unwrap() && !decls.contains(&id!(id));
}
_ => return false,
}
}
false
}
pub fn | (specifier: swc_core::ecma::atoms::JsWord) -> ast::CallExpr {
let mut normalized_specifier = specifier;
if normalized_specifier.starts_with("node:") {
normalized_specifier = normalized_specifier.replace("node:", "").into();
}
ast::CallExpr {
callee: ast::Callee::Expr(Box::new(ast::Expr::Ident(ast::Ident::new(
"require".into(),
DUMMY_SP,
)))),
args: vec![ast::ExprOrSpread {
expr: Box::new(ast::Expr::Lit(ast::Lit::Str(normalized_specifier.into()))),
spread: None,
}],
span: DUMMY_SP,
type_args: None,
}
}
fn is_marked(span: Span, mark: Mark) -> bool {
let mut ctxt = span.ctxt();
loop {
let m = ctxt.remove_mark();
if m == Mark::root() {
return false;
}
if m == mark {
return true;
}
}
}
pub fn match_str(node: &ast::Expr) -> Option<(JsWord, Span)> {
use ast::*;
match node {
// "string" or 'string'
Expr::Lit(Lit::Str(s)) => Some((s.value.clone(), s.span)),
// `string`
Expr::Tpl(tpl) if tpl.quasis.len() == 1 && tpl.exprs.is_empty() => {
Some(((*tpl.quasis[0].raw).into(), tpl.span))
}
_ => None,
}
}
pub fn match_property_name(node: &ast::MemberExpr) -> Option<(JsWord, Span)> {
match &node.prop {
ast::MemberProp::Computed(s) => match_str(&s.expr),
ast::MemberProp::Ident(id) => Some((id.sym.clone(), id.span)),
ast::MemberProp::PrivateName(_) => None,
}
}
pub fn match_export_name(name: &ast::ModuleExportName) -> (JsWord, Span) {
match name {
ast::ModuleExportName::Ident(id) => (id.sym.clone(), id.span),
ast::ModuleExportName::Str(s) => (s.value.clone(), s.span),
}
}
/// Properties like `ExportNamedSpecifier::orig` have to be an Ident if `src` is `None`
pub fn match_export_name_ident(name: &ast::ModuleExportName) -> &ast::Ident {
match name {
ast::ModuleExportName::Ident(id) => id,
ast::ModuleExportName::Str(_) => unreachable!(),
}
}
pub fn match_require(node: &ast::Expr, decls: &HashSet<Id>, ignore_mark: Mark) -> Option<JsWord> {
use ast::*;
match node {
Expr::Call(call) => match &call.callee {
Callee::Expr(expr) => match &**expr {
Expr::Ident(ident) => {
if ident.sym == js_word!("require")
&& !decls.contains(&(ident.sym.clone(), ident.span.ctxt))
&& !is_marked(ident.span, ignore_mark)
{
if let Some(arg) = call.args.get(0) {
return match_str(&arg.expr).map(|(name, _)| name);
}
}
None
}
Expr::Member(member) => {
if match_member_expr(member, vec!["module", "require"], decls) {
if let Some(arg) = call.args.get(0) {
return match_str(&arg.expr).map(|(name, _)| name);
}
}
None
}
_ => None,
},
_ => None,
},
_ => None,
}
}
pub fn match_import(node: &ast::Expr, ignore_mark: Mark) -> Option<JsWord> {
use ast::*;
match node {
Expr::Call(call) => match &call.callee {
Callee::Import(ident) if !is_marked(ident.span, ignore_mark) => {
if let Some(arg) = call.args.get(0) {
return match_str(&arg.expr).map(|(name, _)| name);
}
None
}
_ => None,
},
_ => None,
}
}
// `name` must not be an existing binding.
pub fn create_global_decl_stmt(
name: swc_core::ecma::atoms::JsWord,
init: ast::Expr,
global_mark: Mark,
) -> (ast::Stmt, SyntaxContext) {
// The correct value would actually be `DUMMY_SP.apply_mark(Mark::fresh(Mark::root()))`.
// But this saves us from running the resolver again in some cases.
let span = DUMMY_SP.apply_mark(global_mark);
(
ast::Stmt::Decl(ast::Decl::Var(Box::new(ast::VarDecl {
kind: ast::VarDeclKind::Var,
declare: false,
span: DUMMY_SP,
decls: vec![ast::VarDeclarator {
name: ast::Pat::Ident(ast::BindingIdent::from(ast::Ident::new(name, span))),
span: DUMMY_SP,
definite: false,
init: Some(Box::new(init)),
}],
}))),
span.ctxt,
)
}
pub fn get_undefined_ident(unresolved_mark: Mark) -> ast::Ident {
ast::Ident::new(js_word!("undefined"), DUMMY_SP.apply_mark(unresolved_mark))
}
#[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq)]
/// Corresponds to the JS SourceLocation type (1-based, end exclusive)
pub struct SourceLocation {
pub start_line: usize,
pub start_col: usize,
pub end_line: usize,
pub end_col: usize,
}
impl SourceLocation {
pub fn from(source_map: &swc_core::common::SourceMap, span: swc_core::common::Span) -> Self {
if span.lo.is_dummy() || span.hi.is_dummy() {
return SourceLocation {
start_line: 1,
start_col: 1,
end_line: 1,
end_col: 2,
};
}
let start = source_map.lookup_char_pos(span.lo);
let end = source_map.lookup_char_pos(span.hi);
// SWC's columns are exclusive, ours are exclusive
// SWC has 0-based columns, ours are 1-based (column + 1)
SourceLocation {
start_line: start.line,
start_col: start.col_display + 1,
end_line: end.line,
end_col: end.col_display + 1,
}
}
}
impl PartialOrd for SourceLocation {
fn partial_cmp(&self, other: &SourceLocation) -> Option<Ordering> {
match self.start_line.cmp(&other.start_line) {
Ordering::Equal => self.start_col.partial_cmp(&other.start_col),
o => Some(o),
}
}
}
#[derive(Serialize, Deserialize, Debug)]
pub struct CodeHighlight {
pub message: Option<String>,
pub loc: SourceLocation,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct Diagnostic {
pub message: String,
pub code_highlights: Option<Vec<CodeHighlight>>,
pub hints: Option<Vec<String>>,
pub show_environment: bool,
pub severity: DiagnosticSeverity,
pub documentation_url: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
pub enum DiagnosticSeverity {
/// Fails the build with an error.
Error,
/// Logs a warning, but the build does not fail.
Warning,
/// An error if this is source code in the project, or a warning if in node_modules.
SourceError,
}
#[derive(Serialize, Debug, Deserialize, Eq, PartialEq, Clone, Copy)]
pub enum SourceType {
Script,
Module,
}
#[derive(Debug)]
pub struct Bailout {
pub loc: SourceLocation,
pub reason: BailoutReason,
}
impl Bailout {
pub fn to_diagnostic(&self) -> Diagnostic {
let (message, documentation_url) = self.reason.info();
Diagnostic {
message: message.into(),
documentation_url: Some(documentation_url.into()),
code_highlights: Some(vec![CodeHighlight {
loc: self.loc.clone(),
message: None,
}]),
show_environment: false,
severity: DiagnosticSeverity::Warning,
hints: None,
}
}
}
#[derive(Debug, Eq, PartialEq)]
pub enum BailoutReason {
NonTopLevelRequire,
NonStaticDestructuring,
TopLevelReturn,
Eval,
NonStaticExports,
FreeModule,
FreeExports,
ExportsReassignment,
ModuleReassignment,
NonStaticDynamicImport,
NonStaticAccess,
}
impl BailoutReason {
fn info(&self) -> (&str, &str) {
match self {
BailoutReason::NonTopLevelRequire => (
"Conditional or non-top-level `require()` call. This causes the resolved module and all dependencies to be wrapped.",
"https://parceljs.org/features/scope-hoisting/#avoid-conditional-require()"
),
BailoutReason::NonStaticDestructuring => (
"Non-static destructuring of `require` or dynamic `import()`. This causes all exports of the resolved module to be included.",
"https://parceljs.org/features/scope-hoisting/#commonjs"
),
BailoutReason::TopLevelReturn => (
"Module contains a top-level `return` statement. This causes the module to be wrapped in a function and tree shaking to be disabled.",
"https://parceljs.org/features/scope-hoisting/#avoid-top-level-return"
),
BailoutReason::Eval => (
"Module contains usage of `eval`. This causes the module to be wrapped in a function and minification to be disabled.",
"https://parceljs.org/features/scope-hoisting/#avoid-eval"
),
BailoutReason::NonStaticExports => (
"Non-static access of CommonJS `exports` object. This causes tree shaking to be disabled for the module.",
"https://parceljs.org/features/scope-hoisting/#commonjs"
),
BailoutReason::FreeModule => (
"Unknown usage of CommonJS `module` object. This causes the module to be wrapped, and tree shaking to be disabled.",
"https://parceljs.org/features/scope-hoisting/#commonjs"
),
BailoutReason::FreeExports => (
"Unknown usage of CommonJS `exports` object. This causes tree shaking to be disabled.",
"https://parceljs.org/features/scope-hoisting/#commonjs"
),
BailoutReason::ExportsReassignment => (
"Module contains a reassignment of the CommonJS `exports` object. This causes the module to be wrapped and tree-shaking to be disabled.",
"https://parceljs.org/features/scope-hoisting/#avoid-module-and-exports-re-assignment"
),
BailoutReason::ModuleReassignment => (
"Module contains a reassignment of the CommonJS `module` object. This causes the module to be wrapped and tree-shaking to be disabled.",
"https://parceljs.org/features/scope-hoisting/#avoid-module-and-exports-re-assignment"
),
BailoutReason::NonStaticDynamicImport => (
"Unknown dynamic import usage. This causes tree shaking to be disabled for the resolved module.",
"https://parceljs.org/features/scope-hoisting/#dynamic-imports"
),
BailoutReason::NonStaticAccess => (
"Non-static access of an `import` or `require`. This causes tree shaking to be disabled for the resolved module.",
"https://parceljs.org/features/scope-hoisting/#dynamic-member-accesses"
),
}
}
}
#[macro_export]
macro_rules! fold_member_expr_skip_prop {
() => {
fn fold_member_expr(
&mut self,
mut node: swc_core::ecma::ast::MemberExpr,
) -> swc_core::ecma::ast::MemberExpr {
node.obj = node.obj.fold_with(self);
if let swc_core::ecma::ast::MemberProp::Computed(_) = node.prop {
node.prop = node.prop.fold_with(self);
}
node
}
};
}
#[macro_export]
macro_rules! id {
($ident: expr) => {
$ident.to_id()
};
}
| create_require | identifier_name |
utils.rs | use std::cmp::Ordering;
use std::collections::HashSet;
use crate::id;
use serde::{Deserialize, Serialize};
use swc_core::common::{Mark, Span, SyntaxContext, DUMMY_SP};
use swc_core::ecma::ast::{self, Id};
use swc_core::ecma::atoms::{js_word, JsWord};
pub fn match_member_expr(expr: &ast::MemberExpr, idents: Vec<&str>, decls: &HashSet<Id>) -> bool {
use ast::{Expr, Ident, Lit, MemberProp, Str};
let mut member = expr;
let mut idents = idents;
while idents.len() > 1 {
let expected = idents.pop().unwrap();
let prop = match &member.prop {
MemberProp::Computed(comp) => {
if let Expr::Lit(Lit::Str(Str { value: ref sym, .. })) = *comp.expr {
sym
} else |
}
MemberProp::Ident(Ident { ref sym, .. }) => sym,
_ => return false,
};
if prop != expected {
return false;
}
match &*member.obj {
Expr::Member(m) => member = m,
Expr::Ident(id) => {
return idents.len() == 1 && &id.sym == idents.pop().unwrap() && !decls.contains(&id!(id));
}
_ => return false,
}
}
false
}
pub fn create_require(specifier: swc_core::ecma::atoms::JsWord) -> ast::CallExpr {
let mut normalized_specifier = specifier;
if normalized_specifier.starts_with("node:") {
normalized_specifier = normalized_specifier.replace("node:", "").into();
}
ast::CallExpr {
callee: ast::Callee::Expr(Box::new(ast::Expr::Ident(ast::Ident::new(
"require".into(),
DUMMY_SP,
)))),
args: vec![ast::ExprOrSpread {
expr: Box::new(ast::Expr::Lit(ast::Lit::Str(normalized_specifier.into()))),
spread: None,
}],
span: DUMMY_SP,
type_args: None,
}
}
fn is_marked(span: Span, mark: Mark) -> bool {
let mut ctxt = span.ctxt();
loop {
let m = ctxt.remove_mark();
if m == Mark::root() {
return false;
}
if m == mark {
return true;
}
}
}
pub fn match_str(node: &ast::Expr) -> Option<(JsWord, Span)> {
use ast::*;
match node {
// "string" or 'string'
Expr::Lit(Lit::Str(s)) => Some((s.value.clone(), s.span)),
// `string`
Expr::Tpl(tpl) if tpl.quasis.len() == 1 && tpl.exprs.is_empty() => {
Some(((*tpl.quasis[0].raw).into(), tpl.span))
}
_ => None,
}
}
pub fn match_property_name(node: &ast::MemberExpr) -> Option<(JsWord, Span)> {
match &node.prop {
ast::MemberProp::Computed(s) => match_str(&s.expr),
ast::MemberProp::Ident(id) => Some((id.sym.clone(), id.span)),
ast::MemberProp::PrivateName(_) => None,
}
}
pub fn match_export_name(name: &ast::ModuleExportName) -> (JsWord, Span) {
match name {
ast::ModuleExportName::Ident(id) => (id.sym.clone(), id.span),
ast::ModuleExportName::Str(s) => (s.value.clone(), s.span),
}
}
/// Properties like `ExportNamedSpecifier::orig` have to be an Ident if `src` is `None`
pub fn match_export_name_ident(name: &ast::ModuleExportName) -> &ast::Ident {
match name {
ast::ModuleExportName::Ident(id) => id,
ast::ModuleExportName::Str(_) => unreachable!(),
}
}
pub fn match_require(node: &ast::Expr, decls: &HashSet<Id>, ignore_mark: Mark) -> Option<JsWord> {
use ast::*;
match node {
Expr::Call(call) => match &call.callee {
Callee::Expr(expr) => match &**expr {
Expr::Ident(ident) => {
if ident.sym == js_word!("require")
&& !decls.contains(&(ident.sym.clone(), ident.span.ctxt))
&& !is_marked(ident.span, ignore_mark)
{
if let Some(arg) = call.args.get(0) {
return match_str(&arg.expr).map(|(name, _)| name);
}
}
None
}
Expr::Member(member) => {
if match_member_expr(member, vec!["module", "require"], decls) {
if let Some(arg) = call.args.get(0) {
return match_str(&arg.expr).map(|(name, _)| name);
}
}
None
}
_ => None,
},
_ => None,
},
_ => None,
}
}
pub fn match_import(node: &ast::Expr, ignore_mark: Mark) -> Option<JsWord> {
use ast::*;
match node {
Expr::Call(call) => match &call.callee {
Callee::Import(ident) if !is_marked(ident.span, ignore_mark) => {
if let Some(arg) = call.args.get(0) {
return match_str(&arg.expr).map(|(name, _)| name);
}
None
}
_ => None,
},
_ => None,
}
}
// `name` must not be an existing binding.
pub fn create_global_decl_stmt(
name: swc_core::ecma::atoms::JsWord,
init: ast::Expr,
global_mark: Mark,
) -> (ast::Stmt, SyntaxContext) {
// The correct value would actually be `DUMMY_SP.apply_mark(Mark::fresh(Mark::root()))`.
// But this saves us from running the resolver again in some cases.
let span = DUMMY_SP.apply_mark(global_mark);
(
ast::Stmt::Decl(ast::Decl::Var(Box::new(ast::VarDecl {
kind: ast::VarDeclKind::Var,
declare: false,
span: DUMMY_SP,
decls: vec![ast::VarDeclarator {
name: ast::Pat::Ident(ast::BindingIdent::from(ast::Ident::new(name, span))),
span: DUMMY_SP,
definite: false,
init: Some(Box::new(init)),
}],
}))),
span.ctxt,
)
}
pub fn get_undefined_ident(unresolved_mark: Mark) -> ast::Ident {
ast::Ident::new(js_word!("undefined"), DUMMY_SP.apply_mark(unresolved_mark))
}
#[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq)]
/// Corresponds to the JS SourceLocation type (1-based, end exclusive)
pub struct SourceLocation {
pub start_line: usize,
pub start_col: usize,
pub end_line: usize,
pub end_col: usize,
}
impl SourceLocation {
pub fn from(source_map: &swc_core::common::SourceMap, span: swc_core::common::Span) -> Self {
if span.lo.is_dummy() || span.hi.is_dummy() {
return SourceLocation {
start_line: 1,
start_col: 1,
end_line: 1,
end_col: 2,
};
}
let start = source_map.lookup_char_pos(span.lo);
let end = source_map.lookup_char_pos(span.hi);
// SWC's columns are exclusive, ours are exclusive
// SWC has 0-based columns, ours are 1-based (column + 1)
SourceLocation {
start_line: start.line,
start_col: start.col_display + 1,
end_line: end.line,
end_col: end.col_display + 1,
}
}
}
impl PartialOrd for SourceLocation {
fn partial_cmp(&self, other: &SourceLocation) -> Option<Ordering> {
match self.start_line.cmp(&other.start_line) {
Ordering::Equal => self.start_col.partial_cmp(&other.start_col),
o => Some(o),
}
}
}
#[derive(Serialize, Deserialize, Debug)]
pub struct CodeHighlight {
pub message: Option<String>,
pub loc: SourceLocation,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct Diagnostic {
pub message: String,
pub code_highlights: Option<Vec<CodeHighlight>>,
pub hints: Option<Vec<String>>,
pub show_environment: bool,
pub severity: DiagnosticSeverity,
pub documentation_url: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
pub enum DiagnosticSeverity {
/// Fails the build with an error.
Error,
/// Logs a warning, but the build does not fail.
Warning,
/// An error if this is source code in the project, or a warning if in node_modules.
SourceError,
}
#[derive(Serialize, Debug, Deserialize, Eq, PartialEq, Clone, Copy)]
pub enum SourceType {
Script,
Module,
}
#[derive(Debug)]
pub struct Bailout {
pub loc: SourceLocation,
pub reason: BailoutReason,
}
impl Bailout {
pub fn to_diagnostic(&self) -> Diagnostic {
let (message, documentation_url) = self.reason.info();
Diagnostic {
message: message.into(),
documentation_url: Some(documentation_url.into()),
code_highlights: Some(vec![CodeHighlight {
loc: self.loc.clone(),
message: None,
}]),
show_environment: false,
severity: DiagnosticSeverity::Warning,
hints: None,
}
}
}
#[derive(Debug, Eq, PartialEq)]
pub enum BailoutReason {
NonTopLevelRequire,
NonStaticDestructuring,
TopLevelReturn,
Eval,
NonStaticExports,
FreeModule,
FreeExports,
ExportsReassignment,
ModuleReassignment,
NonStaticDynamicImport,
NonStaticAccess,
}
impl BailoutReason {
fn info(&self) -> (&str, &str) {
match self {
BailoutReason::NonTopLevelRequire => (
"Conditional or non-top-level `require()` call. This causes the resolved module and all dependencies to be wrapped.",
"https://parceljs.org/features/scope-hoisting/#avoid-conditional-require()"
),
BailoutReason::NonStaticDestructuring => (
"Non-static destructuring of `require` or dynamic `import()`. This causes all exports of the resolved module to be included.",
"https://parceljs.org/features/scope-hoisting/#commonjs"
),
BailoutReason::TopLevelReturn => (
"Module contains a top-level `return` statement. This causes the module to be wrapped in a function and tree shaking to be disabled.",
"https://parceljs.org/features/scope-hoisting/#avoid-top-level-return"
),
BailoutReason::Eval => (
"Module contains usage of `eval`. This causes the module to be wrapped in a function and minification to be disabled.",
"https://parceljs.org/features/scope-hoisting/#avoid-eval"
),
BailoutReason::NonStaticExports => (
"Non-static access of CommonJS `exports` object. This causes tree shaking to be disabled for the module.",
"https://parceljs.org/features/scope-hoisting/#commonjs"
),
BailoutReason::FreeModule => (
"Unknown usage of CommonJS `module` object. This causes the module to be wrapped, and tree shaking to be disabled.",
"https://parceljs.org/features/scope-hoisting/#commonjs"
),
BailoutReason::FreeExports => (
"Unknown usage of CommonJS `exports` object. This causes tree shaking to be disabled.",
"https://parceljs.org/features/scope-hoisting/#commonjs"
),
BailoutReason::ExportsReassignment => (
"Module contains a reassignment of the CommonJS `exports` object. This causes the module to be wrapped and tree-shaking to be disabled.",
"https://parceljs.org/features/scope-hoisting/#avoid-module-and-exports-re-assignment"
),
BailoutReason::ModuleReassignment => (
"Module contains a reassignment of the CommonJS `module` object. This causes the module to be wrapped and tree-shaking to be disabled.",
"https://parceljs.org/features/scope-hoisting/#avoid-module-and-exports-re-assignment"
),
BailoutReason::NonStaticDynamicImport => (
"Unknown dynamic import usage. This causes tree shaking to be disabled for the resolved module.",
"https://parceljs.org/features/scope-hoisting/#dynamic-imports"
),
BailoutReason::NonStaticAccess => (
"Non-static access of an `import` or `require`. This causes tree shaking to be disabled for the resolved module.",
"https://parceljs.org/features/scope-hoisting/#dynamic-member-accesses"
),
}
}
}
#[macro_export]
macro_rules! fold_member_expr_skip_prop {
() => {
fn fold_member_expr(
&mut self,
mut node: swc_core::ecma::ast::MemberExpr,
) -> swc_core::ecma::ast::MemberExpr {
node.obj = node.obj.fold_with(self);
if let swc_core::ecma::ast::MemberProp::Computed(_) = node.prop {
node.prop = node.prop.fold_with(self);
}
node
}
};
}
#[macro_export]
macro_rules! id {
($ident: expr) => {
$ident.to_id()
};
}
| {
return false;
} | conditional_block |
inotify_linux_2.go | //+build linux
package fsnotify
// watcher tracks the following events:
// - dir create, delete, move
// - file write, delete, move
//
// configFile changes and directory moves always trigger a full reload.
// It's not incremental.
//
// Also, any incremental changes to page files (*.page.*) will recreate
// static site completely (if StaticSite=true). There is no "incremental"
// generation of static site data, because tags, feeds and dir indexes
// can refer to files beyond a single file change.
//
// For these MACRO updates (full reload, createStatic), the system will
// bundle the updates and do it once at the end of processing a bunch of reads.
//
// syscall.Read is typically a blocking call.
// Thus, we SHOULDN'T close it from a different thread, since the behaviour
// in linux is undefined.
//
// To accomodate, we use select with a 1 second timeout, and use non-blocking
// reads, so read never hangs. We then also close the file descriptor within the
// readEvents loop.
//
// The design affords the following:
// - User can use a sleep to allow events to be delivered as a batch.
// This can help prevent running the same macro operation multiple times
// because events came in one at a time.
// The sleep also allows inotify to coalese similar events together.
// - Access to underlying linux events, so finer handling of moves, etc.
import (
"fmt"
"os"
"path/filepath"
"strings"
"sync"
"sync/atomic"
"syscall"
"time"
"unsafe"
"github.com/ugorji/go-common/errorutil"
"github.com/ugorji/go-common/logging"
)
var log = logging.PkgLogger()
var ClosedErr = errorutil.String("<watcher closed>")
// Use select, so read doesn't block
const useSelect = true
// Use non-block, so we don't block on read.
const useNonBlock = true
type WatchEvent struct {
syscall.InotifyEvent
Path string
Name string
}
func (e *WatchEvent) String() string |
// Watcher implements a watch service.
// It allows user to handle events natively, but does
// management of event bus and delivery.
// User just provides a callback function.
type Watcher struct {
sl time.Duration
fd int
closed uint32
wds map[int32]string
flags map[string]uint32
mu sync.Mutex
fn func([]*WatchEvent)
ev chan []*WatchEvent
sysbufsize int
}
// NewWatcher returns a new Watcher instance.
// - bufsize: chan size (ie max number of batches available to be processed)
// - sysBufSize: syscall Inotify Buf size (ie max number of inotify events in each read)
// - sleepTime: sleep time between reads.
// Allows system coalese events, and allows us user handle events in batches.
// - fn: function to call for each batch of events read
func NewWatcher(bufsize, sysBufSize int, sleepTime time.Duration, fn func([]*WatchEvent),
) (w *Watcher, err error) {
fd, err := syscall.InotifyInit()
if err != nil {
return
}
if fd == -1 {
err = os.NewSyscallError("inotify_init", err)
return
}
if useNonBlock {
syscall.SetNonblock(fd, true)
}
w = &Watcher{
fd: fd,
fn: fn,
ev: make(chan []*WatchEvent, bufsize),
wds: make(map[int32]string),
flags: make(map[string]uint32),
sl: sleepTime,
sysbufsize: sysBufSize,
}
go w.readEvents()
go w.handleEvents()
return
}
func (w *Watcher) AddAll(ignoreErr, clear, recursive bool, flags uint32, fpaths ...string) error {
if atomic.LoadUint32(&w.closed) != 0 {
return ClosedErr
}
w.mu.Lock()
defer w.mu.Unlock()
var merr errorutil.Multi
fnErr := func(err error, message string, params ...interface{}) bool {
if ignoreErr {
log.IfError(nil, err, message, params...)
return false
} else {
errorutil.OnErrorf(&err, message, params...)
merr = append(merr, err)
return true
}
}
if clear && fnErr(w.clear(), "Error clearing Watcher") {
return merr.NonNilError()
}
for _, fpath := range fpaths {
var walkDoneErr = errorutil.String("DONE")
first := true
walkFn := func(f string, info os.FileInfo, inerr error) error {
if first || info.Mode().IsDir() {
if fnErr(w.add(f, flags), "Error adding path: %s", f) {
return walkDoneErr
}
}
if first && !recursive {
return walkDoneErr
}
first = false
return nil
}
if err := filepath.Walk(fpath, walkFn); err == walkDoneErr {
break
}
}
return merr.NonNilError()
}
func (w *Watcher) Add(fpath string, flags uint32) error {
if atomic.LoadUint32(&w.closed) != 0 {
return ClosedErr
}
w.mu.Lock()
defer w.mu.Unlock()
return w.add(fpath, flags)
}
func (w *Watcher) add(fpath string, flags uint32) error {
if flags == 0 {
flags =
// delete: false
syscall.IN_CREATE |
syscall.IN_CLOSE_WRITE |
syscall.IN_MOVED_TO |
// delete: true
syscall.IN_MOVED_FROM |
syscall.IN_DELETE |
syscall.IN_DELETE_SELF |
syscall.IN_MOVE_SELF
}
// user can add more flags by passing the syscall.IN_MASK_ADD
wd, err := syscall.InotifyAddWatch(w.fd, fpath, flags)
if err != nil {
errorutil.OnErrorf(&err, "Error adding watch for path: %s", fpath)
return err
}
w.wds[int32(wd)] = fpath
w.flags[fpath] = flags
return nil
}
func (w *Watcher) Remove(fpath string) (err error) {
if atomic.LoadUint32(&w.closed) != 0 {
return ClosedErr
}
w.mu.Lock()
defer w.mu.Unlock()
return w.remove(fpath)
}
func (w *Watcher) remove(fpath string) (err error) {
for k, v := range w.wds {
if v == fpath {
_, err = syscall.InotifyRmWatch(w.fd, uint32(k))
delete(w.wds, k)
delete(w.flags, v)
break
}
}
return
}
func (w *Watcher) Clear() error {
if atomic.LoadUint32(&w.closed) != 0 {
return ClosedErr
}
w.mu.Lock()
defer w.mu.Unlock()
return w.clear()
}
func (w *Watcher) clear() error {
var merr errorutil.Multi
for k, v := range w.wds {
if _, err := syscall.InotifyRmWatch(w.fd, uint32(k)); err != nil {
errorutil.OnErrorf(&err, "Error removing watch for path: %s", v)
merr = append(merr, err)
}
}
w.wds = make(map[int32]string)
w.flags = make(map[string]uint32)
return merr.NonNilError()
}
func (w *Watcher) Close() (err error) {
// Note that, with blocking read, Close is best effort. This is because read in linux
// does not error if the file descriptor is closed. Thus the "read" syscall may not unblock.
//
// To mitigate, we use select AND NonBlocking IO during the read.
if !atomic.CompareAndSwapUint32(&w.closed, 0, 1) {
return
}
w.mu.Lock()
defer w.mu.Unlock()
w.clear()
close(w.ev)
if !(useSelect || useNonBlock) {
log.IfError(nil, syscall.Close(w.fd), "Error closing Watcher")
}
return nil
}
func (w *Watcher) readEvents() {
if useSelect || useNonBlock {
defer func() {
log.IfError(nil, syscall.Close(w.fd), "Error closing Watcher")
}()
}
// inotify events come very quickly, so we can't handle them inline.
// Instead, we grab the list of events we read and put on a queue.
// This way, we can still work on a bunch of events at same time.
var buf = make([]byte, syscall.SizeofInotifyEvent*w.sysbufsize)
for {
// always check closed right before syscalls (read/select/sleep), to minimize chance
// of race condition where fd is closed, OS assigns to someone else, and we try to read.
// slight sleep gives a chance to coalese similar events into one
if w.sl != 0 {
if atomic.LoadUint32(&w.closed) != 0 {
return
}
time.Sleep(w.sl)
}
if useSelect {
if atomic.LoadUint32(&w.closed) != 0 {
return
}
// println(">>>>> select: Checking to read")
fdset := new(syscall.FdSet)
fdset.Bits[w.fd/64] |= 1 << (uint(w.fd) % 64) // FD_SET
// fdIsSet := (fdset.Bits[w.fd/64] & (1 << (uint(w.fd) % 64))) != 0 // FD_ISSET
// for i := range fdset.Bits { fdset.Bits[i] = 0 } // FD_ZERO
selTimeout := syscall.NsecToTimeval(int64(1 * time.Second))
num, err := syscall.Select(w.fd+1, fdset, nil, nil, &selTimeout)
// if err != nil || num == 0 {
if (fdset.Bits[w.fd/64] & (1 << (uint(w.fd) % 64))) == 0 { // FD_ISSET
log.IfError(nil, err, "Error during Watcher select, which returned: %d", num)
continue
}
// println(">>>>> select: will read")
}
if atomic.LoadUint32(&w.closed) != 0 {
return
}
n, err := syscall.Read(w.fd, buf[0:])
if useNonBlock && err == syscall.EAGAIN {
// println(">>>>> non-block: EAGAIN")
continue
}
// even if there is an error, see if any events already read and process them.
log.IfError(nil, err, "Error during Watcher read, which returned %d bytes", n)
if n == 0 {
break // EOF
}
if n < syscall.SizeofInotifyEvent {
continue // short read
}
var offset uint32
wevs := make([]*WatchEvent, 0, n/(syscall.SizeofInotifyEvent*2))
for offset <= uint32(n-syscall.SizeofInotifyEvent) {
// raw.Wd, raw.Mask, raw.Cookie, raw.Len (all uint32)
raw := (*syscall.InotifyEvent)(unsafe.Pointer(&buf[offset]))
fpath := w.wds[raw.Wd]
// skip some events
if raw.Mask&syscall.IN_IGNORED != 0 ||
raw.Mask&syscall.IN_Q_OVERFLOW != 0 ||
raw.Mask&syscall.IN_UNMOUNT != 0 ||
fpath == "" {
offset += syscall.SizeofInotifyEvent + raw.Len
continue
}
wev := &WatchEvent{InotifyEvent: *raw, Path: fpath}
if raw.Len != 0 {
bs := (*[syscall.PathMax]byte)(unsafe.Pointer(&buf[offset+syscall.SizeofInotifyEvent]))
wev.Name = strings.TrimRight(string(bs[0:raw.Len]), "\000")
}
wevs = append(wevs, wev)
offset += syscall.SizeofInotifyEvent + raw.Len
}
select {
case w.ev <- wevs:
case <-time.After(10 * time.Millisecond):
// drop if after 30 milliseconds and no one to accept
}
}
}
func (w *Watcher) handleEvents() {
for x := range w.ev {
w.fn(x)
}
}
| {
s := make([]string, 0, 4)
x := e.Mask
if x&syscall.IN_ISDIR != 0 {
s = append(s, "IN_ISDIR")
}
if x&syscall.IN_CREATE != 0 {
s = append(s, "IN_CREATE")
}
if x&syscall.IN_CLOSE_WRITE != 0 {
s = append(s, "IN_CLOSE_WRITE")
}
if x&syscall.IN_MOVED_TO != 0 {
s = append(s, "IN_MOVED_TO")
}
if x&syscall.IN_MOVED_FROM != 0 {
s = append(s, "IN_MOVED_FROM")
}
if x&syscall.IN_DELETE != 0 {
s = append(s, "IN_DELETE")
}
if x&syscall.IN_DELETE_SELF != 0 {
s = append(s, "IN_DELETE_SELF")
}
if x&syscall.IN_MOVE_SELF != 0 {
s = append(s, "IN_MOVE_SELF")
}
return fmt.Sprintf("WatchEvent: Path: %s, Name: %s, Wd: %v, Cookie: %v, Mask: %b, %v",
e.Path, e.Name, e.Wd, e.Cookie, e.Mask, s)
} | identifier_body |
inotify_linux_2.go | //+build linux
package fsnotify
// watcher tracks the following events:
// - dir create, delete, move
// - file write, delete, move
//
// configFile changes and directory moves always trigger a full reload.
// It's not incremental.
//
// Also, any incremental changes to page files (*.page.*) will recreate
// static site completely (if StaticSite=true). There is no "incremental"
// generation of static site data, because tags, feeds and dir indexes
// can refer to files beyond a single file change.
//
// For these MACRO updates (full reload, createStatic), the system will
// bundle the updates and do it once at the end of processing a bunch of reads.
//
// syscall.Read is typically a blocking call.
// Thus, we SHOULDN'T close it from a different thread, since the behaviour
// in linux is undefined.
//
// To accomodate, we use select with a 1 second timeout, and use non-blocking
// reads, so read never hangs. We then also close the file descriptor within the
// readEvents loop.
//
// The design affords the following:
// - User can use a sleep to allow events to be delivered as a batch.
// This can help prevent running the same macro operation multiple times
// because events came in one at a time.
// The sleep also allows inotify to coalese similar events together.
// - Access to underlying linux events, so finer handling of moves, etc.
import (
"fmt"
"os"
"path/filepath"
"strings"
"sync"
"sync/atomic"
"syscall"
"time"
"unsafe"
"github.com/ugorji/go-common/errorutil"
"github.com/ugorji/go-common/logging"
)
var log = logging.PkgLogger()
var ClosedErr = errorutil.String("<watcher closed>")
// Use select, so read doesn't block | const useNonBlock = true
type WatchEvent struct {
syscall.InotifyEvent
Path string
Name string
}
func (e *WatchEvent) String() string {
s := make([]string, 0, 4)
x := e.Mask
if x&syscall.IN_ISDIR != 0 {
s = append(s, "IN_ISDIR")
}
if x&syscall.IN_CREATE != 0 {
s = append(s, "IN_CREATE")
}
if x&syscall.IN_CLOSE_WRITE != 0 {
s = append(s, "IN_CLOSE_WRITE")
}
if x&syscall.IN_MOVED_TO != 0 {
s = append(s, "IN_MOVED_TO")
}
if x&syscall.IN_MOVED_FROM != 0 {
s = append(s, "IN_MOVED_FROM")
}
if x&syscall.IN_DELETE != 0 {
s = append(s, "IN_DELETE")
}
if x&syscall.IN_DELETE_SELF != 0 {
s = append(s, "IN_DELETE_SELF")
}
if x&syscall.IN_MOVE_SELF != 0 {
s = append(s, "IN_MOVE_SELF")
}
return fmt.Sprintf("WatchEvent: Path: %s, Name: %s, Wd: %v, Cookie: %v, Mask: %b, %v",
e.Path, e.Name, e.Wd, e.Cookie, e.Mask, s)
}
// Watcher implements a watch service.
// It allows user to handle events natively, but does
// management of event bus and delivery.
// User just provides a callback function.
type Watcher struct {
sl time.Duration
fd int
closed uint32
wds map[int32]string
flags map[string]uint32
mu sync.Mutex
fn func([]*WatchEvent)
ev chan []*WatchEvent
sysbufsize int
}
// NewWatcher returns a new Watcher instance.
// - bufsize: chan size (ie max number of batches available to be processed)
// - sysBufSize: syscall Inotify Buf size (ie max number of inotify events in each read)
// - sleepTime: sleep time between reads.
// Allows system coalese events, and allows us user handle events in batches.
// - fn: function to call for each batch of events read
func NewWatcher(bufsize, sysBufSize int, sleepTime time.Duration, fn func([]*WatchEvent),
) (w *Watcher, err error) {
fd, err := syscall.InotifyInit()
if err != nil {
return
}
if fd == -1 {
err = os.NewSyscallError("inotify_init", err)
return
}
if useNonBlock {
syscall.SetNonblock(fd, true)
}
w = &Watcher{
fd: fd,
fn: fn,
ev: make(chan []*WatchEvent, bufsize),
wds: make(map[int32]string),
flags: make(map[string]uint32),
sl: sleepTime,
sysbufsize: sysBufSize,
}
go w.readEvents()
go w.handleEvents()
return
}
func (w *Watcher) AddAll(ignoreErr, clear, recursive bool, flags uint32, fpaths ...string) error {
if atomic.LoadUint32(&w.closed) != 0 {
return ClosedErr
}
w.mu.Lock()
defer w.mu.Unlock()
var merr errorutil.Multi
fnErr := func(err error, message string, params ...interface{}) bool {
if ignoreErr {
log.IfError(nil, err, message, params...)
return false
} else {
errorutil.OnErrorf(&err, message, params...)
merr = append(merr, err)
return true
}
}
if clear && fnErr(w.clear(), "Error clearing Watcher") {
return merr.NonNilError()
}
for _, fpath := range fpaths {
var walkDoneErr = errorutil.String("DONE")
first := true
walkFn := func(f string, info os.FileInfo, inerr error) error {
if first || info.Mode().IsDir() {
if fnErr(w.add(f, flags), "Error adding path: %s", f) {
return walkDoneErr
}
}
if first && !recursive {
return walkDoneErr
}
first = false
return nil
}
if err := filepath.Walk(fpath, walkFn); err == walkDoneErr {
break
}
}
return merr.NonNilError()
}
func (w *Watcher) Add(fpath string, flags uint32) error {
if atomic.LoadUint32(&w.closed) != 0 {
return ClosedErr
}
w.mu.Lock()
defer w.mu.Unlock()
return w.add(fpath, flags)
}
func (w *Watcher) add(fpath string, flags uint32) error {
if flags == 0 {
flags =
// delete: false
syscall.IN_CREATE |
syscall.IN_CLOSE_WRITE |
syscall.IN_MOVED_TO |
// delete: true
syscall.IN_MOVED_FROM |
syscall.IN_DELETE |
syscall.IN_DELETE_SELF |
syscall.IN_MOVE_SELF
}
// user can add more flags by passing the syscall.IN_MASK_ADD
wd, err := syscall.InotifyAddWatch(w.fd, fpath, flags)
if err != nil {
errorutil.OnErrorf(&err, "Error adding watch for path: %s", fpath)
return err
}
w.wds[int32(wd)] = fpath
w.flags[fpath] = flags
return nil
}
func (w *Watcher) Remove(fpath string) (err error) {
if atomic.LoadUint32(&w.closed) != 0 {
return ClosedErr
}
w.mu.Lock()
defer w.mu.Unlock()
return w.remove(fpath)
}
func (w *Watcher) remove(fpath string) (err error) {
for k, v := range w.wds {
if v == fpath {
_, err = syscall.InotifyRmWatch(w.fd, uint32(k))
delete(w.wds, k)
delete(w.flags, v)
break
}
}
return
}
func (w *Watcher) Clear() error {
if atomic.LoadUint32(&w.closed) != 0 {
return ClosedErr
}
w.mu.Lock()
defer w.mu.Unlock()
return w.clear()
}
func (w *Watcher) clear() error {
var merr errorutil.Multi
for k, v := range w.wds {
if _, err := syscall.InotifyRmWatch(w.fd, uint32(k)); err != nil {
errorutil.OnErrorf(&err, "Error removing watch for path: %s", v)
merr = append(merr, err)
}
}
w.wds = make(map[int32]string)
w.flags = make(map[string]uint32)
return merr.NonNilError()
}
func (w *Watcher) Close() (err error) {
// Note that, with blocking read, Close is best effort. This is because read in linux
// does not error if the file descriptor is closed. Thus the "read" syscall may not unblock.
//
// To mitigate, we use select AND NonBlocking IO during the read.
if !atomic.CompareAndSwapUint32(&w.closed, 0, 1) {
return
}
w.mu.Lock()
defer w.mu.Unlock()
w.clear()
close(w.ev)
if !(useSelect || useNonBlock) {
log.IfError(nil, syscall.Close(w.fd), "Error closing Watcher")
}
return nil
}
func (w *Watcher) readEvents() {
if useSelect || useNonBlock {
defer func() {
log.IfError(nil, syscall.Close(w.fd), "Error closing Watcher")
}()
}
// inotify events come very quickly, so we can't handle them inline.
// Instead, we grab the list of events we read and put on a queue.
// This way, we can still work on a bunch of events at same time.
var buf = make([]byte, syscall.SizeofInotifyEvent*w.sysbufsize)
for {
// always check closed right before syscalls (read/select/sleep), to minimize chance
// of race condition where fd is closed, OS assigns to someone else, and we try to read.
// slight sleep gives a chance to coalese similar events into one
if w.sl != 0 {
if atomic.LoadUint32(&w.closed) != 0 {
return
}
time.Sleep(w.sl)
}
if useSelect {
if atomic.LoadUint32(&w.closed) != 0 {
return
}
// println(">>>>> select: Checking to read")
fdset := new(syscall.FdSet)
fdset.Bits[w.fd/64] |= 1 << (uint(w.fd) % 64) // FD_SET
// fdIsSet := (fdset.Bits[w.fd/64] & (1 << (uint(w.fd) % 64))) != 0 // FD_ISSET
// for i := range fdset.Bits { fdset.Bits[i] = 0 } // FD_ZERO
selTimeout := syscall.NsecToTimeval(int64(1 * time.Second))
num, err := syscall.Select(w.fd+1, fdset, nil, nil, &selTimeout)
// if err != nil || num == 0 {
if (fdset.Bits[w.fd/64] & (1 << (uint(w.fd) % 64))) == 0 { // FD_ISSET
log.IfError(nil, err, "Error during Watcher select, which returned: %d", num)
continue
}
// println(">>>>> select: will read")
}
if atomic.LoadUint32(&w.closed) != 0 {
return
}
n, err := syscall.Read(w.fd, buf[0:])
if useNonBlock && err == syscall.EAGAIN {
// println(">>>>> non-block: EAGAIN")
continue
}
// even if there is an error, see if any events already read and process them.
log.IfError(nil, err, "Error during Watcher read, which returned %d bytes", n)
if n == 0 {
break // EOF
}
if n < syscall.SizeofInotifyEvent {
continue // short read
}
var offset uint32
wevs := make([]*WatchEvent, 0, n/(syscall.SizeofInotifyEvent*2))
for offset <= uint32(n-syscall.SizeofInotifyEvent) {
// raw.Wd, raw.Mask, raw.Cookie, raw.Len (all uint32)
raw := (*syscall.InotifyEvent)(unsafe.Pointer(&buf[offset]))
fpath := w.wds[raw.Wd]
// skip some events
if raw.Mask&syscall.IN_IGNORED != 0 ||
raw.Mask&syscall.IN_Q_OVERFLOW != 0 ||
raw.Mask&syscall.IN_UNMOUNT != 0 ||
fpath == "" {
offset += syscall.SizeofInotifyEvent + raw.Len
continue
}
wev := &WatchEvent{InotifyEvent: *raw, Path: fpath}
if raw.Len != 0 {
bs := (*[syscall.PathMax]byte)(unsafe.Pointer(&buf[offset+syscall.SizeofInotifyEvent]))
wev.Name = strings.TrimRight(string(bs[0:raw.Len]), "\000")
}
wevs = append(wevs, wev)
offset += syscall.SizeofInotifyEvent + raw.Len
}
select {
case w.ev <- wevs:
case <-time.After(10 * time.Millisecond):
// drop if after 30 milliseconds and no one to accept
}
}
}
func (w *Watcher) handleEvents() {
for x := range w.ev {
w.fn(x)
}
} | const useSelect = true
// Use non-block, so we don't block on read. | random_line_split |
inotify_linux_2.go | //+build linux
package fsnotify
// watcher tracks the following events:
// - dir create, delete, move
// - file write, delete, move
//
// configFile changes and directory moves always trigger a full reload.
// It's not incremental.
//
// Also, any incremental changes to page files (*.page.*) will recreate
// static site completely (if StaticSite=true). There is no "incremental"
// generation of static site data, because tags, feeds and dir indexes
// can refer to files beyond a single file change.
//
// For these MACRO updates (full reload, createStatic), the system will
// bundle the updates and do it once at the end of processing a bunch of reads.
//
// syscall.Read is typically a blocking call.
// Thus, we SHOULDN'T close it from a different thread, since the behaviour
// in linux is undefined.
//
// To accomodate, we use select with a 1 second timeout, and use non-blocking
// reads, so read never hangs. We then also close the file descriptor within the
// readEvents loop.
//
// The design affords the following:
// - User can use a sleep to allow events to be delivered as a batch.
// This can help prevent running the same macro operation multiple times
// because events came in one at a time.
// The sleep also allows inotify to coalese similar events together.
// - Access to underlying linux events, so finer handling of moves, etc.
import (
"fmt"
"os"
"path/filepath"
"strings"
"sync"
"sync/atomic"
"syscall"
"time"
"unsafe"
"github.com/ugorji/go-common/errorutil"
"github.com/ugorji/go-common/logging"
)
var log = logging.PkgLogger()
var ClosedErr = errorutil.String("<watcher closed>")
// Use select, so read doesn't block
const useSelect = true
// Use non-block, so we don't block on read.
const useNonBlock = true
type WatchEvent struct {
syscall.InotifyEvent
Path string
Name string
}
func (e *WatchEvent) String() string {
s := make([]string, 0, 4)
x := e.Mask
if x&syscall.IN_ISDIR != 0 {
s = append(s, "IN_ISDIR")
}
if x&syscall.IN_CREATE != 0 {
s = append(s, "IN_CREATE")
}
if x&syscall.IN_CLOSE_WRITE != 0 {
s = append(s, "IN_CLOSE_WRITE")
}
if x&syscall.IN_MOVED_TO != 0 {
s = append(s, "IN_MOVED_TO")
}
if x&syscall.IN_MOVED_FROM != 0 {
s = append(s, "IN_MOVED_FROM")
}
if x&syscall.IN_DELETE != 0 {
s = append(s, "IN_DELETE")
}
if x&syscall.IN_DELETE_SELF != 0 {
s = append(s, "IN_DELETE_SELF")
}
if x&syscall.IN_MOVE_SELF != 0 {
s = append(s, "IN_MOVE_SELF")
}
return fmt.Sprintf("WatchEvent: Path: %s, Name: %s, Wd: %v, Cookie: %v, Mask: %b, %v",
e.Path, e.Name, e.Wd, e.Cookie, e.Mask, s)
}
// Watcher implements a watch service.
// It allows user to handle events natively, but does
// management of event bus and delivery.
// User just provides a callback function.
type Watcher struct {
sl time.Duration
fd int
closed uint32
wds map[int32]string
flags map[string]uint32
mu sync.Mutex
fn func([]*WatchEvent)
ev chan []*WatchEvent
sysbufsize int
}
// NewWatcher returns a new Watcher instance.
// - bufsize: chan size (ie max number of batches available to be processed)
// - sysBufSize: syscall Inotify Buf size (ie max number of inotify events in each read)
// - sleepTime: sleep time between reads.
// Allows system coalese events, and allows us user handle events in batches.
// - fn: function to call for each batch of events read
func NewWatcher(bufsize, sysBufSize int, sleepTime time.Duration, fn func([]*WatchEvent),
) (w *Watcher, err error) {
fd, err := syscall.InotifyInit()
if err != nil {
return
}
if fd == -1 {
err = os.NewSyscallError("inotify_init", err)
return
}
if useNonBlock {
syscall.SetNonblock(fd, true)
}
w = &Watcher{
fd: fd,
fn: fn,
ev: make(chan []*WatchEvent, bufsize),
wds: make(map[int32]string),
flags: make(map[string]uint32),
sl: sleepTime,
sysbufsize: sysBufSize,
}
go w.readEvents()
go w.handleEvents()
return
}
func (w *Watcher) AddAll(ignoreErr, clear, recursive bool, flags uint32, fpaths ...string) error {
if atomic.LoadUint32(&w.closed) != 0 {
return ClosedErr
}
w.mu.Lock()
defer w.mu.Unlock()
var merr errorutil.Multi
fnErr := func(err error, message string, params ...interface{}) bool {
if ignoreErr {
log.IfError(nil, err, message, params...)
return false
} else {
errorutil.OnErrorf(&err, message, params...)
merr = append(merr, err)
return true
}
}
if clear && fnErr(w.clear(), "Error clearing Watcher") {
return merr.NonNilError()
}
for _, fpath := range fpaths {
var walkDoneErr = errorutil.String("DONE")
first := true
walkFn := func(f string, info os.FileInfo, inerr error) error {
if first || info.Mode().IsDir() {
if fnErr(w.add(f, flags), "Error adding path: %s", f) {
return walkDoneErr
}
}
if first && !recursive {
return walkDoneErr
}
first = false
return nil
}
if err := filepath.Walk(fpath, walkFn); err == walkDoneErr {
break
}
}
return merr.NonNilError()
}
func (w *Watcher) | (fpath string, flags uint32) error {
if atomic.LoadUint32(&w.closed) != 0 {
return ClosedErr
}
w.mu.Lock()
defer w.mu.Unlock()
return w.add(fpath, flags)
}
func (w *Watcher) add(fpath string, flags uint32) error {
if flags == 0 {
flags =
// delete: false
syscall.IN_CREATE |
syscall.IN_CLOSE_WRITE |
syscall.IN_MOVED_TO |
// delete: true
syscall.IN_MOVED_FROM |
syscall.IN_DELETE |
syscall.IN_DELETE_SELF |
syscall.IN_MOVE_SELF
}
// user can add more flags by passing the syscall.IN_MASK_ADD
wd, err := syscall.InotifyAddWatch(w.fd, fpath, flags)
if err != nil {
errorutil.OnErrorf(&err, "Error adding watch for path: %s", fpath)
return err
}
w.wds[int32(wd)] = fpath
w.flags[fpath] = flags
return nil
}
func (w *Watcher) Remove(fpath string) (err error) {
if atomic.LoadUint32(&w.closed) != 0 {
return ClosedErr
}
w.mu.Lock()
defer w.mu.Unlock()
return w.remove(fpath)
}
func (w *Watcher) remove(fpath string) (err error) {
for k, v := range w.wds {
if v == fpath {
_, err = syscall.InotifyRmWatch(w.fd, uint32(k))
delete(w.wds, k)
delete(w.flags, v)
break
}
}
return
}
func (w *Watcher) Clear() error {
if atomic.LoadUint32(&w.closed) != 0 {
return ClosedErr
}
w.mu.Lock()
defer w.mu.Unlock()
return w.clear()
}
func (w *Watcher) clear() error {
var merr errorutil.Multi
for k, v := range w.wds {
if _, err := syscall.InotifyRmWatch(w.fd, uint32(k)); err != nil {
errorutil.OnErrorf(&err, "Error removing watch for path: %s", v)
merr = append(merr, err)
}
}
w.wds = make(map[int32]string)
w.flags = make(map[string]uint32)
return merr.NonNilError()
}
func (w *Watcher) Close() (err error) {
// Note that, with blocking read, Close is best effort. This is because read in linux
// does not error if the file descriptor is closed. Thus the "read" syscall may not unblock.
//
// To mitigate, we use select AND NonBlocking IO during the read.
if !atomic.CompareAndSwapUint32(&w.closed, 0, 1) {
return
}
w.mu.Lock()
defer w.mu.Unlock()
w.clear()
close(w.ev)
if !(useSelect || useNonBlock) {
log.IfError(nil, syscall.Close(w.fd), "Error closing Watcher")
}
return nil
}
func (w *Watcher) readEvents() {
if useSelect || useNonBlock {
defer func() {
log.IfError(nil, syscall.Close(w.fd), "Error closing Watcher")
}()
}
// inotify events come very quickly, so we can't handle them inline.
// Instead, we grab the list of events we read and put on a queue.
// This way, we can still work on a bunch of events at same time.
var buf = make([]byte, syscall.SizeofInotifyEvent*w.sysbufsize)
for {
// always check closed right before syscalls (read/select/sleep), to minimize chance
// of race condition where fd is closed, OS assigns to someone else, and we try to read.
// slight sleep gives a chance to coalese similar events into one
if w.sl != 0 {
if atomic.LoadUint32(&w.closed) != 0 {
return
}
time.Sleep(w.sl)
}
if useSelect {
if atomic.LoadUint32(&w.closed) != 0 {
return
}
// println(">>>>> select: Checking to read")
fdset := new(syscall.FdSet)
fdset.Bits[w.fd/64] |= 1 << (uint(w.fd) % 64) // FD_SET
// fdIsSet := (fdset.Bits[w.fd/64] & (1 << (uint(w.fd) % 64))) != 0 // FD_ISSET
// for i := range fdset.Bits { fdset.Bits[i] = 0 } // FD_ZERO
selTimeout := syscall.NsecToTimeval(int64(1 * time.Second))
num, err := syscall.Select(w.fd+1, fdset, nil, nil, &selTimeout)
// if err != nil || num == 0 {
if (fdset.Bits[w.fd/64] & (1 << (uint(w.fd) % 64))) == 0 { // FD_ISSET
log.IfError(nil, err, "Error during Watcher select, which returned: %d", num)
continue
}
// println(">>>>> select: will read")
}
if atomic.LoadUint32(&w.closed) != 0 {
return
}
n, err := syscall.Read(w.fd, buf[0:])
if useNonBlock && err == syscall.EAGAIN {
// println(">>>>> non-block: EAGAIN")
continue
}
// even if there is an error, see if any events already read and process them.
log.IfError(nil, err, "Error during Watcher read, which returned %d bytes", n)
if n == 0 {
break // EOF
}
if n < syscall.SizeofInotifyEvent {
continue // short read
}
var offset uint32
wevs := make([]*WatchEvent, 0, n/(syscall.SizeofInotifyEvent*2))
for offset <= uint32(n-syscall.SizeofInotifyEvent) {
// raw.Wd, raw.Mask, raw.Cookie, raw.Len (all uint32)
raw := (*syscall.InotifyEvent)(unsafe.Pointer(&buf[offset]))
fpath := w.wds[raw.Wd]
// skip some events
if raw.Mask&syscall.IN_IGNORED != 0 ||
raw.Mask&syscall.IN_Q_OVERFLOW != 0 ||
raw.Mask&syscall.IN_UNMOUNT != 0 ||
fpath == "" {
offset += syscall.SizeofInotifyEvent + raw.Len
continue
}
wev := &WatchEvent{InotifyEvent: *raw, Path: fpath}
if raw.Len != 0 {
bs := (*[syscall.PathMax]byte)(unsafe.Pointer(&buf[offset+syscall.SizeofInotifyEvent]))
wev.Name = strings.TrimRight(string(bs[0:raw.Len]), "\000")
}
wevs = append(wevs, wev)
offset += syscall.SizeofInotifyEvent + raw.Len
}
select {
case w.ev <- wevs:
case <-time.After(10 * time.Millisecond):
// drop if after 30 milliseconds and no one to accept
}
}
}
func (w *Watcher) handleEvents() {
for x := range w.ev {
w.fn(x)
}
}
| Add | identifier_name |
inotify_linux_2.go | //+build linux
package fsnotify
// watcher tracks the following events:
// - dir create, delete, move
// - file write, delete, move
//
// configFile changes and directory moves always trigger a full reload.
// It's not incremental.
//
// Also, any incremental changes to page files (*.page.*) will recreate
// static site completely (if StaticSite=true). There is no "incremental"
// generation of static site data, because tags, feeds and dir indexes
// can refer to files beyond a single file change.
//
// For these MACRO updates (full reload, createStatic), the system will
// bundle the updates and do it once at the end of processing a bunch of reads.
//
// syscall.Read is typically a blocking call.
// Thus, we SHOULDN'T close it from a different thread, since the behaviour
// in linux is undefined.
//
// To accomodate, we use select with a 1 second timeout, and use non-blocking
// reads, so read never hangs. We then also close the file descriptor within the
// readEvents loop.
//
// The design affords the following:
// - User can use a sleep to allow events to be delivered as a batch.
// This can help prevent running the same macro operation multiple times
// because events came in one at a time.
// The sleep also allows inotify to coalese similar events together.
// - Access to underlying linux events, so finer handling of moves, etc.
import (
"fmt"
"os"
"path/filepath"
"strings"
"sync"
"sync/atomic"
"syscall"
"time"
"unsafe"
"github.com/ugorji/go-common/errorutil"
"github.com/ugorji/go-common/logging"
)
var log = logging.PkgLogger()
var ClosedErr = errorutil.String("<watcher closed>")
// Use select, so read doesn't block
const useSelect = true
// Use non-block, so we don't block on read.
const useNonBlock = true
type WatchEvent struct {
syscall.InotifyEvent
Path string
Name string
}
func (e *WatchEvent) String() string {
s := make([]string, 0, 4)
x := e.Mask
if x&syscall.IN_ISDIR != 0 {
s = append(s, "IN_ISDIR")
}
if x&syscall.IN_CREATE != 0 {
s = append(s, "IN_CREATE")
}
if x&syscall.IN_CLOSE_WRITE != 0 {
s = append(s, "IN_CLOSE_WRITE")
}
if x&syscall.IN_MOVED_TO != 0 {
s = append(s, "IN_MOVED_TO")
}
if x&syscall.IN_MOVED_FROM != 0 {
s = append(s, "IN_MOVED_FROM")
}
if x&syscall.IN_DELETE != 0 {
s = append(s, "IN_DELETE")
}
if x&syscall.IN_DELETE_SELF != 0 {
s = append(s, "IN_DELETE_SELF")
}
if x&syscall.IN_MOVE_SELF != 0 {
s = append(s, "IN_MOVE_SELF")
}
return fmt.Sprintf("WatchEvent: Path: %s, Name: %s, Wd: %v, Cookie: %v, Mask: %b, %v",
e.Path, e.Name, e.Wd, e.Cookie, e.Mask, s)
}
// Watcher implements a watch service.
// It allows user to handle events natively, but does
// management of event bus and delivery.
// User just provides a callback function.
type Watcher struct {
sl time.Duration
fd int
closed uint32
wds map[int32]string
flags map[string]uint32
mu sync.Mutex
fn func([]*WatchEvent)
ev chan []*WatchEvent
sysbufsize int
}
// NewWatcher returns a new Watcher instance.
// - bufsize: chan size (ie max number of batches available to be processed)
// - sysBufSize: syscall Inotify Buf size (ie max number of inotify events in each read)
// - sleepTime: sleep time between reads.
// Allows system coalese events, and allows us user handle events in batches.
// - fn: function to call for each batch of events read
func NewWatcher(bufsize, sysBufSize int, sleepTime time.Duration, fn func([]*WatchEvent),
) (w *Watcher, err error) {
fd, err := syscall.InotifyInit()
if err != nil {
return
}
if fd == -1 {
err = os.NewSyscallError("inotify_init", err)
return
}
if useNonBlock {
syscall.SetNonblock(fd, true)
}
w = &Watcher{
fd: fd,
fn: fn,
ev: make(chan []*WatchEvent, bufsize),
wds: make(map[int32]string),
flags: make(map[string]uint32),
sl: sleepTime,
sysbufsize: sysBufSize,
}
go w.readEvents()
go w.handleEvents()
return
}
func (w *Watcher) AddAll(ignoreErr, clear, recursive bool, flags uint32, fpaths ...string) error {
if atomic.LoadUint32(&w.closed) != 0 {
return ClosedErr
}
w.mu.Lock()
defer w.mu.Unlock()
var merr errorutil.Multi
fnErr := func(err error, message string, params ...interface{}) bool {
if ignoreErr {
log.IfError(nil, err, message, params...)
return false
} else {
errorutil.OnErrorf(&err, message, params...)
merr = append(merr, err)
return true
}
}
if clear && fnErr(w.clear(), "Error clearing Watcher") {
return merr.NonNilError()
}
for _, fpath := range fpaths {
var walkDoneErr = errorutil.String("DONE")
first := true
walkFn := func(f string, info os.FileInfo, inerr error) error {
if first || info.Mode().IsDir() {
if fnErr(w.add(f, flags), "Error adding path: %s", f) {
return walkDoneErr
}
}
if first && !recursive {
return walkDoneErr
}
first = false
return nil
}
if err := filepath.Walk(fpath, walkFn); err == walkDoneErr {
break
}
}
return merr.NonNilError()
}
func (w *Watcher) Add(fpath string, flags uint32) error {
if atomic.LoadUint32(&w.closed) != 0 {
return ClosedErr
}
w.mu.Lock()
defer w.mu.Unlock()
return w.add(fpath, flags)
}
func (w *Watcher) add(fpath string, flags uint32) error {
if flags == 0 {
flags =
// delete: false
syscall.IN_CREATE |
syscall.IN_CLOSE_WRITE |
syscall.IN_MOVED_TO |
// delete: true
syscall.IN_MOVED_FROM |
syscall.IN_DELETE |
syscall.IN_DELETE_SELF |
syscall.IN_MOVE_SELF
}
// user can add more flags by passing the syscall.IN_MASK_ADD
wd, err := syscall.InotifyAddWatch(w.fd, fpath, flags)
if err != nil {
errorutil.OnErrorf(&err, "Error adding watch for path: %s", fpath)
return err
}
w.wds[int32(wd)] = fpath
w.flags[fpath] = flags
return nil
}
func (w *Watcher) Remove(fpath string) (err error) {
if atomic.LoadUint32(&w.closed) != 0 {
return ClosedErr
}
w.mu.Lock()
defer w.mu.Unlock()
return w.remove(fpath)
}
func (w *Watcher) remove(fpath string) (err error) {
for k, v := range w.wds {
if v == fpath {
_, err = syscall.InotifyRmWatch(w.fd, uint32(k))
delete(w.wds, k)
delete(w.flags, v)
break
}
}
return
}
func (w *Watcher) Clear() error {
if atomic.LoadUint32(&w.closed) != 0 {
return ClosedErr
}
w.mu.Lock()
defer w.mu.Unlock()
return w.clear()
}
func (w *Watcher) clear() error {
var merr errorutil.Multi
for k, v := range w.wds {
if _, err := syscall.InotifyRmWatch(w.fd, uint32(k)); err != nil {
errorutil.OnErrorf(&err, "Error removing watch for path: %s", v)
merr = append(merr, err)
}
}
w.wds = make(map[int32]string)
w.flags = make(map[string]uint32)
return merr.NonNilError()
}
func (w *Watcher) Close() (err error) {
// Note that, with blocking read, Close is best effort. This is because read in linux
// does not error if the file descriptor is closed. Thus the "read" syscall may not unblock.
//
// To mitigate, we use select AND NonBlocking IO during the read.
if !atomic.CompareAndSwapUint32(&w.closed, 0, 1) {
return
}
w.mu.Lock()
defer w.mu.Unlock()
w.clear()
close(w.ev)
if !(useSelect || useNonBlock) {
log.IfError(nil, syscall.Close(w.fd), "Error closing Watcher")
}
return nil
}
func (w *Watcher) readEvents() {
if useSelect || useNonBlock {
defer func() {
log.IfError(nil, syscall.Close(w.fd), "Error closing Watcher")
}()
}
// inotify events come very quickly, so we can't handle them inline.
// Instead, we grab the list of events we read and put on a queue.
// This way, we can still work on a bunch of events at same time.
var buf = make([]byte, syscall.SizeofInotifyEvent*w.sysbufsize)
for {
// always check closed right before syscalls (read/select/sleep), to minimize chance
// of race condition where fd is closed, OS assigns to someone else, and we try to read.
// slight sleep gives a chance to coalese similar events into one
if w.sl != 0 {
if atomic.LoadUint32(&w.closed) != 0 {
return
}
time.Sleep(w.sl)
}
if useSelect {
if atomic.LoadUint32(&w.closed) != 0 {
return
}
// println(">>>>> select: Checking to read")
fdset := new(syscall.FdSet)
fdset.Bits[w.fd/64] |= 1 << (uint(w.fd) % 64) // FD_SET
// fdIsSet := (fdset.Bits[w.fd/64] & (1 << (uint(w.fd) % 64))) != 0 // FD_ISSET
// for i := range fdset.Bits { fdset.Bits[i] = 0 } // FD_ZERO
selTimeout := syscall.NsecToTimeval(int64(1 * time.Second))
num, err := syscall.Select(w.fd+1, fdset, nil, nil, &selTimeout)
// if err != nil || num == 0 {
if (fdset.Bits[w.fd/64] & (1 << (uint(w.fd) % 64))) == 0 { // FD_ISSET
log.IfError(nil, err, "Error during Watcher select, which returned: %d", num)
continue
}
// println(">>>>> select: will read")
}
if atomic.LoadUint32(&w.closed) != 0 {
return
}
n, err := syscall.Read(w.fd, buf[0:])
if useNonBlock && err == syscall.EAGAIN {
// println(">>>>> non-block: EAGAIN")
continue
}
// even if there is an error, see if any events already read and process them.
log.IfError(nil, err, "Error during Watcher read, which returned %d bytes", n)
if n == 0 {
break // EOF
}
if n < syscall.SizeofInotifyEvent {
continue // short read
}
var offset uint32
wevs := make([]*WatchEvent, 0, n/(syscall.SizeofInotifyEvent*2))
for offset <= uint32(n-syscall.SizeofInotifyEvent) {
// raw.Wd, raw.Mask, raw.Cookie, raw.Len (all uint32)
raw := (*syscall.InotifyEvent)(unsafe.Pointer(&buf[offset]))
fpath := w.wds[raw.Wd]
// skip some events
if raw.Mask&syscall.IN_IGNORED != 0 ||
raw.Mask&syscall.IN_Q_OVERFLOW != 0 ||
raw.Mask&syscall.IN_UNMOUNT != 0 ||
fpath == "" {
offset += syscall.SizeofInotifyEvent + raw.Len
continue
}
wev := &WatchEvent{InotifyEvent: *raw, Path: fpath}
if raw.Len != 0 |
wevs = append(wevs, wev)
offset += syscall.SizeofInotifyEvent + raw.Len
}
select {
case w.ev <- wevs:
case <-time.After(10 * time.Millisecond):
// drop if after 30 milliseconds and no one to accept
}
}
}
func (w *Watcher) handleEvents() {
for x := range w.ev {
w.fn(x)
}
}
| {
bs := (*[syscall.PathMax]byte)(unsafe.Pointer(&buf[offset+syscall.SizeofInotifyEvent]))
wev.Name = strings.TrimRight(string(bs[0:raw.Len]), "\000")
} | conditional_block |
utils.go | // Copyright 2014-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package util
import (
"crypto/md5"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"os"
"path"
"path/filepath"
"regexp"
"runtime"
"strings"
"testing"
"time"
"github.com/aws/amazon-ecs-agent/agent/ec2"
"github.com/aws/amazon-ecs-agent/agent/ecs_client/model/ecs"
"github.com/aws/amazon-ecs-agent/agent/handlers"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
docker "github.com/fsouza/go-dockerclient"
)
var ECS *ecs.ECS
var Cluster string
func init() {
var ecsconfig aws.Config
if region := os.Getenv("AWS_REGION"); region != "" {
ecsconfig.Region = ®ion
}
if region := os.Getenv("AWS_DEFAULT_REGION"); region != "" {
ecsconfig.Region = ®ion
}
if ecsconfig.Region == nil {
if iid, err := ec2.GetInstanceIdentityDocument(); err == nil {
ecsconfig.Region = &iid.Region
}
}
if envEndpoint := os.Getenv("ECS_BACKEND_HOST"); envEndpoint != "" {
ecsconfig.Endpoint = &envEndpoint
}
ECS = ecs.New(session.New(&ecsconfig))
Cluster = "ecs-functional-tests"
if envCluster := os.Getenv("ECS_CLUSTER"); envCluster != "" {
Cluster = envCluster
}
ECS.CreateCluster(&ecs.CreateClusterInput{
ClusterName: aws.String(Cluster),
})
}
// GetTaskDefinition is a helper that provies the family:revision for the named
// task definition where the name matches the folder in which the task
// definition is present. In order to avoid re-registering a task definition
// when it has already been regestered in the past, this registers a task
// definition of the pattern 'family-md5sum' with md5sum being the input task
// definition json's md5. This special family name is checked for existence
// before a new one is registered and it is assumed that if it exists, the task
// definition currently represented by the file was registered as such already.
func GetTaskDefinition(name string) (string, error) {
_, filename, _, _ := runtime.Caller(0)
tdData, err := ioutil.ReadFile(filepath.Join(path.Dir(filename), "..", "testdata", "taskdefinitions", name, "task-definition.json"))
if err != nil {
return "", err
}
registerRequest := &ecs.RegisterTaskDefinitionInput{}
err = json.Unmarshal(tdData, registerRequest)
if err != nil {
return "", err
}
tdHash := fmt.Sprintf("%x", md5.Sum(tdData))
idempotentFamily := *registerRequest.Family + "-" + tdHash
existing, err := ECS.DescribeTaskDefinition(&ecs.DescribeTaskDefinitionInput{
TaskDefinition: &idempotentFamily,
})
if err == nil {
return fmt.Sprintf("%s:%d", *existing.TaskDefinition.Family, *existing.TaskDefinition.Revision), nil
}
registerRequest.Family = &idempotentFamily
registered, err := ECS.RegisterTaskDefinition(registerRequest)
if err != nil {
return "", err
}
return fmt.Sprintf("%s:%d", *registered.TaskDefinition.Family, *registered.TaskDefinition.Revision), nil
}
type TestAgent struct {
Image string
DockerID string
IntrospectionURL string
Version string
ContainerInstanceArn string
Cluster string
TestDir string
Logdir string
Options *AgentOptions
DockerClient *docker.Client
t *testing.T
}
type AgentOptions struct {
ExtraEnvironment map[string]string
ContainerLinks []string
}
// RunAgent launches the agent and returns an object which may be used to reference it.
// It will wait until the agent is correctly registered before returning.
// 'version' may be a docker image (e.g. amazon/amazon-ecs-agent:v1.0.0) with
// tag that may be used to run the agent. It defaults to
// 'amazon/amazon-ecs-agent:make', the version created locally by running
// 'make'
func RunAgent(t *testing.T, options *AgentOptions) *TestAgent {
agent := &TestAgent{t: t}
agentImage := "amazon/amazon-ecs-agent:make"
if envImage := os.Getenv("ECS_AGENT_IMAGE"); envImage != "" {
agentImage = envImage
}
agent.Image = agentImage
dockerClient, err := docker.NewClientFromEnv()
if err != nil {
t.Fatal(err)
}
agent.DockerClient = dockerClient
_, err = dockerClient.InspectImage(agentImage)
if err != nil {
err = dockerClient.PullImage(docker.PullImageOptions{Repository: agentImage}, docker.AuthConfiguration{})
if err != nil {
t.Fatal("Could not launch agent", err)
}
}
tmpdirOverride := os.Getenv("ECS_FTEST_TMP")
agentTempdir, err := ioutil.TempDir(tmpdirOverride, "ecs_integ_testdata")
if err != nil {
t.Fatal("Could not create temp dir for test")
}
logdir := filepath.Join(agentTempdir, "logs")
datadir := filepath.Join(agentTempdir, "data")
os.Mkdir(logdir, 0755)
os.Mkdir(datadir, 0755)
agent.TestDir = agentTempdir
agent.Options = options
if options == nil {
agent.Options = &AgentOptions{}
}
t.Logf("Created directory %s to store test data in", agentTempdir)
err = agent.StartAgent()
if err != nil {
t.Fatal(err)
}
return agent
}
func (agent *TestAgent) StopAgent() error {
return agent.DockerClient.StopContainer(agent.DockerID, 10)
}
func (agent *TestAgent) StartAgent() error {
agent.t.Logf("Launching agent with image: %s\n", agent.Image)
logdir := filepath.Join(agent.TestDir, "logs")
datadir := filepath.Join(agent.TestDir, "data")
agent.Logdir = logdir
dockerConfig := &docker.Config{
Image: agent.Image,
ExposedPorts: map[docker.Port]struct{}{
"51678/tcp": struct{}{},
},
Env: []string{
"ECS_CLUSTER=" + Cluster,
"ECS_DATADIR=/data",
"ECS_LOGLEVEL=debug",
"ECS_LOGFILE=/logs/integ_agent.log",
"ECS_BACKEND_HOST=" + os.Getenv("ECS_BACKEND_HOST"),
"AWS_ACCESS_KEY_ID=" + os.Getenv("AWS_ACCESS_KEY_ID"),
"AWS_DEFAULT_REGION=" + *ECS.Config.Region,
"AWS_SECRET_ACCESS_KEY=" + os.Getenv("AWS_SECRET_ACCESS_KEY"),
"ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION=" + os.Getenv("ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION"),
},
Cmd: strings.Split(os.Getenv("ECS_FTEST_AGENT_ARGS"), " "),
}
hostConfig := &docker.HostConfig{
Binds: []string{
"/var/run/docker.sock:/var/run/docker.sock",
logdir + ":/logs",
datadir + ":/data",
},
PortBindings: map[docker.Port][]docker.PortBinding{
"51678/tcp": []docker.PortBinding{docker.PortBinding{HostIP: "0.0.0.0"}},
},
Links: agent.Options.ContainerLinks,
}
if agent.Options != nil {
for key, value := range agent.Options.ExtraEnvironment {
dockerConfig.Env = append(dockerConfig.Env, key+"="+value)
}
}
agentContainer, err := agent.DockerClient.CreateContainer(docker.CreateContainerOptions{
Config: dockerConfig,
HostConfig: hostConfig,
})
if err != nil {
agent.t.Fatal("Could not create agent container", err)
}
agent.DockerID = agentContainer.ID
agent.t.Logf("Agent started as docker container: %s\n", agentContainer.ID)
err = agent.DockerClient.StartContainer(agentContainer.ID, nil)
if err != nil {
return errors.New("Could not start agent container " + err.Error())
}
containerMetadata, err := agent.DockerClient.InspectContainer(agentContainer.ID)
if err != nil {
return errors.New("Could not inspect agent container: " + err.Error())
}
agent.IntrospectionURL = "http://localhost:" + containerMetadata.NetworkSettings.Ports["51678/tcp"][0].HostPort
// Wait up to 10s for it to register
var localMetadata handlers.MetadataResponse
for i := 0; i < 10; i++ {
func() {
agentMetadataResp, err := http.Get(agent.IntrospectionURL + "/v1/metadata")
if err != nil {
return
}
metadata, err := ioutil.ReadAll(agentMetadataResp.Body)
if err != nil {
return
}
json.Unmarshal(metadata, &localMetadata)
}()
if localMetadata.ContainerInstanceArn != nil && *localMetadata.ContainerInstanceArn != "" {
break
}
time.Sleep(1 * time.Second)
}
if localMetadata.ContainerInstanceArn == nil {
agent.DockerClient.StopContainer(agent.DockerID, 1)
return errors.New("Could not get agent metadata after launching it")
}
agent.ContainerInstanceArn = *localMetadata.ContainerInstanceArn
agent.Cluster = localMetadata.Cluster
if localMetadata.Version != "" {
versionNumberRegex := regexp.MustCompile(` v(\d+\.\d+\.\d+) `)
versionNumberStr := versionNumberRegex.FindStringSubmatch(localMetadata.Version)
if len(versionNumberStr) == 2 {
agent.Version = string(versionNumberStr[1])
}
}
if agent.Version == "" {
agent.Version = "UNKNOWN"
}
agent.t.Logf("Found agent metadata: %+v", localMetadata)
return nil
}
func (agent *TestAgent) Cleanup() {
agent.StopAgent()
if agent.t.Failed() {
agent.t.Logf("Preserving test dir for failed test %s", agent.TestDir)
} else {
agent.t.Logf("Removing test dir for passed test %s", agent.TestDir)
os.RemoveAll(agent.TestDir)
}
ECS.DeregisterContainerInstance(&ecs.DeregisterContainerInstanceInput{
Cluster: &agent.Cluster,
ContainerInstance: &agent.ContainerInstanceArn,
Force: aws.Bool(true),
})
}
func (agent *TestAgent) StartMultipleTasks(t *testing.T, task string, num int) ([]*TestTask, error) {
td, err := GetTaskDefinition(task)
if err != nil {
return nil, err
}
t.Logf("Task definition: %s", td)
cis := make([]*string, num)
for i := 0; i < num; i++ {
cis[i] = &agent.ContainerInstanceArn
}
resp, err := ECS.StartTask(&ecs.StartTaskInput{
Cluster: &agent.Cluster,
ContainerInstances: cis,
TaskDefinition: &td,
})
if err != nil {
return nil, err
}
if len(resp.Failures) != 0 || len(resp.Tasks) == 0 {
return nil, errors.New("Failure starting task: " + *resp.Failures[0].Reason)
}
testTasks := make([]*TestTask, num)
for i, task := range resp.Tasks {
agent.t.Logf("Started task: %s\n", *task.TaskArn)
testTasks[i] = &TestTask{task}
}
return testTasks, nil
}
func (agent *TestAgent) StartTask(t *testing.T, task string) (*TestTask, error) {
tasks, err := agent.StartMultipleTasks(t, task, 1)
if err != nil {
return nil, err
}
return tasks[0], nil
}
func (agent *TestAgent) StartTaskWithOverrides(t *testing.T, task string, overrides []*ecs.ContainerOverride) (*TestTask, error) {
td, err := GetTaskDefinition(task)
if err != nil {
return nil, err
}
t.Logf("Task definition: %s", td)
resp, err := ECS.StartTask(&ecs.StartTaskInput{
Cluster: &agent.Cluster,
ContainerInstances: []*string{&agent.ContainerInstanceArn},
TaskDefinition: &td,
Overrides: &ecs.TaskOverride{
ContainerOverrides: overrides,
},
})
if err != nil {
return nil, err
}
if len(resp.Failures) != 0 || len(resp.Tasks) == 0 {
return nil, errors.New("Failure starting task: " + *resp.Failures[0].Reason)
}
agent.t.Logf("Started task: %s\n", *resp.Tasks[0].TaskArn)
return &TestTask{resp.Tasks[0]}, nil
}
// ResolveTaskDockerID determines the Docker ID for a container within a given
// task that has been run by the Agent.
func (agent *TestAgent) ResolveTaskDockerID(task *TestTask, containerName string) (string, error) {
var err error
var dockerId string
for i := 0; i < 5; i++ {
dockerId, err = agent.resolveTaskDockerID(task, containerName)
if err == nil {
break
}
time.Sleep(100 * time.Millisecond)
}
return dockerId, err
}
func (agent *TestAgent) resolveTaskDockerID(task *TestTask, containerName string) (string, error) {
bodyData, err := agent.callTaskIntrospectionApi(*task.TaskArn)
if err != nil {
return "", err
}
var taskResp handlers.TaskResponse
err = json.Unmarshal(*bodyData, &taskResp)
if err != nil {
return "", err
}
if len(taskResp.Containers) == 0 {
return "", errors.New("No containers in task response")
}
for _, container := range taskResp.Containers {
if container.Name == containerName {
return container.DockerId, nil
}
}
return "", errors.New("No containers matched given name")
}
func (agent *TestAgent) WaitStoppedViaIntrospection(task *TestTask) (bool, error) {
var err error
var isStopped bool
for i := 0; i < 5; i++ {
isStopped, err = agent.waitStoppedViaIntrospection(task)
if err == nil {
break
}
time.Sleep(100 * time.Millisecond)
}
return isStopped, err
}
func (agent *TestAgent) waitStoppedViaIntrospection(task *TestTask) (bool, error) {
rawResponse, err := agent.callTaskIntrospectionApi(*task.TaskArn)
if err != nil {
return false, err
}
var taskResp handlers.TaskResponse
err = json.Unmarshal(*rawResponse, &taskResp)
if taskResp.KnownStatus == "STOPPED" {
return true, nil
} else {
return false, errors.New("Task should be STOPPED but is " + taskResp.KnownStatus)
}
}
func (agent *TestAgent) WaitRunningViaIntrospection(task *TestTask) (bool, error) {
var err error
var isRunning bool
for i := 0; i < 5; i++ {
isRunning, err = agent.waitRunningViaIntrospection(task)
if err == nil && isRunning {
break
}
time.Sleep(10000 * time.Millisecond)
}
return isRunning, err
}
func (agent *TestAgent) waitRunningViaIntrospection(task *TestTask) (bool, error) {
rawResponse, err := agent.callTaskIntrospectionApi(*task.TaskArn)
if err != nil {
return false, err
}
var taskResp handlers.TaskResponse
err = json.Unmarshal(*rawResponse, &taskResp)
if taskResp.KnownStatus == "RUNNING" {
return true, nil
} else {
return false, errors.New("Task should be RUNNING but is " + taskResp.KnownStatus)
}
}
func (agent *TestAgent) callTaskIntrospectionApi(taskArn string) (*[]byte, error) {
fullIntrospectionApiURL := agent.IntrospectionURL + "/v1/tasks"
if taskArn != "" {
fullIntrospectionApiURL += "?taskarn=" + taskArn
}
agentTasksResp, err := http.Get(fullIntrospectionApiURL)
if err != nil {
return nil, err
}
bodyData, err := ioutil.ReadAll(agentTasksResp.Body)
if err != nil {
return nil, err
}
return &bodyData, nil
}
func (agent *TestAgent) RequireVersion(version string) {
if agent.Version == "UNKNOWN" {
agent.t.Skipf("Skipping test requiring version %v; agent version unknown", version)
}
matches, err := Version(agent.Version).Matches(version)
if err != nil {
agent.t.Skipf("Skipping test requiring version %v; could not compare because of error: %v", version, err)
}
if !matches {
agent.t.Skipf("Skipping test requiring version %v; agent version %v", version, agent.Version)
}
}
type TestTask struct {
*ecs.Task
}
func (task *TestTask) Redescribe() {
res, err := ECS.DescribeTasks(&ecs.DescribeTasksInput{
Cluster: task.ClusterArn,
Tasks: []*string{task.TaskArn},
})
if err == nil && len(res.Failures) == 0 {
task.Task = res.Tasks[0]
}
}
func (task *TestTask) waitStatus(timeout time.Duration, status string) error {
timer := time.NewTimer(timeout)
atStatus := make(chan error, 1)
cancelled := false
go func() {
if *task.LastStatus == "STOPPED" && status != "STOPPED" {
atStatus <- errors.New("Task terminal; will never reach " + status)
return
}
for *task.LastStatus != status && !cancelled {
task.Redescribe()
if *task.LastStatus == status {
break
}
if *task.LastStatus == "STOPPED" && status != "STOPPED" {
atStatus <- errors.New("Task terminal; will never reach " + status)
return
}
time.Sleep(5 * time.Second)
}
atStatus <- nil
}()
select {
case err := <-atStatus:
return err
case <-timer.C:
cancelled = true
return errors.New("Timed out waiting for task to reach " + status + ": " + *task.TaskDefinitionArn + ", " + *task.TaskArn)
}
}
func (task *TestTask) ContainerExitcode(name string) (int, bool) {
for _, cont := range task.Containers {
if cont != nil && cont.Name != nil && cont.ExitCode != nil {
if *cont.Name == name {
return int(*cont.ExitCode), true
}
}
}
return 0, false
}
func (task *TestTask) | (timeout time.Duration) error {
return task.waitStatus(timeout, "RUNNING")
}
func (task *TestTask) WaitStopped(timeout time.Duration) error {
return task.waitStatus(timeout, "STOPPED")
}
func (task *TestTask) ExpectErrorType(containerName, errType string, timeout time.Duration) error {
task.WaitStopped(timeout)
for _, container := range task.Containers {
if *container.Name != containerName {
continue
}
if container.Reason == nil {
return errors.New("Expected error reason")
}
errParts := strings.SplitN(*container.Reason, ":", 2)
if len(errParts) != 2 {
return errors.New("Error did not have a type: " + *container.Reason)
}
if errParts[0] != errType {
return errors.New("Type did not match: " + *container.Reason)
}
return nil
}
return errors.New("Could not find container " + containerName + " in task " + *task.TaskArn)
}
func (task *TestTask) Stop() error {
_, err := ECS.StopTask(&ecs.StopTaskInput{
Cluster: task.ClusterArn,
Task: task.TaskArn,
})
return err
}
func RequireDockerVersion(t *testing.T, selector string) {
dockerClient, err := docker.NewClientFromEnv()
if err != nil {
t.Fatalf("Could not get docker client to check version: %v", err)
}
dockerVersion, err := dockerClient.Version()
if err != nil {
t.Fatalf("Could not get docker version: %v", err)
}
version := dockerVersion.Get("Version")
match, err := Version(version).Matches(selector)
if err != nil {
t.Fatalf("Could not check docker version to match required: %v", err)
}
if !match {
t.Skipf("Skipping test; requires %v, but version is %v", selector, version)
}
}
| WaitRunning | identifier_name |
utils.go | // Copyright 2014-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package util
import (
"crypto/md5"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"os"
"path"
"path/filepath"
"regexp"
"runtime"
"strings"
"testing"
"time"
"github.com/aws/amazon-ecs-agent/agent/ec2"
"github.com/aws/amazon-ecs-agent/agent/ecs_client/model/ecs"
"github.com/aws/amazon-ecs-agent/agent/handlers"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
docker "github.com/fsouza/go-dockerclient"
)
var ECS *ecs.ECS
var Cluster string
func init() {
var ecsconfig aws.Config
if region := os.Getenv("AWS_REGION"); region != "" {
ecsconfig.Region = ®ion
}
if region := os.Getenv("AWS_DEFAULT_REGION"); region != "" {
ecsconfig.Region = ®ion
}
if ecsconfig.Region == nil {
if iid, err := ec2.GetInstanceIdentityDocument(); err == nil {
ecsconfig.Region = &iid.Region
}
}
if envEndpoint := os.Getenv("ECS_BACKEND_HOST"); envEndpoint != "" {
ecsconfig.Endpoint = &envEndpoint
}
ECS = ecs.New(session.New(&ecsconfig))
Cluster = "ecs-functional-tests"
if envCluster := os.Getenv("ECS_CLUSTER"); envCluster != "" {
Cluster = envCluster
}
ECS.CreateCluster(&ecs.CreateClusterInput{
ClusterName: aws.String(Cluster),
})
}
// GetTaskDefinition is a helper that provies the family:revision for the named
// task definition where the name matches the folder in which the task
// definition is present. In order to avoid re-registering a task definition
// when it has already been regestered in the past, this registers a task
// definition of the pattern 'family-md5sum' with md5sum being the input task
// definition json's md5. This special family name is checked for existence
// before a new one is registered and it is assumed that if it exists, the task
// definition currently represented by the file was registered as such already.
func GetTaskDefinition(name string) (string, error) {
_, filename, _, _ := runtime.Caller(0)
tdData, err := ioutil.ReadFile(filepath.Join(path.Dir(filename), "..", "testdata", "taskdefinitions", name, "task-definition.json"))
if err != nil {
return "", err
}
registerRequest := &ecs.RegisterTaskDefinitionInput{}
err = json.Unmarshal(tdData, registerRequest)
if err != nil {
return "", err
}
tdHash := fmt.Sprintf("%x", md5.Sum(tdData))
idempotentFamily := *registerRequest.Family + "-" + tdHash
existing, err := ECS.DescribeTaskDefinition(&ecs.DescribeTaskDefinitionInput{
TaskDefinition: &idempotentFamily,
})
if err == nil {
return fmt.Sprintf("%s:%d", *existing.TaskDefinition.Family, *existing.TaskDefinition.Revision), nil
}
registerRequest.Family = &idempotentFamily
registered, err := ECS.RegisterTaskDefinition(registerRequest)
if err != nil {
return "", err
}
return fmt.Sprintf("%s:%d", *registered.TaskDefinition.Family, *registered.TaskDefinition.Revision), nil
}
type TestAgent struct {
Image string
DockerID string
IntrospectionURL string
Version string
ContainerInstanceArn string
Cluster string
TestDir string
Logdir string
Options *AgentOptions
DockerClient *docker.Client
t *testing.T
}
type AgentOptions struct {
ExtraEnvironment map[string]string
ContainerLinks []string
}
// RunAgent launches the agent and returns an object which may be used to reference it.
// It will wait until the agent is correctly registered before returning.
// 'version' may be a docker image (e.g. amazon/amazon-ecs-agent:v1.0.0) with
// tag that may be used to run the agent. It defaults to
// 'amazon/amazon-ecs-agent:make', the version created locally by running
// 'make'
func RunAgent(t *testing.T, options *AgentOptions) *TestAgent {
agent := &TestAgent{t: t}
agentImage := "amazon/amazon-ecs-agent:make"
if envImage := os.Getenv("ECS_AGENT_IMAGE"); envImage != "" {
agentImage = envImage
}
agent.Image = agentImage
dockerClient, err := docker.NewClientFromEnv()
if err != nil {
t.Fatal(err)
}
agent.DockerClient = dockerClient
_, err = dockerClient.InspectImage(agentImage)
if err != nil {
err = dockerClient.PullImage(docker.PullImageOptions{Repository: agentImage}, docker.AuthConfiguration{})
if err != nil {
t.Fatal("Could not launch agent", err)
}
}
tmpdirOverride := os.Getenv("ECS_FTEST_TMP")
agentTempdir, err := ioutil.TempDir(tmpdirOverride, "ecs_integ_testdata")
if err != nil {
t.Fatal("Could not create temp dir for test")
}
logdir := filepath.Join(agentTempdir, "logs")
datadir := filepath.Join(agentTempdir, "data")
os.Mkdir(logdir, 0755)
os.Mkdir(datadir, 0755)
agent.TestDir = agentTempdir
agent.Options = options
if options == nil {
agent.Options = &AgentOptions{}
}
t.Logf("Created directory %s to store test data in", agentTempdir)
err = agent.StartAgent()
if err != nil {
t.Fatal(err)
}
return agent
}
func (agent *TestAgent) StopAgent() error {
return agent.DockerClient.StopContainer(agent.DockerID, 10)
}
func (agent *TestAgent) StartAgent() error {
agent.t.Logf("Launching agent with image: %s\n", agent.Image)
logdir := filepath.Join(agent.TestDir, "logs")
datadir := filepath.Join(agent.TestDir, "data")
agent.Logdir = logdir
dockerConfig := &docker.Config{
Image: agent.Image,
ExposedPorts: map[docker.Port]struct{}{
"51678/tcp": struct{}{},
},
Env: []string{
"ECS_CLUSTER=" + Cluster,
"ECS_DATADIR=/data",
"ECS_LOGLEVEL=debug",
"ECS_LOGFILE=/logs/integ_agent.log",
"ECS_BACKEND_HOST=" + os.Getenv("ECS_BACKEND_HOST"),
"AWS_ACCESS_KEY_ID=" + os.Getenv("AWS_ACCESS_KEY_ID"),
"AWS_DEFAULT_REGION=" + *ECS.Config.Region,
"AWS_SECRET_ACCESS_KEY=" + os.Getenv("AWS_SECRET_ACCESS_KEY"),
"ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION=" + os.Getenv("ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION"),
},
Cmd: strings.Split(os.Getenv("ECS_FTEST_AGENT_ARGS"), " "),
}
hostConfig := &docker.HostConfig{
Binds: []string{
"/var/run/docker.sock:/var/run/docker.sock",
logdir + ":/logs",
datadir + ":/data",
},
PortBindings: map[docker.Port][]docker.PortBinding{
"51678/tcp": []docker.PortBinding{docker.PortBinding{HostIP: "0.0.0.0"}},
},
Links: agent.Options.ContainerLinks,
}
if agent.Options != nil {
for key, value := range agent.Options.ExtraEnvironment {
dockerConfig.Env = append(dockerConfig.Env, key+"="+value)
}
}
agentContainer, err := agent.DockerClient.CreateContainer(docker.CreateContainerOptions{
Config: dockerConfig,
HostConfig: hostConfig,
})
if err != nil {
agent.t.Fatal("Could not create agent container", err)
}
agent.DockerID = agentContainer.ID
agent.t.Logf("Agent started as docker container: %s\n", agentContainer.ID)
err = agent.DockerClient.StartContainer(agentContainer.ID, nil)
if err != nil {
return errors.New("Could not start agent container " + err.Error())
}
containerMetadata, err := agent.DockerClient.InspectContainer(agentContainer.ID)
if err != nil {
return errors.New("Could not inspect agent container: " + err.Error())
}
agent.IntrospectionURL = "http://localhost:" + containerMetadata.NetworkSettings.Ports["51678/tcp"][0].HostPort
// Wait up to 10s for it to register
var localMetadata handlers.MetadataResponse
for i := 0; i < 10; i++ {
func() {
agentMetadataResp, err := http.Get(agent.IntrospectionURL + "/v1/metadata")
if err != nil {
return
}
metadata, err := ioutil.ReadAll(agentMetadataResp.Body)
if err != nil {
return
}
json.Unmarshal(metadata, &localMetadata)
}()
if localMetadata.ContainerInstanceArn != nil && *localMetadata.ContainerInstanceArn != "" {
break
}
time.Sleep(1 * time.Second)
}
if localMetadata.ContainerInstanceArn == nil {
agent.DockerClient.StopContainer(agent.DockerID, 1)
return errors.New("Could not get agent metadata after launching it")
}
agent.ContainerInstanceArn = *localMetadata.ContainerInstanceArn
agent.Cluster = localMetadata.Cluster
if localMetadata.Version != "" {
versionNumberRegex := regexp.MustCompile(` v(\d+\.\d+\.\d+) `)
versionNumberStr := versionNumberRegex.FindStringSubmatch(localMetadata.Version)
if len(versionNumberStr) == 2 |
}
if agent.Version == "" {
agent.Version = "UNKNOWN"
}
agent.t.Logf("Found agent metadata: %+v", localMetadata)
return nil
}
func (agent *TestAgent) Cleanup() {
agent.StopAgent()
if agent.t.Failed() {
agent.t.Logf("Preserving test dir for failed test %s", agent.TestDir)
} else {
agent.t.Logf("Removing test dir for passed test %s", agent.TestDir)
os.RemoveAll(agent.TestDir)
}
ECS.DeregisterContainerInstance(&ecs.DeregisterContainerInstanceInput{
Cluster: &agent.Cluster,
ContainerInstance: &agent.ContainerInstanceArn,
Force: aws.Bool(true),
})
}
func (agent *TestAgent) StartMultipleTasks(t *testing.T, task string, num int) ([]*TestTask, error) {
td, err := GetTaskDefinition(task)
if err != nil {
return nil, err
}
t.Logf("Task definition: %s", td)
cis := make([]*string, num)
for i := 0; i < num; i++ {
cis[i] = &agent.ContainerInstanceArn
}
resp, err := ECS.StartTask(&ecs.StartTaskInput{
Cluster: &agent.Cluster,
ContainerInstances: cis,
TaskDefinition: &td,
})
if err != nil {
return nil, err
}
if len(resp.Failures) != 0 || len(resp.Tasks) == 0 {
return nil, errors.New("Failure starting task: " + *resp.Failures[0].Reason)
}
testTasks := make([]*TestTask, num)
for i, task := range resp.Tasks {
agent.t.Logf("Started task: %s\n", *task.TaskArn)
testTasks[i] = &TestTask{task}
}
return testTasks, nil
}
func (agent *TestAgent) StartTask(t *testing.T, task string) (*TestTask, error) {
tasks, err := agent.StartMultipleTasks(t, task, 1)
if err != nil {
return nil, err
}
return tasks[0], nil
}
func (agent *TestAgent) StartTaskWithOverrides(t *testing.T, task string, overrides []*ecs.ContainerOverride) (*TestTask, error) {
td, err := GetTaskDefinition(task)
if err != nil {
return nil, err
}
t.Logf("Task definition: %s", td)
resp, err := ECS.StartTask(&ecs.StartTaskInput{
Cluster: &agent.Cluster,
ContainerInstances: []*string{&agent.ContainerInstanceArn},
TaskDefinition: &td,
Overrides: &ecs.TaskOverride{
ContainerOverrides: overrides,
},
})
if err != nil {
return nil, err
}
if len(resp.Failures) != 0 || len(resp.Tasks) == 0 {
return nil, errors.New("Failure starting task: " + *resp.Failures[0].Reason)
}
agent.t.Logf("Started task: %s\n", *resp.Tasks[0].TaskArn)
return &TestTask{resp.Tasks[0]}, nil
}
// ResolveTaskDockerID determines the Docker ID for a container within a given
// task that has been run by the Agent.
func (agent *TestAgent) ResolveTaskDockerID(task *TestTask, containerName string) (string, error) {
var err error
var dockerId string
for i := 0; i < 5; i++ {
dockerId, err = agent.resolveTaskDockerID(task, containerName)
if err == nil {
break
}
time.Sleep(100 * time.Millisecond)
}
return dockerId, err
}
func (agent *TestAgent) resolveTaskDockerID(task *TestTask, containerName string) (string, error) {
bodyData, err := agent.callTaskIntrospectionApi(*task.TaskArn)
if err != nil {
return "", err
}
var taskResp handlers.TaskResponse
err = json.Unmarshal(*bodyData, &taskResp)
if err != nil {
return "", err
}
if len(taskResp.Containers) == 0 {
return "", errors.New("No containers in task response")
}
for _, container := range taskResp.Containers {
if container.Name == containerName {
return container.DockerId, nil
}
}
return "", errors.New("No containers matched given name")
}
func (agent *TestAgent) WaitStoppedViaIntrospection(task *TestTask) (bool, error) {
var err error
var isStopped bool
for i := 0; i < 5; i++ {
isStopped, err = agent.waitStoppedViaIntrospection(task)
if err == nil {
break
}
time.Sleep(100 * time.Millisecond)
}
return isStopped, err
}
func (agent *TestAgent) waitStoppedViaIntrospection(task *TestTask) (bool, error) {
rawResponse, err := agent.callTaskIntrospectionApi(*task.TaskArn)
if err != nil {
return false, err
}
var taskResp handlers.TaskResponse
err = json.Unmarshal(*rawResponse, &taskResp)
if taskResp.KnownStatus == "STOPPED" {
return true, nil
} else {
return false, errors.New("Task should be STOPPED but is " + taskResp.KnownStatus)
}
}
func (agent *TestAgent) WaitRunningViaIntrospection(task *TestTask) (bool, error) {
var err error
var isRunning bool
for i := 0; i < 5; i++ {
isRunning, err = agent.waitRunningViaIntrospection(task)
if err == nil && isRunning {
break
}
time.Sleep(10000 * time.Millisecond)
}
return isRunning, err
}
func (agent *TestAgent) waitRunningViaIntrospection(task *TestTask) (bool, error) {
rawResponse, err := agent.callTaskIntrospectionApi(*task.TaskArn)
if err != nil {
return false, err
}
var taskResp handlers.TaskResponse
err = json.Unmarshal(*rawResponse, &taskResp)
if taskResp.KnownStatus == "RUNNING" {
return true, nil
} else {
return false, errors.New("Task should be RUNNING but is " + taskResp.KnownStatus)
}
}
func (agent *TestAgent) callTaskIntrospectionApi(taskArn string) (*[]byte, error) {
fullIntrospectionApiURL := agent.IntrospectionURL + "/v1/tasks"
if taskArn != "" {
fullIntrospectionApiURL += "?taskarn=" + taskArn
}
agentTasksResp, err := http.Get(fullIntrospectionApiURL)
if err != nil {
return nil, err
}
bodyData, err := ioutil.ReadAll(agentTasksResp.Body)
if err != nil {
return nil, err
}
return &bodyData, nil
}
func (agent *TestAgent) RequireVersion(version string) {
if agent.Version == "UNKNOWN" {
agent.t.Skipf("Skipping test requiring version %v; agent version unknown", version)
}
matches, err := Version(agent.Version).Matches(version)
if err != nil {
agent.t.Skipf("Skipping test requiring version %v; could not compare because of error: %v", version, err)
}
if !matches {
agent.t.Skipf("Skipping test requiring version %v; agent version %v", version, agent.Version)
}
}
type TestTask struct {
*ecs.Task
}
func (task *TestTask) Redescribe() {
res, err := ECS.DescribeTasks(&ecs.DescribeTasksInput{
Cluster: task.ClusterArn,
Tasks: []*string{task.TaskArn},
})
if err == nil && len(res.Failures) == 0 {
task.Task = res.Tasks[0]
}
}
func (task *TestTask) waitStatus(timeout time.Duration, status string) error {
timer := time.NewTimer(timeout)
atStatus := make(chan error, 1)
cancelled := false
go func() {
if *task.LastStatus == "STOPPED" && status != "STOPPED" {
atStatus <- errors.New("Task terminal; will never reach " + status)
return
}
for *task.LastStatus != status && !cancelled {
task.Redescribe()
if *task.LastStatus == status {
break
}
if *task.LastStatus == "STOPPED" && status != "STOPPED" {
atStatus <- errors.New("Task terminal; will never reach " + status)
return
}
time.Sleep(5 * time.Second)
}
atStatus <- nil
}()
select {
case err := <-atStatus:
return err
case <-timer.C:
cancelled = true
return errors.New("Timed out waiting for task to reach " + status + ": " + *task.TaskDefinitionArn + ", " + *task.TaskArn)
}
}
func (task *TestTask) ContainerExitcode(name string) (int, bool) {
for _, cont := range task.Containers {
if cont != nil && cont.Name != nil && cont.ExitCode != nil {
if *cont.Name == name {
return int(*cont.ExitCode), true
}
}
}
return 0, false
}
func (task *TestTask) WaitRunning(timeout time.Duration) error {
return task.waitStatus(timeout, "RUNNING")
}
func (task *TestTask) WaitStopped(timeout time.Duration) error {
return task.waitStatus(timeout, "STOPPED")
}
func (task *TestTask) ExpectErrorType(containerName, errType string, timeout time.Duration) error {
task.WaitStopped(timeout)
for _, container := range task.Containers {
if *container.Name != containerName {
continue
}
if container.Reason == nil {
return errors.New("Expected error reason")
}
errParts := strings.SplitN(*container.Reason, ":", 2)
if len(errParts) != 2 {
return errors.New("Error did not have a type: " + *container.Reason)
}
if errParts[0] != errType {
return errors.New("Type did not match: " + *container.Reason)
}
return nil
}
return errors.New("Could not find container " + containerName + " in task " + *task.TaskArn)
}
func (task *TestTask) Stop() error {
_, err := ECS.StopTask(&ecs.StopTaskInput{
Cluster: task.ClusterArn,
Task: task.TaskArn,
})
return err
}
func RequireDockerVersion(t *testing.T, selector string) {
dockerClient, err := docker.NewClientFromEnv()
if err != nil {
t.Fatalf("Could not get docker client to check version: %v", err)
}
dockerVersion, err := dockerClient.Version()
if err != nil {
t.Fatalf("Could not get docker version: %v", err)
}
version := dockerVersion.Get("Version")
match, err := Version(version).Matches(selector)
if err != nil {
t.Fatalf("Could not check docker version to match required: %v", err)
}
if !match {
t.Skipf("Skipping test; requires %v, but version is %v", selector, version)
}
}
| {
agent.Version = string(versionNumberStr[1])
} | conditional_block |
utils.go | // Copyright 2014-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package util
import (
"crypto/md5"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"os"
"path"
"path/filepath"
"regexp"
"runtime"
"strings"
"testing"
"time"
"github.com/aws/amazon-ecs-agent/agent/ec2"
"github.com/aws/amazon-ecs-agent/agent/ecs_client/model/ecs"
"github.com/aws/amazon-ecs-agent/agent/handlers"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
docker "github.com/fsouza/go-dockerclient"
)
var ECS *ecs.ECS
var Cluster string
func init() {
var ecsconfig aws.Config
if region := os.Getenv("AWS_REGION"); region != "" {
ecsconfig.Region = ®ion
}
if region := os.Getenv("AWS_DEFAULT_REGION"); region != "" {
ecsconfig.Region = ®ion
}
if ecsconfig.Region == nil {
if iid, err := ec2.GetInstanceIdentityDocument(); err == nil {
ecsconfig.Region = &iid.Region
}
}
if envEndpoint := os.Getenv("ECS_BACKEND_HOST"); envEndpoint != "" {
ecsconfig.Endpoint = &envEndpoint
}
ECS = ecs.New(session.New(&ecsconfig))
Cluster = "ecs-functional-tests"
if envCluster := os.Getenv("ECS_CLUSTER"); envCluster != "" {
Cluster = envCluster
}
ECS.CreateCluster(&ecs.CreateClusterInput{
ClusterName: aws.String(Cluster),
})
}
// GetTaskDefinition is a helper that provies the family:revision for the named
// task definition where the name matches the folder in which the task
// definition is present. In order to avoid re-registering a task definition
// when it has already been regestered in the past, this registers a task
// definition of the pattern 'family-md5sum' with md5sum being the input task
// definition json's md5. This special family name is checked for existence
// before a new one is registered and it is assumed that if it exists, the task
// definition currently represented by the file was registered as such already.
func GetTaskDefinition(name string) (string, error) {
_, filename, _, _ := runtime.Caller(0)
tdData, err := ioutil.ReadFile(filepath.Join(path.Dir(filename), "..", "testdata", "taskdefinitions", name, "task-definition.json"))
if err != nil {
return "", err
}
registerRequest := &ecs.RegisterTaskDefinitionInput{}
err = json.Unmarshal(tdData, registerRequest)
if err != nil {
return "", err
}
tdHash := fmt.Sprintf("%x", md5.Sum(tdData))
idempotentFamily := *registerRequest.Family + "-" + tdHash
existing, err := ECS.DescribeTaskDefinition(&ecs.DescribeTaskDefinitionInput{
TaskDefinition: &idempotentFamily,
})
if err == nil {
return fmt.Sprintf("%s:%d", *existing.TaskDefinition.Family, *existing.TaskDefinition.Revision), nil
}
registerRequest.Family = &idempotentFamily
registered, err := ECS.RegisterTaskDefinition(registerRequest)
if err != nil {
return "", err
}
return fmt.Sprintf("%s:%d", *registered.TaskDefinition.Family, *registered.TaskDefinition.Revision), nil
}
type TestAgent struct {
Image string
DockerID string
IntrospectionURL string
Version string
ContainerInstanceArn string
Cluster string
TestDir string
Logdir string
Options *AgentOptions
DockerClient *docker.Client
t *testing.T
}
type AgentOptions struct {
ExtraEnvironment map[string]string
ContainerLinks []string
}
// RunAgent launches the agent and returns an object which may be used to reference it.
// It will wait until the agent is correctly registered before returning.
// 'version' may be a docker image (e.g. amazon/amazon-ecs-agent:v1.0.0) with
// tag that may be used to run the agent. It defaults to
// 'amazon/amazon-ecs-agent:make', the version created locally by running
// 'make'
func RunAgent(t *testing.T, options *AgentOptions) *TestAgent {
agent := &TestAgent{t: t}
agentImage := "amazon/amazon-ecs-agent:make"
if envImage := os.Getenv("ECS_AGENT_IMAGE"); envImage != "" {
agentImage = envImage
}
agent.Image = agentImage
dockerClient, err := docker.NewClientFromEnv()
if err != nil {
t.Fatal(err)
}
agent.DockerClient = dockerClient
_, err = dockerClient.InspectImage(agentImage)
if err != nil {
err = dockerClient.PullImage(docker.PullImageOptions{Repository: agentImage}, docker.AuthConfiguration{})
if err != nil {
t.Fatal("Could not launch agent", err)
}
}
tmpdirOverride := os.Getenv("ECS_FTEST_TMP")
agentTempdir, err := ioutil.TempDir(tmpdirOverride, "ecs_integ_testdata")
if err != nil {
t.Fatal("Could not create temp dir for test")
}
logdir := filepath.Join(agentTempdir, "logs")
datadir := filepath.Join(agentTempdir, "data")
os.Mkdir(logdir, 0755)
os.Mkdir(datadir, 0755)
agent.TestDir = agentTempdir
agent.Options = options
if options == nil {
agent.Options = &AgentOptions{}
}
t.Logf("Created directory %s to store test data in", agentTempdir)
err = agent.StartAgent()
if err != nil {
t.Fatal(err)
}
return agent
}
func (agent *TestAgent) StopAgent() error {
return agent.DockerClient.StopContainer(agent.DockerID, 10)
}
func (agent *TestAgent) StartAgent() error {
agent.t.Logf("Launching agent with image: %s\n", agent.Image)
logdir := filepath.Join(agent.TestDir, "logs")
datadir := filepath.Join(agent.TestDir, "data")
agent.Logdir = logdir
dockerConfig := &docker.Config{
Image: agent.Image,
ExposedPorts: map[docker.Port]struct{}{
"51678/tcp": struct{}{},
},
Env: []string{
"ECS_CLUSTER=" + Cluster,
"ECS_DATADIR=/data",
"ECS_LOGLEVEL=debug",
"ECS_LOGFILE=/logs/integ_agent.log",
"ECS_BACKEND_HOST=" + os.Getenv("ECS_BACKEND_HOST"),
"AWS_ACCESS_KEY_ID=" + os.Getenv("AWS_ACCESS_KEY_ID"),
"AWS_DEFAULT_REGION=" + *ECS.Config.Region,
"AWS_SECRET_ACCESS_KEY=" + os.Getenv("AWS_SECRET_ACCESS_KEY"),
"ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION=" + os.Getenv("ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION"),
},
Cmd: strings.Split(os.Getenv("ECS_FTEST_AGENT_ARGS"), " "),
}
hostConfig := &docker.HostConfig{
Binds: []string{
"/var/run/docker.sock:/var/run/docker.sock",
logdir + ":/logs",
datadir + ":/data",
},
PortBindings: map[docker.Port][]docker.PortBinding{
"51678/tcp": []docker.PortBinding{docker.PortBinding{HostIP: "0.0.0.0"}},
},
Links: agent.Options.ContainerLinks,
}
if agent.Options != nil {
for key, value := range agent.Options.ExtraEnvironment {
dockerConfig.Env = append(dockerConfig.Env, key+"="+value)
}
}
agentContainer, err := agent.DockerClient.CreateContainer(docker.CreateContainerOptions{
Config: dockerConfig,
HostConfig: hostConfig,
})
if err != nil {
agent.t.Fatal("Could not create agent container", err)
}
agent.DockerID = agentContainer.ID
agent.t.Logf("Agent started as docker container: %s\n", agentContainer.ID)
err = agent.DockerClient.StartContainer(agentContainer.ID, nil)
if err != nil {
return errors.New("Could not start agent container " + err.Error())
}
containerMetadata, err := agent.DockerClient.InspectContainer(agentContainer.ID)
if err != nil {
return errors.New("Could not inspect agent container: " + err.Error())
}
agent.IntrospectionURL = "http://localhost:" + containerMetadata.NetworkSettings.Ports["51678/tcp"][0].HostPort
// Wait up to 10s for it to register
var localMetadata handlers.MetadataResponse
for i := 0; i < 10; i++ {
func() {
agentMetadataResp, err := http.Get(agent.IntrospectionURL + "/v1/metadata")
if err != nil {
return
}
metadata, err := ioutil.ReadAll(agentMetadataResp.Body)
if err != nil {
return
}
json.Unmarshal(metadata, &localMetadata)
}()
if localMetadata.ContainerInstanceArn != nil && *localMetadata.ContainerInstanceArn != "" {
break
}
time.Sleep(1 * time.Second)
}
if localMetadata.ContainerInstanceArn == nil {
agent.DockerClient.StopContainer(agent.DockerID, 1)
return errors.New("Could not get agent metadata after launching it")
}
agent.ContainerInstanceArn = *localMetadata.ContainerInstanceArn
agent.Cluster = localMetadata.Cluster
if localMetadata.Version != "" {
versionNumberRegex := regexp.MustCompile(` v(\d+\.\d+\.\d+) `)
versionNumberStr := versionNumberRegex.FindStringSubmatch(localMetadata.Version)
if len(versionNumberStr) == 2 {
agent.Version = string(versionNumberStr[1])
}
}
if agent.Version == "" {
agent.Version = "UNKNOWN"
}
agent.t.Logf("Found agent metadata: %+v", localMetadata)
return nil
}
func (agent *TestAgent) Cleanup() {
agent.StopAgent()
if agent.t.Failed() {
agent.t.Logf("Preserving test dir for failed test %s", agent.TestDir)
} else {
agent.t.Logf("Removing test dir for passed test %s", agent.TestDir)
os.RemoveAll(agent.TestDir)
}
ECS.DeregisterContainerInstance(&ecs.DeregisterContainerInstanceInput{
Cluster: &agent.Cluster,
ContainerInstance: &agent.ContainerInstanceArn,
Force: aws.Bool(true),
})
}
func (agent *TestAgent) StartMultipleTasks(t *testing.T, task string, num int) ([]*TestTask, error) {
td, err := GetTaskDefinition(task)
if err != nil {
return nil, err
}
t.Logf("Task definition: %s", td)
cis := make([]*string, num)
for i := 0; i < num; i++ {
cis[i] = &agent.ContainerInstanceArn
}
resp, err := ECS.StartTask(&ecs.StartTaskInput{
Cluster: &agent.Cluster,
ContainerInstances: cis,
TaskDefinition: &td,
})
if err != nil {
return nil, err
}
if len(resp.Failures) != 0 || len(resp.Tasks) == 0 {
return nil, errors.New("Failure starting task: " + *resp.Failures[0].Reason)
}
testTasks := make([]*TestTask, num)
for i, task := range resp.Tasks {
agent.t.Logf("Started task: %s\n", *task.TaskArn)
testTasks[i] = &TestTask{task}
}
return testTasks, nil
}
func (agent *TestAgent) StartTask(t *testing.T, task string) (*TestTask, error) |
func (agent *TestAgent) StartTaskWithOverrides(t *testing.T, task string, overrides []*ecs.ContainerOverride) (*TestTask, error) {
td, err := GetTaskDefinition(task)
if err != nil {
return nil, err
}
t.Logf("Task definition: %s", td)
resp, err := ECS.StartTask(&ecs.StartTaskInput{
Cluster: &agent.Cluster,
ContainerInstances: []*string{&agent.ContainerInstanceArn},
TaskDefinition: &td,
Overrides: &ecs.TaskOverride{
ContainerOverrides: overrides,
},
})
if err != nil {
return nil, err
}
if len(resp.Failures) != 0 || len(resp.Tasks) == 0 {
return nil, errors.New("Failure starting task: " + *resp.Failures[0].Reason)
}
agent.t.Logf("Started task: %s\n", *resp.Tasks[0].TaskArn)
return &TestTask{resp.Tasks[0]}, nil
}
// ResolveTaskDockerID determines the Docker ID for a container within a given
// task that has been run by the Agent.
func (agent *TestAgent) ResolveTaskDockerID(task *TestTask, containerName string) (string, error) {
var err error
var dockerId string
for i := 0; i < 5; i++ {
dockerId, err = agent.resolveTaskDockerID(task, containerName)
if err == nil {
break
}
time.Sleep(100 * time.Millisecond)
}
return dockerId, err
}
func (agent *TestAgent) resolveTaskDockerID(task *TestTask, containerName string) (string, error) {
bodyData, err := agent.callTaskIntrospectionApi(*task.TaskArn)
if err != nil {
return "", err
}
var taskResp handlers.TaskResponse
err = json.Unmarshal(*bodyData, &taskResp)
if err != nil {
return "", err
}
if len(taskResp.Containers) == 0 {
return "", errors.New("No containers in task response")
}
for _, container := range taskResp.Containers {
if container.Name == containerName {
return container.DockerId, nil
}
}
return "", errors.New("No containers matched given name")
}
func (agent *TestAgent) WaitStoppedViaIntrospection(task *TestTask) (bool, error) {
var err error
var isStopped bool
for i := 0; i < 5; i++ {
isStopped, err = agent.waitStoppedViaIntrospection(task)
if err == nil {
break
}
time.Sleep(100 * time.Millisecond)
}
return isStopped, err
}
func (agent *TestAgent) waitStoppedViaIntrospection(task *TestTask) (bool, error) {
rawResponse, err := agent.callTaskIntrospectionApi(*task.TaskArn)
if err != nil {
return false, err
}
var taskResp handlers.TaskResponse
err = json.Unmarshal(*rawResponse, &taskResp)
if taskResp.KnownStatus == "STOPPED" {
return true, nil
} else {
return false, errors.New("Task should be STOPPED but is " + taskResp.KnownStatus)
}
}
func (agent *TestAgent) WaitRunningViaIntrospection(task *TestTask) (bool, error) {
var err error
var isRunning bool
for i := 0; i < 5; i++ {
isRunning, err = agent.waitRunningViaIntrospection(task)
if err == nil && isRunning {
break
}
time.Sleep(10000 * time.Millisecond)
}
return isRunning, err
}
func (agent *TestAgent) waitRunningViaIntrospection(task *TestTask) (bool, error) {
rawResponse, err := agent.callTaskIntrospectionApi(*task.TaskArn)
if err != nil {
return false, err
}
var taskResp handlers.TaskResponse
err = json.Unmarshal(*rawResponse, &taskResp)
if taskResp.KnownStatus == "RUNNING" {
return true, nil
} else {
return false, errors.New("Task should be RUNNING but is " + taskResp.KnownStatus)
}
}
func (agent *TestAgent) callTaskIntrospectionApi(taskArn string) (*[]byte, error) {
fullIntrospectionApiURL := agent.IntrospectionURL + "/v1/tasks"
if taskArn != "" {
fullIntrospectionApiURL += "?taskarn=" + taskArn
}
agentTasksResp, err := http.Get(fullIntrospectionApiURL)
if err != nil {
return nil, err
}
bodyData, err := ioutil.ReadAll(agentTasksResp.Body)
if err != nil {
return nil, err
}
return &bodyData, nil
}
func (agent *TestAgent) RequireVersion(version string) {
if agent.Version == "UNKNOWN" {
agent.t.Skipf("Skipping test requiring version %v; agent version unknown", version)
}
matches, err := Version(agent.Version).Matches(version)
if err != nil {
agent.t.Skipf("Skipping test requiring version %v; could not compare because of error: %v", version, err)
}
if !matches {
agent.t.Skipf("Skipping test requiring version %v; agent version %v", version, agent.Version)
}
}
type TestTask struct {
*ecs.Task
}
func (task *TestTask) Redescribe() {
res, err := ECS.DescribeTasks(&ecs.DescribeTasksInput{
Cluster: task.ClusterArn,
Tasks: []*string{task.TaskArn},
})
if err == nil && len(res.Failures) == 0 {
task.Task = res.Tasks[0]
}
}
func (task *TestTask) waitStatus(timeout time.Duration, status string) error {
timer := time.NewTimer(timeout)
atStatus := make(chan error, 1)
cancelled := false
go func() {
if *task.LastStatus == "STOPPED" && status != "STOPPED" {
atStatus <- errors.New("Task terminal; will never reach " + status)
return
}
for *task.LastStatus != status && !cancelled {
task.Redescribe()
if *task.LastStatus == status {
break
}
if *task.LastStatus == "STOPPED" && status != "STOPPED" {
atStatus <- errors.New("Task terminal; will never reach " + status)
return
}
time.Sleep(5 * time.Second)
}
atStatus <- nil
}()
select {
case err := <-atStatus:
return err
case <-timer.C:
cancelled = true
return errors.New("Timed out waiting for task to reach " + status + ": " + *task.TaskDefinitionArn + ", " + *task.TaskArn)
}
}
func (task *TestTask) ContainerExitcode(name string) (int, bool) {
for _, cont := range task.Containers {
if cont != nil && cont.Name != nil && cont.ExitCode != nil {
if *cont.Name == name {
return int(*cont.ExitCode), true
}
}
}
return 0, false
}
func (task *TestTask) WaitRunning(timeout time.Duration) error {
return task.waitStatus(timeout, "RUNNING")
}
func (task *TestTask) WaitStopped(timeout time.Duration) error {
return task.waitStatus(timeout, "STOPPED")
}
func (task *TestTask) ExpectErrorType(containerName, errType string, timeout time.Duration) error {
task.WaitStopped(timeout)
for _, container := range task.Containers {
if *container.Name != containerName {
continue
}
if container.Reason == nil {
return errors.New("Expected error reason")
}
errParts := strings.SplitN(*container.Reason, ":", 2)
if len(errParts) != 2 {
return errors.New("Error did not have a type: " + *container.Reason)
}
if errParts[0] != errType {
return errors.New("Type did not match: " + *container.Reason)
}
return nil
}
return errors.New("Could not find container " + containerName + " in task " + *task.TaskArn)
}
func (task *TestTask) Stop() error {
_, err := ECS.StopTask(&ecs.StopTaskInput{
Cluster: task.ClusterArn,
Task: task.TaskArn,
})
return err
}
func RequireDockerVersion(t *testing.T, selector string) {
dockerClient, err := docker.NewClientFromEnv()
if err != nil {
t.Fatalf("Could not get docker client to check version: %v", err)
}
dockerVersion, err := dockerClient.Version()
if err != nil {
t.Fatalf("Could not get docker version: %v", err)
}
version := dockerVersion.Get("Version")
match, err := Version(version).Matches(selector)
if err != nil {
t.Fatalf("Could not check docker version to match required: %v", err)
}
if !match {
t.Skipf("Skipping test; requires %v, but version is %v", selector, version)
}
}
| {
tasks, err := agent.StartMultipleTasks(t, task, 1)
if err != nil {
return nil, err
}
return tasks[0], nil
} | identifier_body |
utils.go | // Copyright 2014-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package util
import (
"crypto/md5"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"os"
"path"
"path/filepath"
"regexp"
"runtime"
"strings"
"testing"
"time"
"github.com/aws/amazon-ecs-agent/agent/ec2"
"github.com/aws/amazon-ecs-agent/agent/ecs_client/model/ecs"
"github.com/aws/amazon-ecs-agent/agent/handlers"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
docker "github.com/fsouza/go-dockerclient"
)
var ECS *ecs.ECS
var Cluster string
func init() {
var ecsconfig aws.Config
if region := os.Getenv("AWS_REGION"); region != "" {
ecsconfig.Region = ®ion
}
if region := os.Getenv("AWS_DEFAULT_REGION"); region != "" {
ecsconfig.Region = ®ion
}
if ecsconfig.Region == nil {
if iid, err := ec2.GetInstanceIdentityDocument(); err == nil {
ecsconfig.Region = &iid.Region
}
}
if envEndpoint := os.Getenv("ECS_BACKEND_HOST"); envEndpoint != "" {
ecsconfig.Endpoint = &envEndpoint
}
ECS = ecs.New(session.New(&ecsconfig))
Cluster = "ecs-functional-tests"
if envCluster := os.Getenv("ECS_CLUSTER"); envCluster != "" {
Cluster = envCluster
}
ECS.CreateCluster(&ecs.CreateClusterInput{
ClusterName: aws.String(Cluster),
})
}
// GetTaskDefinition is a helper that provies the family:revision for the named
// task definition where the name matches the folder in which the task
// definition is present. In order to avoid re-registering a task definition
// when it has already been regestered in the past, this registers a task
// definition of the pattern 'family-md5sum' with md5sum being the input task
// definition json's md5. This special family name is checked for existence
// before a new one is registered and it is assumed that if it exists, the task
// definition currently represented by the file was registered as such already.
func GetTaskDefinition(name string) (string, error) {
_, filename, _, _ := runtime.Caller(0)
tdData, err := ioutil.ReadFile(filepath.Join(path.Dir(filename), "..", "testdata", "taskdefinitions", name, "task-definition.json"))
if err != nil {
return "", err
}
registerRequest := &ecs.RegisterTaskDefinitionInput{}
err = json.Unmarshal(tdData, registerRequest)
if err != nil {
return "", err
}
tdHash := fmt.Sprintf("%x", md5.Sum(tdData))
idempotentFamily := *registerRequest.Family + "-" + tdHash
existing, err := ECS.DescribeTaskDefinition(&ecs.DescribeTaskDefinitionInput{
TaskDefinition: &idempotentFamily,
})
if err == nil {
return fmt.Sprintf("%s:%d", *existing.TaskDefinition.Family, *existing.TaskDefinition.Revision), nil
}
registerRequest.Family = &idempotentFamily
registered, err := ECS.RegisterTaskDefinition(registerRequest)
if err != nil {
return "", err
}
return fmt.Sprintf("%s:%d", *registered.TaskDefinition.Family, *registered.TaskDefinition.Revision), nil
}
type TestAgent struct {
Image string
DockerID string
IntrospectionURL string
Version string
ContainerInstanceArn string
Cluster string
TestDir string
Logdir string
Options *AgentOptions
DockerClient *docker.Client
t *testing.T
}
type AgentOptions struct {
ExtraEnvironment map[string]string
ContainerLinks []string
}
// RunAgent launches the agent and returns an object which may be used to reference it.
// It will wait until the agent is correctly registered before returning.
// 'version' may be a docker image (e.g. amazon/amazon-ecs-agent:v1.0.0) with
// tag that may be used to run the agent. It defaults to
// 'amazon/amazon-ecs-agent:make', the version created locally by running
// 'make'
func RunAgent(t *testing.T, options *AgentOptions) *TestAgent {
agent := &TestAgent{t: t}
agentImage := "amazon/amazon-ecs-agent:make"
if envImage := os.Getenv("ECS_AGENT_IMAGE"); envImage != "" {
agentImage = envImage
}
agent.Image = agentImage
dockerClient, err := docker.NewClientFromEnv()
if err != nil {
t.Fatal(err)
}
agent.DockerClient = dockerClient
_, err = dockerClient.InspectImage(agentImage)
if err != nil {
err = dockerClient.PullImage(docker.PullImageOptions{Repository: agentImage}, docker.AuthConfiguration{})
if err != nil {
t.Fatal("Could not launch agent", err)
}
}
tmpdirOverride := os.Getenv("ECS_FTEST_TMP")
agentTempdir, err := ioutil.TempDir(tmpdirOverride, "ecs_integ_testdata")
if err != nil {
t.Fatal("Could not create temp dir for test")
}
logdir := filepath.Join(agentTempdir, "logs")
datadir := filepath.Join(agentTempdir, "data")
os.Mkdir(logdir, 0755)
os.Mkdir(datadir, 0755)
agent.TestDir = agentTempdir
agent.Options = options
if options == nil {
agent.Options = &AgentOptions{}
}
t.Logf("Created directory %s to store test data in", agentTempdir)
err = agent.StartAgent()
if err != nil {
t.Fatal(err)
}
return agent
}
func (agent *TestAgent) StopAgent() error {
return agent.DockerClient.StopContainer(agent.DockerID, 10)
}
func (agent *TestAgent) StartAgent() error {
agent.t.Logf("Launching agent with image: %s\n", agent.Image)
logdir := filepath.Join(agent.TestDir, "logs")
datadir := filepath.Join(agent.TestDir, "data")
agent.Logdir = logdir
dockerConfig := &docker.Config{
Image: agent.Image,
ExposedPorts: map[docker.Port]struct{}{
"51678/tcp": struct{}{},
},
Env: []string{
"ECS_CLUSTER=" + Cluster,
"ECS_DATADIR=/data",
"ECS_LOGLEVEL=debug",
"ECS_LOGFILE=/logs/integ_agent.log",
"ECS_BACKEND_HOST=" + os.Getenv("ECS_BACKEND_HOST"),
"AWS_ACCESS_KEY_ID=" + os.Getenv("AWS_ACCESS_KEY_ID"),
"AWS_DEFAULT_REGION=" + *ECS.Config.Region,
"AWS_SECRET_ACCESS_KEY=" + os.Getenv("AWS_SECRET_ACCESS_KEY"),
"ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION=" + os.Getenv("ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION"),
},
Cmd: strings.Split(os.Getenv("ECS_FTEST_AGENT_ARGS"), " "),
}
hostConfig := &docker.HostConfig{
Binds: []string{
"/var/run/docker.sock:/var/run/docker.sock",
logdir + ":/logs",
datadir + ":/data",
},
PortBindings: map[docker.Port][]docker.PortBinding{
"51678/tcp": []docker.PortBinding{docker.PortBinding{HostIP: "0.0.0.0"}},
},
Links: agent.Options.ContainerLinks,
}
if agent.Options != nil {
for key, value := range agent.Options.ExtraEnvironment {
dockerConfig.Env = append(dockerConfig.Env, key+"="+value)
}
}
agentContainer, err := agent.DockerClient.CreateContainer(docker.CreateContainerOptions{
Config: dockerConfig,
HostConfig: hostConfig,
})
if err != nil {
agent.t.Fatal("Could not create agent container", err)
}
agent.DockerID = agentContainer.ID
agent.t.Logf("Agent started as docker container: %s\n", agentContainer.ID)
err = agent.DockerClient.StartContainer(agentContainer.ID, nil)
if err != nil {
return errors.New("Could not start agent container " + err.Error())
}
containerMetadata, err := agent.DockerClient.InspectContainer(agentContainer.ID)
if err != nil {
return errors.New("Could not inspect agent container: " + err.Error())
}
agent.IntrospectionURL = "http://localhost:" + containerMetadata.NetworkSettings.Ports["51678/tcp"][0].HostPort
// Wait up to 10s for it to register
var localMetadata handlers.MetadataResponse
for i := 0; i < 10; i++ {
func() {
agentMetadataResp, err := http.Get(agent.IntrospectionURL + "/v1/metadata")
if err != nil {
return
}
metadata, err := ioutil.ReadAll(agentMetadataResp.Body)
if err != nil {
return
}
json.Unmarshal(metadata, &localMetadata)
}()
if localMetadata.ContainerInstanceArn != nil && *localMetadata.ContainerInstanceArn != "" {
break
}
time.Sleep(1 * time.Second)
}
if localMetadata.ContainerInstanceArn == nil {
agent.DockerClient.StopContainer(agent.DockerID, 1)
return errors.New("Could not get agent metadata after launching it")
}
agent.ContainerInstanceArn = *localMetadata.ContainerInstanceArn
agent.Cluster = localMetadata.Cluster
if localMetadata.Version != "" {
versionNumberRegex := regexp.MustCompile(` v(\d+\.\d+\.\d+) `)
versionNumberStr := versionNumberRegex.FindStringSubmatch(localMetadata.Version)
if len(versionNumberStr) == 2 {
agent.Version = string(versionNumberStr[1])
}
}
if agent.Version == "" {
agent.Version = "UNKNOWN"
}
agent.t.Logf("Found agent metadata: %+v", localMetadata)
return nil
}
func (agent *TestAgent) Cleanup() {
agent.StopAgent()
if agent.t.Failed() {
agent.t.Logf("Preserving test dir for failed test %s", agent.TestDir)
} else {
agent.t.Logf("Removing test dir for passed test %s", agent.TestDir)
os.RemoveAll(agent.TestDir)
}
ECS.DeregisterContainerInstance(&ecs.DeregisterContainerInstanceInput{
Cluster: &agent.Cluster,
ContainerInstance: &agent.ContainerInstanceArn,
Force: aws.Bool(true),
})
}
func (agent *TestAgent) StartMultipleTasks(t *testing.T, task string, num int) ([]*TestTask, error) {
td, err := GetTaskDefinition(task)
if err != nil {
return nil, err
}
t.Logf("Task definition: %s", td)
cis := make([]*string, num)
for i := 0; i < num; i++ {
cis[i] = &agent.ContainerInstanceArn
}
resp, err := ECS.StartTask(&ecs.StartTaskInput{
Cluster: &agent.Cluster,
ContainerInstances: cis,
TaskDefinition: &td,
})
if err != nil {
return nil, err
}
if len(resp.Failures) != 0 || len(resp.Tasks) == 0 {
return nil, errors.New("Failure starting task: " + *resp.Failures[0].Reason)
}
testTasks := make([]*TestTask, num)
for i, task := range resp.Tasks {
agent.t.Logf("Started task: %s\n", *task.TaskArn)
testTasks[i] = &TestTask{task}
}
return testTasks, nil
}
func (agent *TestAgent) StartTask(t *testing.T, task string) (*TestTask, error) {
tasks, err := agent.StartMultipleTasks(t, task, 1)
if err != nil {
return nil, err
}
return tasks[0], nil
}
func (agent *TestAgent) StartTaskWithOverrides(t *testing.T, task string, overrides []*ecs.ContainerOverride) (*TestTask, error) {
td, err := GetTaskDefinition(task)
if err != nil {
return nil, err
}
t.Logf("Task definition: %s", td)
resp, err := ECS.StartTask(&ecs.StartTaskInput{
Cluster: &agent.Cluster,
ContainerInstances: []*string{&agent.ContainerInstanceArn},
TaskDefinition: &td,
Overrides: &ecs.TaskOverride{
ContainerOverrides: overrides,
},
})
if err != nil {
return nil, err
}
if len(resp.Failures) != 0 || len(resp.Tasks) == 0 {
return nil, errors.New("Failure starting task: " + *resp.Failures[0].Reason)
}
agent.t.Logf("Started task: %s\n", *resp.Tasks[0].TaskArn)
return &TestTask{resp.Tasks[0]}, nil
}
// ResolveTaskDockerID determines the Docker ID for a container within a given
// task that has been run by the Agent.
func (agent *TestAgent) ResolveTaskDockerID(task *TestTask, containerName string) (string, error) {
var err error
var dockerId string
for i := 0; i < 5; i++ {
dockerId, err = agent.resolveTaskDockerID(task, containerName)
if err == nil {
break
}
time.Sleep(100 * time.Millisecond)
}
return dockerId, err
}
func (agent *TestAgent) resolveTaskDockerID(task *TestTask, containerName string) (string, error) {
bodyData, err := agent.callTaskIntrospectionApi(*task.TaskArn)
if err != nil {
return "", err
}
var taskResp handlers.TaskResponse | if len(taskResp.Containers) == 0 {
return "", errors.New("No containers in task response")
}
for _, container := range taskResp.Containers {
if container.Name == containerName {
return container.DockerId, nil
}
}
return "", errors.New("No containers matched given name")
}
func (agent *TestAgent) WaitStoppedViaIntrospection(task *TestTask) (bool, error) {
var err error
var isStopped bool
for i := 0; i < 5; i++ {
isStopped, err = agent.waitStoppedViaIntrospection(task)
if err == nil {
break
}
time.Sleep(100 * time.Millisecond)
}
return isStopped, err
}
func (agent *TestAgent) waitStoppedViaIntrospection(task *TestTask) (bool, error) {
rawResponse, err := agent.callTaskIntrospectionApi(*task.TaskArn)
if err != nil {
return false, err
}
var taskResp handlers.TaskResponse
err = json.Unmarshal(*rawResponse, &taskResp)
if taskResp.KnownStatus == "STOPPED" {
return true, nil
} else {
return false, errors.New("Task should be STOPPED but is " + taskResp.KnownStatus)
}
}
func (agent *TestAgent) WaitRunningViaIntrospection(task *TestTask) (bool, error) {
var err error
var isRunning bool
for i := 0; i < 5; i++ {
isRunning, err = agent.waitRunningViaIntrospection(task)
if err == nil && isRunning {
break
}
time.Sleep(10000 * time.Millisecond)
}
return isRunning, err
}
func (agent *TestAgent) waitRunningViaIntrospection(task *TestTask) (bool, error) {
rawResponse, err := agent.callTaskIntrospectionApi(*task.TaskArn)
if err != nil {
return false, err
}
var taskResp handlers.TaskResponse
err = json.Unmarshal(*rawResponse, &taskResp)
if taskResp.KnownStatus == "RUNNING" {
return true, nil
} else {
return false, errors.New("Task should be RUNNING but is " + taskResp.KnownStatus)
}
}
func (agent *TestAgent) callTaskIntrospectionApi(taskArn string) (*[]byte, error) {
fullIntrospectionApiURL := agent.IntrospectionURL + "/v1/tasks"
if taskArn != "" {
fullIntrospectionApiURL += "?taskarn=" + taskArn
}
agentTasksResp, err := http.Get(fullIntrospectionApiURL)
if err != nil {
return nil, err
}
bodyData, err := ioutil.ReadAll(agentTasksResp.Body)
if err != nil {
return nil, err
}
return &bodyData, nil
}
func (agent *TestAgent) RequireVersion(version string) {
if agent.Version == "UNKNOWN" {
agent.t.Skipf("Skipping test requiring version %v; agent version unknown", version)
}
matches, err := Version(agent.Version).Matches(version)
if err != nil {
agent.t.Skipf("Skipping test requiring version %v; could not compare because of error: %v", version, err)
}
if !matches {
agent.t.Skipf("Skipping test requiring version %v; agent version %v", version, agent.Version)
}
}
type TestTask struct {
*ecs.Task
}
func (task *TestTask) Redescribe() {
res, err := ECS.DescribeTasks(&ecs.DescribeTasksInput{
Cluster: task.ClusterArn,
Tasks: []*string{task.TaskArn},
})
if err == nil && len(res.Failures) == 0 {
task.Task = res.Tasks[0]
}
}
func (task *TestTask) waitStatus(timeout time.Duration, status string) error {
timer := time.NewTimer(timeout)
atStatus := make(chan error, 1)
cancelled := false
go func() {
if *task.LastStatus == "STOPPED" && status != "STOPPED" {
atStatus <- errors.New("Task terminal; will never reach " + status)
return
}
for *task.LastStatus != status && !cancelled {
task.Redescribe()
if *task.LastStatus == status {
break
}
if *task.LastStatus == "STOPPED" && status != "STOPPED" {
atStatus <- errors.New("Task terminal; will never reach " + status)
return
}
time.Sleep(5 * time.Second)
}
atStatus <- nil
}()
select {
case err := <-atStatus:
return err
case <-timer.C:
cancelled = true
return errors.New("Timed out waiting for task to reach " + status + ": " + *task.TaskDefinitionArn + ", " + *task.TaskArn)
}
}
func (task *TestTask) ContainerExitcode(name string) (int, bool) {
for _, cont := range task.Containers {
if cont != nil && cont.Name != nil && cont.ExitCode != nil {
if *cont.Name == name {
return int(*cont.ExitCode), true
}
}
}
return 0, false
}
func (task *TestTask) WaitRunning(timeout time.Duration) error {
return task.waitStatus(timeout, "RUNNING")
}
func (task *TestTask) WaitStopped(timeout time.Duration) error {
return task.waitStatus(timeout, "STOPPED")
}
func (task *TestTask) ExpectErrorType(containerName, errType string, timeout time.Duration) error {
task.WaitStopped(timeout)
for _, container := range task.Containers {
if *container.Name != containerName {
continue
}
if container.Reason == nil {
return errors.New("Expected error reason")
}
errParts := strings.SplitN(*container.Reason, ":", 2)
if len(errParts) != 2 {
return errors.New("Error did not have a type: " + *container.Reason)
}
if errParts[0] != errType {
return errors.New("Type did not match: " + *container.Reason)
}
return nil
}
return errors.New("Could not find container " + containerName + " in task " + *task.TaskArn)
}
func (task *TestTask) Stop() error {
_, err := ECS.StopTask(&ecs.StopTaskInput{
Cluster: task.ClusterArn,
Task: task.TaskArn,
})
return err
}
func RequireDockerVersion(t *testing.T, selector string) {
dockerClient, err := docker.NewClientFromEnv()
if err != nil {
t.Fatalf("Could not get docker client to check version: %v", err)
}
dockerVersion, err := dockerClient.Version()
if err != nil {
t.Fatalf("Could not get docker version: %v", err)
}
version := dockerVersion.Get("Version")
match, err := Version(version).Matches(selector)
if err != nil {
t.Fatalf("Could not check docker version to match required: %v", err)
}
if !match {
t.Skipf("Skipping test; requires %v, but version is %v", selector, version)
}
} | err = json.Unmarshal(*bodyData, &taskResp)
if err != nil {
return "", err
} | random_line_split |
script.js | // 1 zadanie
var exe = document.querySelector('#root');
var newElem = document.createElement('div');
newElem.innerHTML = 'To jest nowy element';
exe.appendChild(newElem);
// 2 zadanie
const myTab = ["Jabłko", "Pomarańcza", "Banan", "Wiśnia", "Granat", "Arbuz"];
const myList = document.createElement('ol');
myList.id = 'myList';
myTab.forEach((a) => {
var myFruit = document.createElement('li');
myFruit.innerText = a;
myList.append(myFruit);
});
newElem.append(myList);
document.body.insertBefore(newElem, document.getElementById('root'));
// 3 zadanie
newElem.addEventListener('click', () => {
Array.from(myList.children).forEach((el, x) => {
if (x % 2 === 1) {
myList.removeChild(el);
}
});
});
// 4 zadanie
const button = document.createElement('button');
button.innerText = 'Click to remove';
button.addEventListener('click', (e) => {
e.target.remove();
});
document.body.appendChild(button);
// 5 zadanie
const rand = Math.floor(Math.random() * 20);
for (let i = 0; i < rand; i++) {
const randDiv = document.createElement('div');
randDiv.innerText = `to jest div numer ${i}`;
document.body.appendChild(randDiv);
}
// 6 zadanie
const myNewObjStr = {
div1: 'to jest div1',
span1: 'to jest span1',
div2: {
div3: 'to jest div3(2)',
},
span2: 'to jest span2'
}
const r00t = document.getElementById('root');
const firstDiv = document.createElement('div');
firstDiv.innerText = myNewObjStr.div1;
const firstSpan = document.createElement('span');
firstSpan.innerText = myNewObjStr.span1;
firstDiv.append(firstSpan);
document.body.append(firstDiv, r00t);
const secondDiv = document.createElement('div');
const thirdDiv = document.createElement('div');
thirdDiv.innerText = myNewObjStr.div2.div3;
const secondSpan = document.createElement('span');
secondSpan.innerText = myNewObjStr.span2;
secondDiv.append(thirdDiv);
secondDiv.append(secondSpan);
document.body.append(secondDiv, r00t);
// 7 zadanie
const favFruits = ['arbuz', 'granat', 'mango', 'banan', 'banan', 'pomarańcza', 'wiśnia'];
const ul1 = document.createElement("ul");
const ul2 = document.createElement("ul");
favFruits.forEach(v => {
const li = document.createElement("li");
li.innerText = v;
ul1.appendChild(li);
});
const lists = [ul1, ul2];
const buttons = [document.createElement("button"), document.createElement("button")];
function check |
lists.forEach((ul, i) => {
if (ul.childElementCount <= 1) {
buttons[i].disabled = true;
} else {
buttons[i].disabled = false;
}
})
}
lists.forEach((ul, i) => {
buttons[i].innerText = 'MOVE';
buttons[i].addEventListener('click', () => {
const listItems = ul.querySelectorAll('li');
const childToTransfer = listItems[listItems.length - 1];
if (i === 0) {
ul2.insertBefore(childToTransfer, buttons[1]);
} else {
ul1.insertBefore(childToTransfer, buttons[0]);
}
checkButtonDisabled();
});
ul.appendChild(buttons[i]);
document.body.appendChild(ul);
});
checkButtonDisabled();
// 8 zadanie
const fieldset = document.createElement('fieldset');
const inputs = [{
label: 'Element',
id: 'el',
type: 'text'
}, {
label: 'Tekst',
id: 'text',
type: 'text'
}, {
label: 'Kolor',
id: 'color',
type: 'color'
}, {
label: 'Ile razy',
id: 'count',
type: 'number'
}, {
label: 'Utwórz',
type: 'submit'
}];
inputs.forEach(v => {
const elInput = document.createElement('input');
let label = document.createElement('hr');
elInput.style.display = 'block';
elInput.type = v.type;
elInput.id = v.id || null;
if (v.type === 'submit') {
elInput.value = v.label;
elInput.addEventListener('click', (e) => {
createElement(e);
});
} else {
label = document.createElement('label');
label.innerText = v.label;
label.for = v.id;
}
fieldset.appendChild(label);
fieldset.appendChild(elInput);
});
function createElement(e) {
e.preventDefault();
let el = null;
inputs.forEach((v) => {
const value = document.getElementById(v.id)?.value;
switch (v.id) {
case 'el': el = document.createElement(value); break;
case 'text': el.innerText = value; break;
case 'color': el.style.color = value; break;
case 'count': for (let i = 1; i <= Number(value); i++) {
document.body.appendChild(el.cloneNode(true));
} break;
}
});
}
document.body.appendChild(fieldset);
// 9 zadanie
var nextFormulage = document.createElement("form");
root.appendChild(nextFormulage);
var yourName = document.createElement("input");
yourName.setAttribute('type', 'text');
yourName.setAttribute('value', 'Name');
var lastname = document.createElement("input");
lastname.setAttribute('type', 'text');
lastname.setAttribute('value', 'Lastname');
var age = document.createElement("input");
age.setAttribute('type', 'text');
age.setAttribute('value', 'Age');
var howManyKids = document.createElement("input");
howManyKids.setAttribute('type', 'text');
howManyKids.setAttribute('value', 'HowManyKids');
nextFormulage.appendChild(yourName);
nextFormulage.appendChild(lastname);
nextFormulage.appendChild(age);
nextFormulage.appendChild(howManyKids);
var moreButton = document.createElement('button');
moreButton.id = "more"
moreButton.type = 'button'
moreButton.innerText = "Więcej";
nextFormulage.appendChild(moreButton);
var createButton = document.createElement('button');
createButton.id = "create"
createButton.type = 'button'
createButton.innerText = "Utwórz";
nextFormulage.appendChild(createButton);
var nameTab = [];
var lastnameTab = [];
var ageTab = [];
var kidsTab = [];
function moreFields(){
nameTab.push(yourName.value);
lastnameTab.push(lastname.value);
ageTab.push(age.value);
kidsTab.push(howManyKids.value);
yourName.value = '';
lastname.value = '';
age.value = '';
howManyKids.value = '';
}
document.querySelector('#more').addEventListener('click', moreFields);
function createTable(){
nameTab.push(yourName.value);
lastnameTab.push(lastname.value);
ageTab.push(age.value);
kidsTab.push(howManyKids.value);
yourName.value = '';
lastname.value = '';
age.value = '';
howManyKids.value = '';
var tab = document.createElement("table");
var header = document.createElement('tr');
tab.appendChild(header);
var name = document.createElement('th');
name.innerHTML = "Name";
var lastName = document.createElement('th');
lastName.innerHTML = "Lastname";
var age1 = document.createElement('th');
age1.innerHTML = "Age";
var kids = document.createElement('th');
kids.innerHTML = "HowManyKids";
root.appendChild(tab);
header.appendChild(name);
header.appendChild(lastName);
header.appendChild(age1);
header.appendChild(kids);
for (var i = 0; i < nameTab.length; i++) {
var item = document.createElement('tr');
tab.appendChild(item);
var del = document.createElement('button');
del.innerText = "Usuń";
item.appendChild(del);
var newName = document.createElement('td');
newName.innerText = nameTab[i];
var newLastname = document.createElement('td');
newLastname.innerText = lastnameTab[i];
var newAge = document.createElement('td');
newAge.innerText = ageTab[i];
var newKids = document.createElement('td');
newKids.innerText= kidsTab[i];
item.appendChild(newName);
item.appendChild(newLastname);
item.appendChild(newAge);
item.appendChild(newKids);
item.appendChild(del);
del.addEventListener('click', deleteA);
}
nameTab = [];
lastnameTab = [];
ageTab = [];
kidsTab = [];
}
function deleteA(e) {
e.target .parentElement.remove()
}
document.querySelector('#create').addEventListener('click', createTable);
// 10 zadanie
let changeButton = document.createElement('button');
changeButton.id = "change"
changeButton.type = 'button'
changeButton.innerText = "Użyj dużych liter!";
root.appendChild(changeButton);
function changeLetters() {
document.querySelectorAll('tr').forEach((row) => {
row.querySelectorAll('td').forEach((cell) => {
cell.innerText = cell.innerText[0].toUpperCase() + cell.innerText.slice(1);
})
})
}
document.querySelector('#change').addEventListener('click', changeLetters);
// 11 zadanie
function extarctNumbersAndMultiplyToDivs(str) {
const numbers = str.match(/[0-9]+/g);
if (numbers.length > 0) {
console.log(numbers.reduce((a, b) => Number(a) + Number(b)));
const mmultiplier = numbers.reduce((a, b) => Number(a) * Number(b));
for (let i = 0; i < mmultiplier; i++) {
const div = document.createElement('div');
div.innerText = `div${i}`;
document.body.appendChild(div);
}
}
}
extarctNumbersAndMultiplyToDivs('foo1bar2test10nice2');
// 12 zadanie
function createObj(str) {
return {
string: str
}
}
const alaStr = createObj('Ala ma kota');
alaStr.alaToOla = function () {
if (this.string.includes('Ala')) {
this.string = this.string.replaceAll('Ala', 'Ola');
console.log(this.string);
} else {
const div = document.createElement('div');
div.innerText = 'Słowo Ala nie występuje w tekście.';
document.body.appendChild(div);
}
}
alaStr.alaToOla();
// 13 zadanie
function countForMe(stringArr){
var howManyLetters = [];
for(var i = 0; i < stringArr.length; i++){
howManyLetters[i] = stringArr[i].length;
}
return howManyLetters;
}
function showAvg(summ){
var average = summ / tabWithWords.length;
return average;
}
function sum(howManyLetters){
var summ = howManyLetters.reduce((prev,curr) => prev += curr);
return summ;
}
var tabWithWords = ['Mornings','are','for','coffee','and', 'contemplation', 'StrangerThings'];
console.log("Letters in each word:");
console.log(countForMe(tabWithWords));
console.log("Sum of letters:");
console.log(sum(countForMe(tabWithWords)));
console.log("Average:");
console.log(showAvg(sum(countForMe(tabWithWords))));
// 14 zadanie
let obj = {
name: '',
surname: ''
};
function modifyObj(name, surname) {
obj.name = name;
obj.surname = surname;
obj.nameLength = name.length;
obj.surnameLength = surname.length;
if (name.length > 5 || surname.length > 5) {
const btn = document.createElement('button');
btn.innerText = 'Restore';
btn.addEventListener('click', (e) => {
obj = {
name: '',
surname: ''
}
});
document.body.appendChild(btn)
}
}
modifyObj('Michał', 'Orłowski');
| ButtonDisabled() {
| identifier_name |
script.js | // 1 zadanie
var exe = document.querySelector('#root');
var newElem = document.createElement('div');
newElem.innerHTML = 'To jest nowy element';
exe.appendChild(newElem);
// 2 zadanie
const myTab = ["Jabłko", "Pomarańcza", "Banan", "Wiśnia", "Granat", "Arbuz"];
const myList = document.createElement('ol');
myList.id = 'myList';
myTab.forEach((a) => {
var myFruit = document.createElement('li');
myFruit.innerText = a;
myList.append(myFruit);
});
newElem.append(myList);
document.body.insertBefore(newElem, document.getElementById('root'));
// 3 zadanie
newElem.addEventListener('click', () => {
Array.from(myList.children).forEach((el, x) => {
if (x % 2 === 1) {
myList.removeChild(el);
}
});
});
// 4 zadanie
const button = document.createElement('button');
button.innerText = 'Click to remove';
button.addEventListener('click', (e) => {
e.target.remove();
});
document.body.appendChild(button);
// 5 zadanie
const rand = Math.floor(Math.random() * 20);
for (let i = 0; i < rand; i++) {
const randDiv = document.createElement('div');
randDiv.innerText = `to jest div numer ${i}`;
document.body.appendChild(randDiv);
}
// 6 zadanie
const myNewObjStr = {
div1: 'to jest div1',
span1: 'to jest span1',
div2: {
div3: 'to jest div3(2)',
},
span2: 'to jest span2'
}
const r00t = document.getElementById('root');
const firstDiv = document.createElement('div');
firstDiv.innerText = myNewObjStr.div1;
const firstSpan = document.createElement('span');
firstSpan.innerText = myNewObjStr.span1;
firstDiv.append(firstSpan);
document.body.append(firstDiv, r00t);
const secondDiv = document.createElement('div');
const thirdDiv = document.createElement('div');
thirdDiv.innerText = myNewObjStr.div2.div3;
const secondSpan = document.createElement('span');
secondSpan.innerText = myNewObjStr.span2;
secondDiv.append(thirdDiv);
secondDiv.append(secondSpan);
document.body.append(secondDiv, r00t);
// 7 zadanie
const favFruits = ['arbuz', 'granat', 'mango', 'banan', 'banan', 'pomarańcza', 'wiśnia'];
const ul1 = document.createElement("ul");
const ul2 = document.createElement("ul");
favFruits.forEach(v => {
const li = document.createElement("li");
li.innerText = v;
ul1.appendChild(li);
});
const lists = [ul1, ul2];
const buttons = [document.createElement("button"), document.createElement("button")];
function checkButtonDisabled() {
lists.forEach((ul, i) => {
if (ul.childElementCount <= 1) {
buttons[i].disabled = true;
} else {
buttons[i].disabled = false;
}
})
}
lists.forEach((ul, i) => {
buttons[i].innerText = 'MOVE';
buttons[i].addEventListener('click', () => {
const listItems = ul.querySelectorAll('li');
const childToTransfer = listItems[listItems.length - 1];
if (i === 0) {
ul2.insertBefore(childToTransfer, buttons[1]);
} else {
ul1.insertBefore(childToTransfer, buttons[0]);
}
checkButtonDisabled();
});
ul.appendChild(buttons[i]);
document.body.appendChild(ul);
});
checkButtonDisabled();
// 8 zadanie
const fieldset = document.createElement('fieldset');
const inputs = [{
label: 'Element',
id: 'el',
type: 'text'
}, {
label: 'Tekst',
id: 'text',
type: 'text'
}, {
label: 'Kolor',
id: 'color',
type: 'color'
}, {
label: 'Ile razy',
id: 'count',
type: 'number'
}, {
label: 'Utwórz',
type: 'submit'
}];
inputs.forEach(v => {
const elInput = document.createElement('input');
let label = document.createElement('hr');
elInput.style.display = 'block';
elInput.type = v.type;
elInput.id = v.id || null;
if (v.type === 'submit') {
elInput.value = v.label;
elInput.addEventListener('click', (e) => {
createElement(e);
});
} else {
label = document.createElement('label');
label.innerText = v.label;
label.for = v.id;
}
fieldset.appendChild(label);
fieldset.appendChild(elInput);
});
function createElement(e) {
e.preventDefault();
let el = null;
inputs.forEach((v) => {
const value = document.getElementById(v.id)?.value;
switch (v.id) {
case 'el': el = document.createElement(value); break;
case 'text': el.innerText = value; break;
case 'color': el.style.color = value; break;
case 'count': for (let i = 1; i <= Number(value); i++) {
document.body.appendChild(el.cloneNode(true));
} break;
}
});
}
document.body.appendChild(fieldset);
// 9 zadanie
var nextFormulage = document.createElement("form");
root.appendChild(nextFormulage);
var yourName = document.createElement("input");
yourName.setAttribute('type', 'text');
yourName.setAttribute('value', 'Name');
var lastname = document.createElement("input");
lastname.setAttribute('type', 'text');
lastname.setAttribute('value', 'Lastname');
var age = document.createElement("input");
age.setAttribute('type', 'text');
age.setAttribute('value', 'Age');
var howManyKids = document.createElement("input");
howManyKids.setAttribute('type', 'text');
howManyKids.setAttribute('value', 'HowManyKids');
nextFormulage.appendChild(yourName);
nextFormulage.appendChild(lastname);
nextFormulage.appendChild(age);
nextFormulage.appendChild(howManyKids);
var moreButton = document.createElement('button');
moreButton.id = "more"
moreButton.type = 'button'
moreButton.innerText = "Więcej";
nextFormulage.appendChild(moreButton);
var createButton = document.createElement('button');
createButton.id = "create"
createButton.type = 'button'
createButton.innerText = "Utwórz";
nextFormulage.appendChild(createButton);
var nameTab = [];
var lastnameTab = [];
var ageTab = [];
var kidsTab = [];
function moreFields(){
nameTab.push(yourName.value);
lastnameTab.push(lastname.value);
ageTab.push(age.value);
kidsTab.push(howManyKids.value);
yourName.value = '';
lastname.value = '';
age.value = '';
howManyKids.value = '';
}
document.querySelector('#more').addEventListener('click', moreFields);
function createTable(){
nameTab.push(yourName.value);
lastnameTab.push(lastname.value);
ageTab.push(age.value);
kidsTab.push(howManyKids.value);
yourName.value = '';
lastname.value = '';
age.value = '';
howManyKids.value = '';
var tab = document.createElement("table");
var header = document.createElement('tr');
tab.appendChild(header);
var name = document.createElement('th');
name.innerHTML = "Name";
var lastName = document.createElement('th');
lastName.innerHTML = "Lastname";
var age1 = document.createElement('th');
age1.innerHTML = "Age";
var kids = document.createElement('th');
kids.innerHTML = "HowManyKids";
root.appendChild(tab);
header.appendChild(name);
header.appendChild(lastName);
header.appendChild(age1);
header.appendChild(kids);
for (var i = 0; i < nameTab.length; i++) {
var item = document.createElement('tr');
tab.appendChild(item);
var del = document.createElement('button');
del.innerText = "Usuń";
item.appendChild(del);
var newName = document.createElement('td');
newName.innerText = nameTab[i];
var newLastname = document.createElement('td');
newLastname.innerText = lastnameTab[i];
var newAge = document.createElement('td');
newAge.innerText = ageTab[i];
var newKids = document.createElement('td');
newKids.innerText= kidsTab[i];
item.appendChild(newName);
item.appendChild(newLastname);
item.appendChild(newAge);
item.appendChild(newKids);
item.appendChild(del);
del.addEventListener('click', deleteA);
}
nameTab = [];
lastnameTab = [];
ageTab = [];
kidsTab = [];
}
function deleteA(e) {
e.target .parentElement.remove()
}
document.querySelector('#create').addEventListener('click', createTable);
// 10 zadanie
let changeButton = document.createElement('button');
changeButton.id = "change"
changeButton.type = 'button'
changeButton.innerText = "Użyj dużych liter!";
root.appendChild(changeButton);
function changeLetters() {
document.querySelectorAll('tr').forEach((row) => {
row.querySelectorAll('td').forEach((cell) => {
cell.innerText = cell.innerText[0].toUpperCase() + cell.innerText.slice(1);
})
})
}
document.querySelector('#change').addEventListener('click', changeLetters);
// 11 zadanie
function extarctNumbersAndMultiplyToDivs(str) {
const numbers = str.match(/[0-9]+/g);
if (numbers.length > 0) {
console.log(numbers.reduce((a, b) => Number(a) + Number(b)));
const mmultiplier = numbers.reduce((a, b) => Number(a) * Number(b));
for (let i = 0; i < mmultiplier; i++) {
const div = document.createElement('div');
div.innerText = `div${i}`;
document.body.appendChild(div);
}
}
}
extarctNumbersAndMultiplyToDivs('foo1bar2test10nice2');
// 12 zadanie
function createObj(str) {
return {
string: str
}
}
const alaStr = createObj('Ala ma kota');
alaStr.alaToOla = function () {
if (this.string.includes('Ala')) {
this.string = this.string.replaceAll('Ala', 'Ola');
console.log(this.string);
} else {
const div = document.createElement('div');
div.innerText = 'Słowo Ala nie występuje w tekście.';
document.body.appendChild(div);
}
}
alaStr.alaToOla();
// 13 zadanie
function countForMe(stringArr){
var howManyLetters = [];
for(var i = 0; i < stringArr.length; i++){
how | owManyLetters;
}
function showAvg(summ){
var average = summ / tabWithWords.length;
return average;
}
function sum(howManyLetters){
var summ = howManyLetters.reduce((prev,curr) => prev += curr);
return summ;
}
var tabWithWords = ['Mornings','are','for','coffee','and', 'contemplation', 'StrangerThings'];
console.log("Letters in each word:");
console.log(countForMe(tabWithWords));
console.log("Sum of letters:");
console.log(sum(countForMe(tabWithWords)));
console.log("Average:");
console.log(showAvg(sum(countForMe(tabWithWords))));
// 14 zadanie
let obj = {
name: '',
surname: ''
};
function modifyObj(name, surname) {
obj.name = name;
obj.surname = surname;
obj.nameLength = name.length;
obj.surnameLength = surname.length;
if (name.length > 5 || surname.length > 5) {
const btn = document.createElement('button');
btn.innerText = 'Restore';
btn.addEventListener('click', (e) => {
obj = {
name: '',
surname: ''
}
});
document.body.appendChild(btn)
}
}
modifyObj('Michał', 'Orłowski');
| ManyLetters[i] = stringArr[i].length;
}
return h | conditional_block |
script.js | // 1 zadanie
var exe = document.querySelector('#root');
var newElem = document.createElement('div');
newElem.innerHTML = 'To jest nowy element';
exe.appendChild(newElem);
// 2 zadanie
const myTab = ["Jabłko", "Pomarańcza", "Banan", "Wiśnia", "Granat", "Arbuz"];
const myList = document.createElement('ol');
myList.id = 'myList';
myTab.forEach((a) => {
var myFruit = document.createElement('li');
myFruit.innerText = a;
myList.append(myFruit);
});
newElem.append(myList);
document.body.insertBefore(newElem, document.getElementById('root'));
// 3 zadanie
newElem.addEventListener('click', () => {
Array.from(myList.children).forEach((el, x) => {
if (x % 2 === 1) {
myList.removeChild(el);
}
});
});
// 4 zadanie
const button = document.createElement('button');
button.innerText = 'Click to remove';
button.addEventListener('click', (e) => {
e.target.remove();
});
document.body.appendChild(button);
// 5 zadanie
const rand = Math.floor(Math.random() * 20);
for (let i = 0; i < rand; i++) {
const randDiv = document.createElement('div');
randDiv.innerText = `to jest div numer ${i}`;
document.body.appendChild(randDiv);
}
// 6 zadanie
const myNewObjStr = {
div1: 'to jest div1',
span1: 'to jest span1',
div2: {
div3: 'to jest div3(2)',
},
span2: 'to jest span2'
}
const r00t = document.getElementById('root');
const firstDiv = document.createElement('div');
firstDiv.innerText = myNewObjStr.div1;
const firstSpan = document.createElement('span');
firstSpan.innerText = myNewObjStr.span1;
firstDiv.append(firstSpan);
document.body.append(firstDiv, r00t);
const secondDiv = document.createElement('div');
const thirdDiv = document.createElement('div');
thirdDiv.innerText = myNewObjStr.div2.div3;
const secondSpan = document.createElement('span');
secondSpan.innerText = myNewObjStr.span2;
secondDiv.append(thirdDiv);
secondDiv.append(secondSpan);
document.body.append(secondDiv, r00t);
// 7 zadanie
const favFruits = ['arbuz', 'granat', 'mango', 'banan', 'banan', 'pomarańcza', 'wiśnia'];
const ul1 = document.createElement("ul");
const ul2 = document.createElement("ul");
favFruits.forEach(v => {
const li = document.createElement("li");
li.innerText = v;
ul1.appendChild(li);
});
const lists = [ul1, ul2];
const buttons = [document.createElement("button"), document.createElement("button")];
function checkButtonDisabled() {
lists.forEach((ul, i) => {
if (ul.childElementCount <= 1) {
buttons[i].disabled = true;
} else {
buttons[i].disabled = false;
}
})
}
lists.forEach((ul, i) => {
buttons[i].innerText = 'MOVE';
buttons[i].addEventListener('click', () => {
const listItems = ul.querySelectorAll('li');
const childToTransfer = listItems[listItems.length - 1];
if (i === 0) {
ul2.insertBefore(childToTransfer, buttons[1]);
} else {
ul1.insertBefore(childToTransfer, buttons[0]);
}
checkButtonDisabled();
});
ul.appendChild(buttons[i]);
document.body.appendChild(ul);
});
checkButtonDisabled();
// 8 zadanie
const fieldset = document.createElement('fieldset');
const inputs = [{
label: 'Element',
id: 'el',
type: 'text'
}, {
label: 'Tekst',
id: 'text',
type: 'text'
}, {
label: 'Kolor',
id: 'color',
type: 'color'
}, {
label: 'Ile razy',
id: 'count',
type: 'number'
}, {
label: 'Utwórz',
type: 'submit'
}];
inputs.forEach(v => {
const elInput = document.createElement('input');
let label = document.createElement('hr');
elInput.style.display = 'block';
elInput.type = v.type;
elInput.id = v.id || null;
if (v.type === 'submit') {
elInput.value = v.label;
elInput.addEventListener('click', (e) => {
createElement(e);
});
} else {
label = document.createElement('label');
label.innerText = v.label;
label.for = v.id;
}
fieldset.appendChild(label);
fieldset.appendChild(elInput);
});
function createElement(e) {
e.preventDefault();
let el = null;
inputs.forEach((v) => {
const value = document.getElementById(v.id)?.value;
switch (v.id) {
case 'el': el = document.createElement(value); break;
case 'text': el.innerText = value; break;
case 'color': el.style.color = value; break;
case 'count': for (let i = 1; i <= Number(value); i++) {
document.body.appendChild(el.cloneNode(true));
} break;
}
});
}
document.body.appendChild(fieldset);
// 9 zadanie
var nextFormulage = document.createElement("form");
root.appendChild(nextFormulage);
var yourName = document.createElement("input");
yourName.setAttribute('type', 'text');
yourName.setAttribute('value', 'Name');
var lastname = document.createElement("input");
lastname.setAttribute('type', 'text');
lastname.setAttribute('value', 'Lastname');
var age = document.createElement("input");
age.setAttribute('type', 'text');
age.setAttribute('value', 'Age');
var howManyKids = document.createElement("input");
howManyKids.setAttribute('type', 'text');
howManyKids.setAttribute('value', 'HowManyKids');
nextFormulage.appendChild(yourName);
nextFormulage.appendChild(lastname);
nextFormulage.appendChild(age);
nextFormulage.appendChild(howManyKids);
var moreButton = document.createElement('button');
moreButton.id = "more"
moreButton.type = 'button'
moreButton.innerText = "Więcej";
nextFormulage.appendChild(moreButton);
var createButton = document.createElement('button');
createButton.id = "create"
createButton.type = 'button'
createButton.innerText = "Utwórz";
nextFormulage.appendChild(createButton);
var nameTab = [];
var lastnameTab = [];
var ageTab = [];
|
function moreFields(){
nameTab.push(yourName.value);
lastnameTab.push(lastname.value);
ageTab.push(age.value);
kidsTab.push(howManyKids.value);
yourName.value = '';
lastname.value = '';
age.value = '';
howManyKids.value = '';
}
document.querySelector('#more').addEventListener('click', moreFields);
function createTable(){
nameTab.push(yourName.value);
lastnameTab.push(lastname.value);
ageTab.push(age.value);
kidsTab.push(howManyKids.value);
yourName.value = '';
lastname.value = '';
age.value = '';
howManyKids.value = '';
var tab = document.createElement("table");
var header = document.createElement('tr');
tab.appendChild(header);
var name = document.createElement('th');
name.innerHTML = "Name";
var lastName = document.createElement('th');
lastName.innerHTML = "Lastname";
var age1 = document.createElement('th');
age1.innerHTML = "Age";
var kids = document.createElement('th');
kids.innerHTML = "HowManyKids";
root.appendChild(tab);
header.appendChild(name);
header.appendChild(lastName);
header.appendChild(age1);
header.appendChild(kids);
for (var i = 0; i < nameTab.length; i++) {
var item = document.createElement('tr');
tab.appendChild(item);
var del = document.createElement('button');
del.innerText = "Usuń";
item.appendChild(del);
var newName = document.createElement('td');
newName.innerText = nameTab[i];
var newLastname = document.createElement('td');
newLastname.innerText = lastnameTab[i];
var newAge = document.createElement('td');
newAge.innerText = ageTab[i];
var newKids = document.createElement('td');
newKids.innerText= kidsTab[i];
item.appendChild(newName);
item.appendChild(newLastname);
item.appendChild(newAge);
item.appendChild(newKids);
item.appendChild(del);
del.addEventListener('click', deleteA);
}
nameTab = [];
lastnameTab = [];
ageTab = [];
kidsTab = [];
}
function deleteA(e) {
e.target .parentElement.remove()
}
document.querySelector('#create').addEventListener('click', createTable);
// 10 zadanie
let changeButton = document.createElement('button');
changeButton.id = "change"
changeButton.type = 'button'
changeButton.innerText = "Użyj dużych liter!";
root.appendChild(changeButton);
function changeLetters() {
document.querySelectorAll('tr').forEach((row) => {
row.querySelectorAll('td').forEach((cell) => {
cell.innerText = cell.innerText[0].toUpperCase() + cell.innerText.slice(1);
})
})
}
document.querySelector('#change').addEventListener('click', changeLetters);
// 11 zadanie
function extarctNumbersAndMultiplyToDivs(str) {
const numbers = str.match(/[0-9]+/g);
if (numbers.length > 0) {
console.log(numbers.reduce((a, b) => Number(a) + Number(b)));
const mmultiplier = numbers.reduce((a, b) => Number(a) * Number(b));
for (let i = 0; i < mmultiplier; i++) {
const div = document.createElement('div');
div.innerText = `div${i}`;
document.body.appendChild(div);
}
}
}
extarctNumbersAndMultiplyToDivs('foo1bar2test10nice2');
// 12 zadanie
function createObj(str) {
return {
string: str
}
}
const alaStr = createObj('Ala ma kota');
alaStr.alaToOla = function () {
if (this.string.includes('Ala')) {
this.string = this.string.replaceAll('Ala', 'Ola');
console.log(this.string);
} else {
const div = document.createElement('div');
div.innerText = 'Słowo Ala nie występuje w tekście.';
document.body.appendChild(div);
}
}
alaStr.alaToOla();
// 13 zadanie
function countForMe(stringArr){
var howManyLetters = [];
for(var i = 0; i < stringArr.length; i++){
howManyLetters[i] = stringArr[i].length;
}
return howManyLetters;
}
function showAvg(summ){
var average = summ / tabWithWords.length;
return average;
}
function sum(howManyLetters){
var summ = howManyLetters.reduce((prev,curr) => prev += curr);
return summ;
}
var tabWithWords = ['Mornings','are','for','coffee','and', 'contemplation', 'StrangerThings'];
console.log("Letters in each word:");
console.log(countForMe(tabWithWords));
console.log("Sum of letters:");
console.log(sum(countForMe(tabWithWords)));
console.log("Average:");
console.log(showAvg(sum(countForMe(tabWithWords))));
// 14 zadanie
let obj = {
name: '',
surname: ''
};
function modifyObj(name, surname) {
obj.name = name;
obj.surname = surname;
obj.nameLength = name.length;
obj.surnameLength = surname.length;
if (name.length > 5 || surname.length > 5) {
const btn = document.createElement('button');
btn.innerText = 'Restore';
btn.addEventListener('click', (e) => {
obj = {
name: '',
surname: ''
}
});
document.body.appendChild(btn)
}
}
modifyObj('Michał', 'Orłowski'); | var kidsTab = [];
| random_line_split |
script.js | // 1 zadanie
var exe = document.querySelector('#root');
var newElem = document.createElement('div');
newElem.innerHTML = 'To jest nowy element';
exe.appendChild(newElem);
// 2 zadanie
const myTab = ["Jabłko", "Pomarańcza", "Banan", "Wiśnia", "Granat", "Arbuz"];
const myList = document.createElement('ol');
myList.id = 'myList';
myTab.forEach((a) => {
var myFruit = document.createElement('li');
myFruit.innerText = a;
myList.append(myFruit);
});
newElem.append(myList);
document.body.insertBefore(newElem, document.getElementById('root'));
// 3 zadanie
newElem.addEventListener('click', () => {
Array.from(myList.children).forEach((el, x) => {
if (x % 2 === 1) {
myList.removeChild(el);
}
});
});
// 4 zadanie
const button = document.createElement('button');
button.innerText = 'Click to remove';
button.addEventListener('click', (e) => {
e.target.remove();
});
document.body.appendChild(button);
// 5 zadanie
const rand = Math.floor(Math.random() * 20);
for (let i = 0; i < rand; i++) {
const randDiv = document.createElement('div');
randDiv.innerText = `to jest div numer ${i}`;
document.body.appendChild(randDiv);
}
// 6 zadanie
const myNewObjStr = {
div1: 'to jest div1',
span1: 'to jest span1',
div2: {
div3: 'to jest div3(2)',
},
span2: 'to jest span2'
}
const r00t = document.getElementById('root');
const firstDiv = document.createElement('div');
firstDiv.innerText = myNewObjStr.div1;
const firstSpan = document.createElement('span');
firstSpan.innerText = myNewObjStr.span1;
firstDiv.append(firstSpan);
document.body.append(firstDiv, r00t);
const secondDiv = document.createElement('div');
const thirdDiv = document.createElement('div');
thirdDiv.innerText = myNewObjStr.div2.div3;
const secondSpan = document.createElement('span');
secondSpan.innerText = myNewObjStr.span2;
secondDiv.append(thirdDiv);
secondDiv.append(secondSpan);
document.body.append(secondDiv, r00t);
// 7 zadanie
const favFruits = ['arbuz', 'granat', 'mango', 'banan', 'banan', 'pomarańcza', 'wiśnia'];
const ul1 = document.createElement("ul");
const ul2 = document.createElement("ul");
favFruits.forEach(v => {
const li = document.createElement("li");
li.innerText = v;
ul1.appendChild(li);
});
const lists = [ul1, ul2];
const buttons = [document.createElement("button"), document.createElement("button")];
function checkButtonDisabled() {
lists.forEach((ul, i) => {
if (ul.childElementCount <= 1) {
buttons[i].disabled = true;
} else {
buttons[i].disabled = false;
}
})
}
lists.forEach((ul, i) => {
buttons[i].innerText = 'MOVE';
buttons[i].addEventListener('click', () => {
const listItems = ul.querySelectorAll('li');
const childToTransfer = listItems[listItems.length - 1];
if (i === 0) {
ul2.insertBefore(childToTransfer, buttons[1]);
} else {
ul1.insertBefore(childToTransfer, buttons[0]);
}
checkButtonDisabled();
});
ul.appendChild(buttons[i]);
document.body.appendChild(ul);
});
checkButtonDisabled();
// 8 zadanie
const fieldset = document.createElement('fieldset');
const inputs = [{
label: 'Element',
id: 'el',
type: 'text'
}, {
label: 'Tekst',
id: 'text',
type: 'text'
}, {
label: 'Kolor',
id: 'color',
type: 'color'
}, {
label: 'Ile razy',
id: 'count',
type: 'number'
}, {
label: 'Utwórz',
type: 'submit'
}];
inputs.forEach(v => {
const elInput = document.createElement('input');
let label = document.createElement('hr');
elInput.style.display = 'block';
elInput.type = v.type;
elInput.id = v.id || null;
if (v.type === 'submit') {
elInput.value = v.label;
elInput.addEventListener('click', (e) => {
createElement(e);
});
} else {
label = document.createElement('label');
label.innerText = v.label;
label.for = v.id;
}
fieldset.appendChild(label);
fieldset.appendChild(elInput);
});
function createElement(e) {
e.preventDefault();
let el = null;
inputs.forEach((v) => {
const value = document.getElementById(v.id)?.value;
switch (v.id) {
case 'el': el = document.createElement(value); break;
case 'text': el.innerText = value; break;
case 'color': el.style.color = value; break;
case 'count': for (let i = 1; i <= Number(value); i++) {
document.body.appendChild(el.cloneNode(true));
} break;
}
});
}
document.body.appendChild(fieldset);
// 9 zadanie
var nextFormulage = document.createElement("form");
root.appendChild(nextFormulage);
var yourName = document.createElement("input");
yourName.setAttribute('type', 'text');
yourName.setAttribute('value', 'Name');
var lastname = document.createElement("input");
lastname.setAttribute('type', 'text');
lastname.setAttribute('value', 'Lastname');
var age = document.createElement("input");
age.setAttribute('type', 'text');
age.setAttribute('value', 'Age');
var howManyKids = document.createElement("input");
howManyKids.setAttribute('type', 'text');
howManyKids.setAttribute('value', 'HowManyKids');
nextFormulage.appendChild(yourName);
nextFormulage.appendChild(lastname);
nextFormulage.appendChild(age);
nextFormulage.appendChild(howManyKids);
var moreButton = document.createElement('button');
moreButton.id = "more"
moreButton.type = 'button'
moreButton.innerText = "Więcej";
nextFormulage.appendChild(moreButton);
var createButton = document.createElement('button');
createButton.id = "create"
createButton.type = 'button'
createButton.innerText = "Utwórz";
nextFormulage.appendChild(createButton);
var nameTab = [];
var lastnameTab = [];
var ageTab = [];
var kidsTab = [];
function moreFields(){
n | nt.querySelector('#more').addEventListener('click', moreFields);
function createTable(){
nameTab.push(yourName.value);
lastnameTab.push(lastname.value);
ageTab.push(age.value);
kidsTab.push(howManyKids.value);
yourName.value = '';
lastname.value = '';
age.value = '';
howManyKids.value = '';
var tab = document.createElement("table");
var header = document.createElement('tr');
tab.appendChild(header);
var name = document.createElement('th');
name.innerHTML = "Name";
var lastName = document.createElement('th');
lastName.innerHTML = "Lastname";
var age1 = document.createElement('th');
age1.innerHTML = "Age";
var kids = document.createElement('th');
kids.innerHTML = "HowManyKids";
root.appendChild(tab);
header.appendChild(name);
header.appendChild(lastName);
header.appendChild(age1);
header.appendChild(kids);
for (var i = 0; i < nameTab.length; i++) {
var item = document.createElement('tr');
tab.appendChild(item);
var del = document.createElement('button');
del.innerText = "Usuń";
item.appendChild(del);
var newName = document.createElement('td');
newName.innerText = nameTab[i];
var newLastname = document.createElement('td');
newLastname.innerText = lastnameTab[i];
var newAge = document.createElement('td');
newAge.innerText = ageTab[i];
var newKids = document.createElement('td');
newKids.innerText= kidsTab[i];
item.appendChild(newName);
item.appendChild(newLastname);
item.appendChild(newAge);
item.appendChild(newKids);
item.appendChild(del);
del.addEventListener('click', deleteA);
}
nameTab = [];
lastnameTab = [];
ageTab = [];
kidsTab = [];
}
function deleteA(e) {
e.target .parentElement.remove()
}
document.querySelector('#create').addEventListener('click', createTable);
// 10 zadanie
let changeButton = document.createElement('button');
changeButton.id = "change"
changeButton.type = 'button'
changeButton.innerText = "Użyj dużych liter!";
root.appendChild(changeButton);
function changeLetters() {
document.querySelectorAll('tr').forEach((row) => {
row.querySelectorAll('td').forEach((cell) => {
cell.innerText = cell.innerText[0].toUpperCase() + cell.innerText.slice(1);
})
})
}
document.querySelector('#change').addEventListener('click', changeLetters);
// 11 zadanie
function extarctNumbersAndMultiplyToDivs(str) {
const numbers = str.match(/[0-9]+/g);
if (numbers.length > 0) {
console.log(numbers.reduce((a, b) => Number(a) + Number(b)));
const mmultiplier = numbers.reduce((a, b) => Number(a) * Number(b));
for (let i = 0; i < mmultiplier; i++) {
const div = document.createElement('div');
div.innerText = `div${i}`;
document.body.appendChild(div);
}
}
}
extarctNumbersAndMultiplyToDivs('foo1bar2test10nice2');
// 12 zadanie
function createObj(str) {
return {
string: str
}
}
const alaStr = createObj('Ala ma kota');
alaStr.alaToOla = function () {
if (this.string.includes('Ala')) {
this.string = this.string.replaceAll('Ala', 'Ola');
console.log(this.string);
} else {
const div = document.createElement('div');
div.innerText = 'Słowo Ala nie występuje w tekście.';
document.body.appendChild(div);
}
}
alaStr.alaToOla();
// 13 zadanie
function countForMe(stringArr){
var howManyLetters = [];
for(var i = 0; i < stringArr.length; i++){
howManyLetters[i] = stringArr[i].length;
}
return howManyLetters;
}
function showAvg(summ){
var average = summ / tabWithWords.length;
return average;
}
function sum(howManyLetters){
var summ = howManyLetters.reduce((prev,curr) => prev += curr);
return summ;
}
var tabWithWords = ['Mornings','are','for','coffee','and', 'contemplation', 'StrangerThings'];
console.log("Letters in each word:");
console.log(countForMe(tabWithWords));
console.log("Sum of letters:");
console.log(sum(countForMe(tabWithWords)));
console.log("Average:");
console.log(showAvg(sum(countForMe(tabWithWords))));
// 14 zadanie
let obj = {
name: '',
surname: ''
};
function modifyObj(name, surname) {
obj.name = name;
obj.surname = surname;
obj.nameLength = name.length;
obj.surnameLength = surname.length;
if (name.length > 5 || surname.length > 5) {
const btn = document.createElement('button');
btn.innerText = 'Restore';
btn.addEventListener('click', (e) => {
obj = {
name: '',
surname: ''
}
});
document.body.appendChild(btn)
}
}
modifyObj('Michał', 'Orłowski');
| ameTab.push(yourName.value);
lastnameTab.push(lastname.value);
ageTab.push(age.value);
kidsTab.push(howManyKids.value);
yourName.value = '';
lastname.value = '';
age.value = '';
howManyKids.value = '';
}
docume | identifier_body |
main.go | package main
import (
"crypto/tls"
"crypto/x509"
"encoding/json"
"errors"
"flag"
"fmt"
"io/ioutil"
"os"
"strings"
"sync"
"time"
"os/signal"
"go-learning/aliyunkafka"
"github.com/Shopify/sarama"
sls "github.com/aliyun/aliyun-log-go-sdk"
"github.com/bsm/sarama-cluster"
"github.com/gogo/protobuf/proto"
)
var cfg *configs.MqConfig
var consumer *cluster.Consumer
var sig chan os.Signal
var loghub *Loghub
func init() {
fmt.Println("init kafka consumer")
var err error
// loghub
lhcfg := &Config{}
flag.StringVar(&lhcfg.LogProject.Name, "projectname", "epaper", "loghub project name")
flag.StringVar(&lhcfg.LogProject.Endpoint, "endpoint", "cn-beijing.log.aliyuncs.com", "loghub endpoint")
flag.StringVar(&lhcfg.LogProject.AccessKeyID, "accesskeyid", "", "loghub AccessKeyID")
flag.StringVar(&lhcfg.LogProject.AccessKeySecret, "accesskeysecret", "", "loghub AccessKeySecret")
cfg := &configs.MqConfig{}
configs.LoadJsonConfig(cfg, "mq.json")
flag.StringVar(&cfg.Ak, "ak", "", "access key")
flag.StringVar(&cfg.Password, "password", "", "password")
flag.Parse()
fmt.Printf("load config: %v\n", cfg)
clusterCfg := cluster.NewConfig()
clusterCfg.Net.SASL.Enable = true
clusterCfg.Net.SASL.User = cfg.Ak
clusterCfg.Net.SASL.Password = cfg.Password
clusterCfg.Net.SASL.Handshake = true
certBytes, err := ioutil.ReadFile(configs.GetFullPath(cfg.CertFile))
clientCertPool := x509.NewCertPool()
ok := clientCertPool.AppendCertsFromPEM(certBytes)
if !ok {
panic("kafka consumer failed to parse root certificate")
}
clusterCfg.Net.TLS.Config = &tls.Config{
//Certificates: []tls.Certificate{},
RootCAs: clientCertPool,
InsecureSkipVerify: true,
}
clusterCfg.Net.TLS.Enable = true
clusterCfg.Consumer.Return.Errors = true
clusterCfg.Consumer.Offsets.Initial = sarama.OffsetOldest
clusterCfg.Group.Return.Notifications = true
clusterCfg.ChannelBufferSize = 1024
clusterCfg.Version = sarama.V0_10_0_0
if err = clusterCfg.Validate(); err != nil {
msg := fmt.Sprintf("Kafka consumer config invalidate. config: %v. err: %v", *clusterCfg, err)
fmt.Println(msg)
panic(msg)
}
consumer, err = cluster.NewConsumer(cfg.Servers, cfg.ConsumerId, cfg.Topics, clusterCfg)
if err != nil {
msg := fmt.Sprintf("Create kafka consumer error: %v. config: %v", err, clusterCfg)
fmt.Println(msg)
panic(msg)
}
sig = make(chan os.Signal, 1)
// loghub
loghub = NewLoghub(lhcfg, consumer)
loghub.Run()
}
func Start() {
go consume()
}
func consume() {
for {
select {
case msg, more := <-consumer.Messages():
if more {
fmt.Printf("kafka consumer msg: (topic:%s) (partition:%d) (offset:%d) (%s): (%s)\n", msg.Topic, msg.Partition, msg.Offset, msg.Key, msg.Value)
loghub.Input() <- msg
// consumer.MarkOffset(msg, "completed") // mark message as processed
// fmt.Println("kafka consumer HighWaterMarks", consumer.HighWaterMarks())
}
case err, more := <-consumer.Errors():
if more {
fmt.Printf("Kafka consumer error: %v\n", err.Error())
}
case ntf, more := <-consumer.Notifications():
if more {
fmt.Printf("Kafka consumer rebalance: %v\n", ntf)
}
case <-sig:
fmt.Errorf("Stop consumer server...")
consumer.Close()
return
}
}
}
func Stop(s os.Signal) {
fmt.Println("Recived kafka consumer stop signal...")
sig <- s
fmt.Println("kafka consumer stopped!!!")
}
func main() {
signals := make(chan os.Signal, 1)
signal.Notify(signals, os.Interrupt)
Start()
select {
case s := <-signals:
Stop(s)
}
}
type Config struct {
Name string
LogProject struct {
Name string
Endpoint string
AccessKeyID string
AccessKeySecret string
}
MessageChannelBufferSize int
LogsBufferSize int
Topics []string
LogsBufferSize4Logstore int
Logstores []string
}
type Loghub struct {
Name string
*Config
consumer *cluster.Consumer
logproject *sls.LogProject
logstores map[string]*sls.LogStore
messages chan *sarama.ConsumerMessage
messageChannelBufferSize int
m sync.RWMutex
stop chan int
mlogstoreLogsBuffer sync.RWMutex
logsBuffer4Logstore map[string](chan *topicLog) // by logstore
logsBufferSize4Logstore int
mlogsBuffer sync.RWMutex
// logsBuffer map[string](chan *topicLog) // by topic and logstore
logsBuffer map[string]map[string]chan *topicLog // by topic and logstore
logsBufferSize int
topics []string
}
type topicLog struct {
topic string
log *sls.Log
cmsg *sarama.ConsumerMessage
}
func NewLoghub(cfg *Config, consumer *cluster.Consumer) *Loghub {
logproject := &sls.LogProject{
Name: cfg.LogProject.Name,
Endpoint: cfg.LogProject.Endpoint,
AccessKeyID: cfg.LogProject.AccessKeyID,
AccessKeySecret: cfg.LogProject.AccessKeySecret,
}
lbls := map[string](chan *topicLog){}
lbr := map[string]map[string](chan *topicLog){}
// lbr := map[string](chan *topicLog){}
// for _, tp := range cfg.Topics {
// lbr[tp] = make(chan *topicLog, cfg.LogsBufferSize)
// }
lh := &Loghub{
Name: cfg.Name,
Config: cfg,
consumer: consumer,
logproject: logproject,
messages: make(chan *sarama.ConsumerMessage, cfg.MessageChannelBufferSize),
messageChannelBufferSize: cfg.MessageChannelBufferSize,
stop: make(chan int),
logsBuffer4Logstore: lbls,
logsBufferSize4Logstore: cfg.LogsBufferSize4Logstore,
logstores: map[string]*sls.LogStore{},
logsBuffer: lbr,
logsBufferSize: cfg.LogsBufferSize,
topics: cfg.Topics,
}
return lh
}
func (l *Loghub) Run() {
// lss, err := l.logproject.ListLogStore()
// if err != nil {
// panic(err)
// }
// 开启日志库
for _, lsn := range l.Logstores {
_, err := l.getLogstore(lsn)
if err != nil {
fmt.Printf("Loghub Start failed (logstoreName=%s). err: %v\n", lsn, err)
panic(err)
}
// 分配到topic
go l.dispatchToTopic(lsn)
for _, tp := range l.topics {
go l.processTopicMsg(lsn, tp)
}
}
// 分配消息
go l.dispatch()
}
func (l *Loghub) Input() chan<- *sarama.ConsumerMessage {
return l.messages
}
// dispatchToTopic
func (l *Loghub) dispatchToTopic(logstoreName string) {
// TODO: 处理消息, 分配到不同的topic
channelBuffer := l.getLogstoreLogsBufferChannel(logstoreName)
for {
select {
case log := <-channelBuffer:
l.mlogsBuffer.Lock()
logsCB, ok := l.logsBuffer[logstoreName]
if !ok || logsCB == nil {
logsCB = map[string]chan *topicLog{}
l.logsBuffer[logstoreName] = logsCB
}
logsCBTopic, ok := logsCB[log.topic]
if !ok || logsCBTopic == nil {
logsCBTopic = make(chan *topicLog, l.logsBufferSize)
logsCB[log.topic] = logsCBTopic
}
l.mlogsBuffer.Unlock()
logsCBTopic <- log
}
}
}
// dispatch
// 分配消息
func (l *Loghub) dispatch() error {
// TODO: logproject, logstore, topic
// 指定logproject和logstore进行分配
for {
select {
case msg := <-l.messages:
data, err := unserialize(msg)
if err != nil {
fmt.Println(err)
continue
}
logprojectName, ok1 := data["logproject"]
if !ok1 || logprojectName == "" {
fmt.Println("loghub.dispatch: data[\"logproject\"] was empty")
continue
}
logstoreName, ok2 := data["logstore"]
if !ok2 || logstoreName == "" {
fmt.Println("loghub.dispatch: data[\"logstore\"] was empty")
continue
}
topic, ok3 := data["topic"]
if !ok3 || topic == "" {
fmt.Println("loghub.dispatch: data[\"topic\"] was empty")
continue
}
log, err := generateLog(data)
if err != nil {
fmt.Println(err)
continue
}
// logstore, err := l.getLogstore(logstoreName)
// if err != nil {
// fmt.Println(err)
// continue
// }
log.cmsg = msg
lblsc := l.getLogstoreLogsBufferChannel(logstoreName)
select {
// TODO: 考虑优化处理, lblsc如果满了的情况
case lblsc <- log:
}
}
}
}
func (l *Loghub) Stop() {
l.stop <- 0
}
func (l *Loghub) processTopicMsg(topic string, logstoreName string) error {
cb := l.logsBuffer[logstoreName][topic]
for {
select {
case log := <-cb:
loggroup := generateLoggroupByTopicLog(log, "")
logstore, err := l.getLogstore(logstoreName)
if err != nil {
fmt.Println(err)
continue
}
err = putLogs(logstore, loggroup)
if err != nil {
fmt.Println(err)
continue
}
l.consumer.MarkOffset(log.cmsg, "loghub.processTopicMsg")
}
}
}
func (l *Loghub) getLogstore(logstoreName string) (*sls.LogStore, error) {
var logstore *sls.LogStore
l.m.RLock()
logstore = l.logstores[logstoreName]
l.m.RUnlock()
if logstore != nil {
return logstore, nil
}
var err error
for retry_times := 0; ; retry_times++ {
if retry_times > 5 {
return nil, errors.New("GetLogStore retry_times > 5")
}
logstore, err = l.logproject.GetLogStore(logstoreName)
if err != nil {
fmt.Printf("GetLogStore fail, retry:%d, err:%v\n", retry_times, err)
if strings.Contains(err.Error(), sls.PROJECT_NOT_EXIST) {
return nil, err
} else if strings.Contains(err.Error(), sls.LOGSTORE_NOT_EXIST) {
err = l.logproject.CreateLogStore(logstoreName, 1, 2)
if err != nil {
fmt.Printf("CreateLogStore fail, err: ", err.Error())
return nil, err
} else {
fmt.Println("CreateLogStore success")
l.m.Lock()
l.logstores[logstoreName] = logstore
l.m.Unlock()
return logstore, nil
}
}
} else { | fmt.Printf("GetLogStore success, retry:%d, name: %s, ttl: %d, shardCount: %d, createTime: %d, lastModifyTime: %d\n", retry_times, logstore.Name, logstore.TTL, logstore.ShardCount, logstore.CreateTime, logstore.LastModifyTime)
l.m.Lock()
l.logstores[logstoreName] = logstore
l.m.Unlock()
return logstore, nil
}
}
}
func (l *Loghub) getLogstoreLogsBufferChannel(logstoreName string) chan *topicLog {
l.mlogstoreLogsBuffer.RLock()
slslchan, ok := l.logsBuffer4Logstore[logstoreName]
l.mlogstoreLogsBuffer.RUnlock()
if !ok || slslchan == nil {
slslchan = make(chan *topicLog, l.logsBufferSize4Logstore)
l.mlogstoreLogsBuffer.Lock()
l.logsBuffer4Logstore[logstoreName] = slslchan
l.mlogstoreLogsBuffer.Unlock()
}
return slslchan
}
func generateLoggroupByTopicLog(tlog *topicLog, source string) *sls.LogGroup {
logs := []*sls.Log{tlog.log}
loggroup := &sls.LogGroup{
Topic: proto.String(tlog.topic),
Source: proto.String(source),
Logs: logs,
}
return loggroup
}
func unserialize(msg *sarama.ConsumerMessage) (map[string]string, error) {
var err error
data := map[string]string{}
err = json.Unmarshal(msg.Value, &data)
if err != nil {
fmt.Printf("[unserialize sarama.ConsumerMessage] json.Unmarshal err: %v\n", err)
return nil, err
}
return data, nil
}
func generateLog(data map[string]string) (*topicLog, error) {
contents := []*sls.LogContent{}
for k, v := range data {
contents = append(contents, &sls.LogContent{
Key: proto.String(k),
Value: proto.String(v),
})
}
t, e := time.Parse("2006-01-02T15:04:05+08:00", data["@timestamp"])
if e != nil {
t = time.Now()
}
log := &sls.Log{
Time: proto.Uint32(uint32(t.Unix())),
Contents: contents,
}
tplog := &topicLog{
topic: data["topic"],
log: log,
}
return tplog, nil
}
func putLogs(logstore *sls.LogStore, loggroup *sls.LogGroup) error {
var retry_times int
var err error
// PostLogStoreLogs API Ref: https://intl.aliyun.com/help/doc-detail/29026.htm
for retry_times = 0; retry_times < 10; retry_times++ {
err = logstore.PutLogs(loggroup)
if err == nil {
fmt.Printf("PutLogs success, retry: %d\n", retry_times)
return nil
}
fmt.Printf("PutLogs fail, retry: %d, err: %s\n", retry_times, err)
//handle exception here, you can add retryable erorrCode, set appropriate put_retry
if strings.Contains(err.Error(), sls.WRITE_QUOTA_EXCEED) || strings.Contains(err.Error(), sls.PROJECT_QUOTA_EXCEED) || strings.Contains(err.Error(), sls.SHARD_WRITE_QUOTA_EXCEED) {
//you should split shard
} else if strings.Contains(err.Error(), sls.INTERNAL_SERVER_ERROR) || strings.Contains(err.Error(), sls.SERVER_BUSY) {
}
continue
}
return err
}
// func PutMsg(msg *sarama.ConsumerMessage) error {
// var err error
// data := map[string]string{}
// err = json.Unmarshal(msg.Value, &data)
// if err != nil {
// fmt.Printf("[PutMsg] json.Unmarshal err: ", err)
// return err
// }
// loggroup := generateLoggroupByTopic([]map[string]string{data}, msg.Topic, "")
// err = putLogs(logstore, loggroup)
// if err != nil {
// fmt.Printf("loghub putlogs failed. err:%v\n", err)
// return err
// }
// return nil
// }
// func PutMsgs(msgs []*sarama.ConsumerMessage) {
// fmt.Println("loghub sample begin")
// var err error
// // put logs to logstore
// datas := []map[string]string{
// {"topic": "gateway-error", "key": strconv.FormatInt(time.Now().UnixNano(), 10), "message": "hello world!"},
// {"topic": "gateway-error", "key": strconv.FormatInt(time.Now().UnixNano(), 10), "message": "hello world!"},
// {"topic": "gateway-error", "key": strconv.FormatInt(time.Now().UnixNano(), 10), "message": "hello world!"},
// }
// loggroup := generateLoggroupByTopic(datas, "gateway-error")
// err = putLogs(logstore, loggroup)
// if err != nil {
// fmt.Printf("loghub putlogs failed. err:%v\n", err)
// }
// fmt.Println("loghub sample end")
// }
// func generateLoggroupByTopic(datas []map[string]string, topic string, source string) *sls.LogGroup {
// logs := []*sls.Log{}
// for _, data := range datas {
// contents := []*sls.LogContent{}
// for k, v := range data {
// contents = append(contents, &sls.LogContent{
// Key: proto.String(k),
// Value: proto.String(v),
// })
// }
// log := &sls.Log{
// Time: proto.Uint32(uint32(time.Now().Unix())),
// Contents: contents,
// }
// logs = append(logs, log)
// }
// loggroup := &sls.LogGroup{
// Topic: proto.String(topic),
// Source: proto.String(source),
// Logs: logs,
// }
// return loggroup
// }
// func generateLog(msg *sarama.ConsumerMessage) (*sls.Log, error) {
// var err error
// data := map[string]string{}
// err = json.Unmarshal(msg.Value, &data)
// if err != nil {
// fmt.Printf("[generateLog] json.Unmarshal err: %v\n", err)
// return nil, err
// }
// contents := []*sls.LogContent{}
// for k, v := range data {
// contents = append(contents, &sls.LogContent{
// Key: proto.String(k),
// Value: proto.String(v),
// })
// }
// t, e := time.Parse("2006-01-02T15:04:05+08:00", data["@timestamp"])
// if e != nil {
// t = time.Now()
// }
// log := &sls.Log{
// Time: proto.Uint32(uint32(t.Unix())),
// Contents: contents,
// }
// return log, nil
// } | random_line_split | |
main.go | package main
import (
"crypto/tls"
"crypto/x509"
"encoding/json"
"errors"
"flag"
"fmt"
"io/ioutil"
"os"
"strings"
"sync"
"time"
"os/signal"
"go-learning/aliyunkafka"
"github.com/Shopify/sarama"
sls "github.com/aliyun/aliyun-log-go-sdk"
"github.com/bsm/sarama-cluster"
"github.com/gogo/protobuf/proto"
)
var cfg *configs.MqConfig
var consumer *cluster.Consumer
var sig chan os.Signal
var loghub *Loghub
func init() {
fmt.Println("init kafka consumer")
var err error
// loghub
lhcfg := &Config{}
flag.StringVar(&lhcfg.LogProject.Name, "projectname", "epaper", "loghub project name")
flag.StringVar(&lhcfg.LogProject.Endpoint, "endpoint", "cn-beijing.log.aliyuncs.com", "loghub endpoint")
flag.StringVar(&lhcfg.LogProject.AccessKeyID, "accesskeyid", "", "loghub AccessKeyID")
flag.StringVar(&lhcfg.LogProject.AccessKeySecret, "accesskeysecret", "", "loghub AccessKeySecret")
cfg := &configs.MqConfig{}
configs.LoadJsonConfig(cfg, "mq.json")
flag.StringVar(&cfg.Ak, "ak", "", "access key")
flag.StringVar(&cfg.Password, "password", "", "password")
flag.Parse()
fmt.Printf("load config: %v\n", cfg)
clusterCfg := cluster.NewConfig()
clusterCfg.Net.SASL.Enable = true
clusterCfg.Net.SASL.User = cfg.Ak
clusterCfg.Net.SASL.Password = cfg.Password
clusterCfg.Net.SASL.Handshake = true
certBytes, err := ioutil.ReadFile(configs.GetFullPath(cfg.CertFile))
clientCertPool := x509.NewCertPool()
ok := clientCertPool.AppendCertsFromPEM(certBytes)
if !ok {
panic("kafka consumer failed to parse root certificate")
}
clusterCfg.Net.TLS.Config = &tls.Config{
//Certificates: []tls.Certificate{},
RootCAs: clientCertPool,
InsecureSkipVerify: true,
}
clusterCfg.Net.TLS.Enable = true
clusterCfg.Consumer.Return.Errors = true
clusterCfg.Consumer.Offsets.Initial = sarama.OffsetOldest
clusterCfg.Group.Return.Notifications = true
clusterCfg.ChannelBufferSize = 1024
clusterCfg.Version = sarama.V0_10_0_0
if err = clusterCfg.Validate(); err != nil {
msg := fmt.Sprintf("Kafka consumer config invalidate. config: %v. err: %v", *clusterCfg, err)
fmt.Println(msg)
panic(msg)
}
consumer, err = cluster.NewConsumer(cfg.Servers, cfg.ConsumerId, cfg.Topics, clusterCfg)
if err != nil {
msg := fmt.Sprintf("Create kafka consumer error: %v. config: %v", err, clusterCfg)
fmt.Println(msg)
panic(msg)
}
sig = make(chan os.Signal, 1)
// loghub
loghub = NewLoghub(lhcfg, consumer)
loghub.Run()
}
func Start() {
go consume()
}
func | () {
for {
select {
case msg, more := <-consumer.Messages():
if more {
fmt.Printf("kafka consumer msg: (topic:%s) (partition:%d) (offset:%d) (%s): (%s)\n", msg.Topic, msg.Partition, msg.Offset, msg.Key, msg.Value)
loghub.Input() <- msg
// consumer.MarkOffset(msg, "completed") // mark message as processed
// fmt.Println("kafka consumer HighWaterMarks", consumer.HighWaterMarks())
}
case err, more := <-consumer.Errors():
if more {
fmt.Printf("Kafka consumer error: %v\n", err.Error())
}
case ntf, more := <-consumer.Notifications():
if more {
fmt.Printf("Kafka consumer rebalance: %v\n", ntf)
}
case <-sig:
fmt.Errorf("Stop consumer server...")
consumer.Close()
return
}
}
}
func Stop(s os.Signal) {
fmt.Println("Recived kafka consumer stop signal...")
sig <- s
fmt.Println("kafka consumer stopped!!!")
}
func main() {
signals := make(chan os.Signal, 1)
signal.Notify(signals, os.Interrupt)
Start()
select {
case s := <-signals:
Stop(s)
}
}
type Config struct {
Name string
LogProject struct {
Name string
Endpoint string
AccessKeyID string
AccessKeySecret string
}
MessageChannelBufferSize int
LogsBufferSize int
Topics []string
LogsBufferSize4Logstore int
Logstores []string
}
type Loghub struct {
Name string
*Config
consumer *cluster.Consumer
logproject *sls.LogProject
logstores map[string]*sls.LogStore
messages chan *sarama.ConsumerMessage
messageChannelBufferSize int
m sync.RWMutex
stop chan int
mlogstoreLogsBuffer sync.RWMutex
logsBuffer4Logstore map[string](chan *topicLog) // by logstore
logsBufferSize4Logstore int
mlogsBuffer sync.RWMutex
// logsBuffer map[string](chan *topicLog) // by topic and logstore
logsBuffer map[string]map[string]chan *topicLog // by topic and logstore
logsBufferSize int
topics []string
}
type topicLog struct {
topic string
log *sls.Log
cmsg *sarama.ConsumerMessage
}
func NewLoghub(cfg *Config, consumer *cluster.Consumer) *Loghub {
logproject := &sls.LogProject{
Name: cfg.LogProject.Name,
Endpoint: cfg.LogProject.Endpoint,
AccessKeyID: cfg.LogProject.AccessKeyID,
AccessKeySecret: cfg.LogProject.AccessKeySecret,
}
lbls := map[string](chan *topicLog){}
lbr := map[string]map[string](chan *topicLog){}
// lbr := map[string](chan *topicLog){}
// for _, tp := range cfg.Topics {
// lbr[tp] = make(chan *topicLog, cfg.LogsBufferSize)
// }
lh := &Loghub{
Name: cfg.Name,
Config: cfg,
consumer: consumer,
logproject: logproject,
messages: make(chan *sarama.ConsumerMessage, cfg.MessageChannelBufferSize),
messageChannelBufferSize: cfg.MessageChannelBufferSize,
stop: make(chan int),
logsBuffer4Logstore: lbls,
logsBufferSize4Logstore: cfg.LogsBufferSize4Logstore,
logstores: map[string]*sls.LogStore{},
logsBuffer: lbr,
logsBufferSize: cfg.LogsBufferSize,
topics: cfg.Topics,
}
return lh
}
func (l *Loghub) Run() {
// lss, err := l.logproject.ListLogStore()
// if err != nil {
// panic(err)
// }
// 开启日志库
for _, lsn := range l.Logstores {
_, err := l.getLogstore(lsn)
if err != nil {
fmt.Printf("Loghub Start failed (logstoreName=%s). err: %v\n", lsn, err)
panic(err)
}
// 分配到topic
go l.dispatchToTopic(lsn)
for _, tp := range l.topics {
go l.processTopicMsg(lsn, tp)
}
}
// 分配消息
go l.dispatch()
}
func (l *Loghub) Input() chan<- *sarama.ConsumerMessage {
return l.messages
}
// dispatchToTopic
func (l *Loghub) dispatchToTopic(logstoreName string) {
// TODO: 处理消息, 分配到不同的topic
channelBuffer := l.getLogstoreLogsBufferChannel(logstoreName)
for {
select {
case log := <-channelBuffer:
l.mlogsBuffer.Lock()
logsCB, ok := l.logsBuffer[logstoreName]
if !ok || logsCB == nil {
logsCB = map[string]chan *topicLog{}
l.logsBuffer[logstoreName] = logsCB
}
logsCBTopic, ok := logsCB[log.topic]
if !ok || logsCBTopic == nil {
logsCBTopic = make(chan *topicLog, l.logsBufferSize)
logsCB[log.topic] = logsCBTopic
}
l.mlogsBuffer.Unlock()
logsCBTopic <- log
}
}
}
// dispatch
// 分配消息
func (l *Loghub) dispatch() error {
// TODO: logproject, logstore, topic
// 指定logproject和logstore进行分配
for {
select {
case msg := <-l.messages:
data, err := unserialize(msg)
if err != nil {
fmt.Println(err)
continue
}
logprojectName, ok1 := data["logproject"]
if !ok1 || logprojectName == "" {
fmt.Println("loghub.dispatch: data[\"logproject\"] was empty")
continue
}
logstoreName, ok2 := data["logstore"]
if !ok2 || logstoreName == "" {
fmt.Println("loghub.dispatch: data[\"logstore\"] was empty")
continue
}
topic, ok3 := data["topic"]
if !ok3 || topic == "" {
fmt.Println("loghub.dispatch: data[\"topic\"] was empty")
continue
}
log, err := generateLog(data)
if err != nil {
fmt.Println(err)
continue
}
// logstore, err := l.getLogstore(logstoreName)
// if err != nil {
// fmt.Println(err)
// continue
// }
log.cmsg = msg
lblsc := l.getLogstoreLogsBufferChannel(logstoreName)
select {
// TODO: 考虑优化处理, lblsc如果满了的情况
case lblsc <- log:
}
}
}
}
func (l *Loghub) Stop() {
l.stop <- 0
}
func (l *Loghub) processTopicMsg(topic string, logstoreName string) error {
cb := l.logsBuffer[logstoreName][topic]
for {
select {
case log := <-cb:
loggroup := generateLoggroupByTopicLog(log, "")
logstore, err := l.getLogstore(logstoreName)
if err != nil {
fmt.Println(err)
continue
}
err = putLogs(logstore, loggroup)
if err != nil {
fmt.Println(err)
continue
}
l.consumer.MarkOffset(log.cmsg, "loghub.processTopicMsg")
}
}
}
func (l *Loghub) getLogstore(logstoreName string) (*sls.LogStore, error) {
var logstore *sls.LogStore
l.m.RLock()
logstore = l.logstores[logstoreName]
l.m.RUnlock()
if logstore != nil {
return logstore, nil
}
var err error
for retry_times := 0; ; retry_times++ {
if retry_times > 5 {
return nil, errors.New("GetLogStore retry_times > 5")
}
logstore, err = l.logproject.GetLogStore(logstoreName)
if err != nil {
fmt.Printf("GetLogStore fail, retry:%d, err:%v\n", retry_times, err)
if strings.Contains(err.Error(), sls.PROJECT_NOT_EXIST) {
return nil, err
} else if strings.Contains(err.Error(), sls.LOGSTORE_NOT_EXIST) {
err = l.logproject.CreateLogStore(logstoreName, 1, 2)
if err != nil {
fmt.Printf("CreateLogStore fail, err: ", err.Error())
return nil, err
} else {
fmt.Println("CreateLogStore success")
l.m.Lock()
l.logstores[logstoreName] = logstore
l.m.Unlock()
return logstore, nil
}
}
} else {
fmt.Printf("GetLogStore success, retry:%d, name: %s, ttl: %d, shardCount: %d, createTime: %d, lastModifyTime: %d\n", retry_times, logstore.Name, logstore.TTL, logstore.ShardCount, logstore.CreateTime, logstore.LastModifyTime)
l.m.Lock()
l.logstores[logstoreName] = logstore
l.m.Unlock()
return logstore, nil
}
}
}
func (l *Loghub) getLogstoreLogsBufferChannel(logstoreName string) chan *topicLog {
l.mlogstoreLogsBuffer.RLock()
slslchan, ok := l.logsBuffer4Logstore[logstoreName]
l.mlogstoreLogsBuffer.RUnlock()
if !ok || slslchan == nil {
slslchan = make(chan *topicLog, l.logsBufferSize4Logstore)
l.mlogstoreLogsBuffer.Lock()
l.logsBuffer4Logstore[logstoreName] = slslchan
l.mlogstoreLogsBuffer.Unlock()
}
return slslchan
}
func generateLoggroupByTopicLog(tlog *topicLog, source string) *sls.LogGroup {
logs := []*sls.Log{tlog.log}
loggroup := &sls.LogGroup{
Topic: proto.String(tlog.topic),
Source: proto.String(source),
Logs: logs,
}
return loggroup
}
func unserialize(msg *sarama.ConsumerMessage) (map[string]string, error) {
var err error
data := map[string]string{}
err = json.Unmarshal(msg.Value, &data)
if err != nil {
fmt.Printf("[unserialize sarama.ConsumerMessage] json.Unmarshal err: %v\n", err)
return nil, err
}
return data, nil
}
func generateLog(data map[string]string) (*topicLog, error) {
contents := []*sls.LogContent{}
for k, v := range data {
contents = append(contents, &sls.LogContent{
Key: proto.String(k),
Value: proto.String(v),
})
}
t, e := time.Parse("2006-01-02T15:04:05+08:00", data["@timestamp"])
if e != nil {
t = time.Now()
}
log := &sls.Log{
Time: proto.Uint32(uint32(t.Unix())),
Contents: contents,
}
tplog := &topicLog{
topic: data["topic"],
log: log,
}
return tplog, nil
}
func putLogs(logstore *sls.LogStore, loggroup *sls.LogGroup) error {
var retry_times int
var err error
// PostLogStoreLogs API Ref: https://intl.aliyun.com/help/doc-detail/29026.htm
for retry_times = 0; retry_times < 10; retry_times++ {
err = logstore.PutLogs(loggroup)
if err == nil {
fmt.Printf("PutLogs success, retry: %d\n", retry_times)
return nil
}
fmt.Printf("PutLogs fail, retry: %d, err: %s\n", retry_times, err)
//handle exception here, you can add retryable erorrCode, set appropriate put_retry
if strings.Contains(err.Error(), sls.WRITE_QUOTA_EXCEED) || strings.Contains(err.Error(), sls.PROJECT_QUOTA_EXCEED) || strings.Contains(err.Error(), sls.SHARD_WRITE_QUOTA_EXCEED) {
//you should split shard
} else if strings.Contains(err.Error(), sls.INTERNAL_SERVER_ERROR) || strings.Contains(err.Error(), sls.SERVER_BUSY) {
}
continue
}
return err
}
// func PutMsg(msg *sarama.ConsumerMessage) error {
// var err error
// data := map[string]string{}
// err = json.Unmarshal(msg.Value, &data)
// if err != nil {
// fmt.Printf("[PutMsg] json.Unmarshal err: ", err)
// return err
// }
// loggroup := generateLoggroupByTopic([]map[string]string{data}, msg.Topic, "")
// err = putLogs(logstore, loggroup)
// if err != nil {
// fmt.Printf("loghub putlogs failed. err:%v\n", err)
// return err
// }
// return nil
// }
// func PutMsgs(msgs []*sarama.ConsumerMessage) {
// fmt.Println("loghub sample begin")
// var err error
// // put logs to logstore
// datas := []map[string]string{
// {"topic": "gateway-error", "key": strconv.FormatInt(time.Now().UnixNano(), 10), "message": "hello world!"},
// {"topic": "gateway-error", "key": strconv.FormatInt(time.Now().UnixNano(), 10), "message": "hello world!"},
// {"topic": "gateway-error", "key": strconv.FormatInt(time.Now().UnixNano(), 10), "message": "hello world!"},
// }
// loggroup := generateLoggroupByTopic(datas, "gateway-error")
// err = putLogs(logstore, loggroup)
// if err != nil {
// fmt.Printf("loghub putlogs failed. err:%v\n", err)
// }
// fmt.Println("loghub sample end")
// }
// func generateLoggroupByTopic(datas []map[string]string, topic string, source string) *sls.LogGroup {
// logs := []*sls.Log{}
// for _, data := range datas {
// contents := []*sls.LogContent{}
// for k, v := range data {
// contents = append(contents, &sls.LogContent{
// Key: proto.String(k),
// Value: proto.String(v),
// })
// }
// log := &sls.Log{
// Time: proto.Uint32(uint32(time.Now().Unix())),
// Contents: contents,
// }
// logs = append(logs, log)
// }
// loggroup := &sls.LogGroup{
// Topic: proto.String(topic),
// Source: proto.String(source),
// Logs: logs,
// }
// return loggroup
// }
// func generateLog(msg *sarama.ConsumerMessage) (*sls.Log, error) {
// var err error
// data := map[string]string{}
// err = json.Unmarshal(msg.Value, &data)
// if err != nil {
// fmt.Printf("[generateLog] json.Unmarshal err: %v\n", err)
// return nil, err
// }
// contents := []*sls.LogContent{}
// for k, v := range data {
// contents = append(contents, &sls.LogContent{
// Key: proto.String(k),
// Value: proto.String(v),
// })
// }
// t, e := time.Parse("2006-01-02T15:04:05+08:00", data["@timestamp"])
// if e != nil {
// t = time.Now()
// }
// log := &sls.Log{
// Time: proto.Uint32(uint32(t.Unix())),
// Contents: contents,
// }
// return log, nil
// }
| consume | identifier_name |
main.go | package main
import (
"crypto/tls"
"crypto/x509"
"encoding/json"
"errors"
"flag"
"fmt"
"io/ioutil"
"os"
"strings"
"sync"
"time"
"os/signal"
"go-learning/aliyunkafka"
"github.com/Shopify/sarama"
sls "github.com/aliyun/aliyun-log-go-sdk"
"github.com/bsm/sarama-cluster"
"github.com/gogo/protobuf/proto"
)
var cfg *configs.MqConfig
var consumer *cluster.Consumer
var sig chan os.Signal
var loghub *Loghub
func init() {
fmt.Println("init kafka consumer")
var err error
// loghub
lhcfg := &Config{}
flag.StringVar(&lhcfg.LogProject.Name, "projectname", "epaper", "loghub project name")
flag.StringVar(&lhcfg.LogProject.Endpoint, "endpoint", "cn-beijing.log.aliyuncs.com", "loghub endpoint")
flag.StringVar(&lhcfg.LogProject.AccessKeyID, "accesskeyid", "", "loghub AccessKeyID")
flag.StringVar(&lhcfg.LogProject.AccessKeySecret, "accesskeysecret", "", "loghub AccessKeySecret")
cfg := &configs.MqConfig{}
configs.LoadJsonConfig(cfg, "mq.json")
flag.StringVar(&cfg.Ak, "ak", "", "access key")
flag.StringVar(&cfg.Password, "password", "", "password")
flag.Parse()
fmt.Printf("load config: %v\n", cfg)
clusterCfg := cluster.NewConfig()
clusterCfg.Net.SASL.Enable = true
clusterCfg.Net.SASL.User = cfg.Ak
clusterCfg.Net.SASL.Password = cfg.Password
clusterCfg.Net.SASL.Handshake = true
certBytes, err := ioutil.ReadFile(configs.GetFullPath(cfg.CertFile))
clientCertPool := x509.NewCertPool()
ok := clientCertPool.AppendCertsFromPEM(certBytes)
if !ok {
panic("kafka consumer failed to parse root certificate")
}
clusterCfg.Net.TLS.Config = &tls.Config{
//Certificates: []tls.Certificate{},
RootCAs: clientCertPool,
InsecureSkipVerify: true,
}
clusterCfg.Net.TLS.Enable = true
clusterCfg.Consumer.Return.Errors = true
clusterCfg.Consumer.Offsets.Initial = sarama.OffsetOldest
clusterCfg.Group.Return.Notifications = true
clusterCfg.ChannelBufferSize = 1024
clusterCfg.Version = sarama.V0_10_0_0
if err = clusterCfg.Validate(); err != nil {
msg := fmt.Sprintf("Kafka consumer config invalidate. config: %v. err: %v", *clusterCfg, err)
fmt.Println(msg)
panic(msg)
}
consumer, err = cluster.NewConsumer(cfg.Servers, cfg.ConsumerId, cfg.Topics, clusterCfg)
if err != nil {
msg := fmt.Sprintf("Create kafka consumer error: %v. config: %v", err, clusterCfg)
fmt.Println(msg)
panic(msg)
}
sig = make(chan os.Signal, 1)
// loghub
loghub = NewLoghub(lhcfg, consumer)
loghub.Run()
}
func Start() {
go consume()
}
// consume pumps the shared cluster consumer until a stop signal arrives.
//
// Messages are forwarded to the loghub pipeline (offsets are marked there
// after a successful upload); consumer errors and rebalance notifications
// are logged. When one of the consumer channels is closed it is nil-ed out
// so the select no longer busy-spins on the closed channel.
func consume() {
	messages := consumer.Messages()
	errorsCh := consumer.Errors()
	notifications := consumer.Notifications()
	for {
		select {
		case msg, more := <-messages:
			if !more {
				messages = nil // closed: stop selecting on it
				continue
			}
			fmt.Printf("kafka consumer msg: (topic:%s) (partition:%d) (offset:%d) (%s): (%s)\n", msg.Topic, msg.Partition, msg.Offset, msg.Key, msg.Value)
			loghub.Input() <- msg
		case err, more := <-errorsCh:
			if !more {
				errorsCh = nil
				continue
			}
			fmt.Printf("Kafka consumer error: %v\n", err.Error())
		case ntf, more := <-notifications:
			if !more {
				notifications = nil
				continue
			}
			fmt.Printf("Kafka consumer rebalance: %v\n", ntf)
		case <-sig:
			// BUG fix: the original used fmt.Errorf here, which only builds
			// an error value and discards it — it never prints anything.
			fmt.Println("Stop consumer server...")
			consumer.Close()
			return
		}
	}
}
func Stop(s os.Signal) |
// main installs an interrupt handler, starts the consumer loop, and blocks
// until the first signal arrives, then performs an orderly shutdown.
func main() {
	interrupts := make(chan os.Signal, 1)
	signal.Notify(interrupts, os.Interrupt)
	Start()
	// A select with a single receive case is just a receive.
	Stop(<-interrupts)
}
// Config carries all settings for the Kafka -> Aliyun SLS log forwarder.
type Config struct {
// Name is a human-readable identifier for this forwarder instance.
Name string
// LogProject holds the Aliyun SLS project endpoint and credentials.
LogProject struct {
Name string
Endpoint string
AccessKeyID string
AccessKeySecret string
}
// MessageChannelBufferSize sizes the channel of raw Kafka messages.
MessageChannelBufferSize int
// LogsBufferSize sizes each per-(logstore, topic) log channel.
LogsBufferSize int
// Topics lists the Kafka topics whose logs are fanned out per logstore.
Topics []string
// LogsBufferSize4Logstore sizes each per-logstore buffer channel.
LogsBufferSize4Logstore int
// Logstores lists the SLS logstores served by this forwarder.
Logstores []string
}
// Loghub is the pipeline that moves Kafka messages into Aliyun SLS:
// dispatch() routes raw messages to a per-logstore buffer,
// dispatchToTopic() fans each buffer out per topic, and
// processTopicMsg() uploads each (logstore, topic) stream.
type Loghub struct {
Name string
*Config
// consumer is used only to mark offsets after a successful upload.
consumer *cluster.Consumer
logproject *sls.LogProject
// logstores caches logstore handles by name; guarded by m.
logstores map[string]*sls.LogStore
// messages receives raw Kafka messages via Input().
messages chan *sarama.ConsumerMessage
messageChannelBufferSize int
m sync.RWMutex
// stop is the shutdown channel used by Stop().
stop chan int
// mlogstoreLogsBuffer guards logsBuffer4Logstore.
mlogstoreLogsBuffer sync.RWMutex
logsBuffer4Logstore map[string](chan *topicLog) // by logstore
logsBufferSize4Logstore int
// mlogsBuffer guards logsBuffer.
mlogsBuffer sync.RWMutex
// logsBuffer map[string](chan *topicLog) // by topic and logstore
logsBuffer map[string]map[string]chan *topicLog // by topic and logstore
logsBufferSize int
topics []string
}
// topicLog pairs a parsed SLS log with its topic and the original Kafka
// message (kept so the offset can be marked after a successful upload).
type topicLog struct {
topic string
log *sls.Log
cmsg *sarama.ConsumerMessage
}
// NewLoghub wires a Loghub pipeline around the given configuration and an
// already-connected cluster consumer. It only builds the struct; call Run
// to start the pipeline goroutines.
func NewLoghub(cfg *Config, consumer *cluster.Consumer) *Loghub {
	project := &sls.LogProject{
		Name:            cfg.LogProject.Name,
		Endpoint:        cfg.LogProject.Endpoint,
		AccessKeyID:     cfg.LogProject.AccessKeyID,
		AccessKeySecret: cfg.LogProject.AccessKeySecret,
	}
	return &Loghub{
		Name:                     cfg.Name,
		Config:                   cfg,
		consumer:                 consumer,
		logproject:               project,
		logstores:                map[string]*sls.LogStore{},
		messages:                 make(chan *sarama.ConsumerMessage, cfg.MessageChannelBufferSize),
		messageChannelBufferSize: cfg.MessageChannelBufferSize,
		stop:                     make(chan int),
		logsBuffer4Logstore:      map[string](chan *topicLog){},
		logsBufferSize4Logstore:  cfg.LogsBufferSize4Logstore,
		logsBuffer:               map[string]map[string](chan *topicLog){},
		logsBufferSize:           cfg.LogsBufferSize,
		topics:                   cfg.Topics,
	}
}
// Run spins up the pipeline goroutines:
//   - one dispatchToTopic goroutine per configured logstore,
//   - one processTopicMsg goroutine per (logstore, topic) pair,
//   - a single dispatch goroutine feeding the per-logstore buffers.
// It panics if any configured logstore cannot be opened or created.
func (l *Loghub) Run() {
	for _, logstoreName := range l.Logstores {
		if _, err := l.getLogstore(logstoreName); err != nil {
			fmt.Printf("Loghub Start failed (logstoreName=%s). err: %v\n", logstoreName, err)
			panic(err)
		}
		// Fan the logstore buffer out per topic.
		go l.dispatchToTopic(logstoreName)
		for _, topic := range l.topics {
			// BUG fix: processTopicMsg's signature is (topic, logstoreName);
			// the original call passed (logstoreName, topic), swapping them.
			go l.processTopicMsg(topic, logstoreName)
		}
	}
	// Route incoming Kafka messages to the per-logstore buffers.
	go l.dispatch()
}
// Input returns the send-only channel on which raw Kafka messages are
// handed to the pipeline; dispatch() consumes the other end.
func (l *Loghub) Input() chan<- *sarama.ConsumerMessage {
return l.messages
}
// dispatchToTopic drains the buffer channel of one logstore and fans each
// log out to a per-topic channel, creating topic channels on first use
// under mlogsBuffer.
func (l *Loghub) dispatchToTopic(logstoreName string) {
	in := l.getLogstoreLogsBufferChannel(logstoreName)
	for entry := range in {
		l.mlogsBuffer.Lock()
		byTopic := l.logsBuffer[logstoreName]
		if byTopic == nil {
			byTopic = map[string]chan *topicLog{}
			l.logsBuffer[logstoreName] = byTopic
		}
		out := byTopic[entry.topic]
		if out == nil {
			out = make(chan *topicLog, l.logsBufferSize)
			byTopic[entry.topic] = out
		}
		l.mlogsBuffer.Unlock()
		// Send outside the lock so a full topic channel cannot stall
		// other goroutines that need mlogsBuffer.
		out <- entry
	}
}
// dispatch routes every incoming Kafka message to the buffer channel of
// the logstore named inside the message payload. Malformed payloads (bad
// JSON, or missing logproject/logstore/topic fields) are logged and
// skipped; their offsets are deliberately not marked.
func (l *Loghub) dispatch() error {
	for msg := range l.messages {
		payload, err := unserialize(msg)
		if err != nil {
			fmt.Println(err)
			continue
		}
		// A missing key yields "" from the map, so one emptiness check
		// covers both "absent" and "present but empty".
		if payload["logproject"] == "" {
			fmt.Println("loghub.dispatch: data[\"logproject\"] was empty")
			continue
		}
		logstoreName := payload["logstore"]
		if logstoreName == "" {
			fmt.Println("loghub.dispatch: data[\"logstore\"] was empty")
			continue
		}
		if payload["topic"] == "" {
			fmt.Println("loghub.dispatch: data[\"topic\"] was empty")
			continue
		}
		entry, err := generateLog(payload)
		if err != nil {
			fmt.Println(err)
			continue
		}
		entry.cmsg = msg
		// Blocking send: backpressure propagates to the Kafka consumer.
		l.getLogstoreLogsBufferChannel(logstoreName) <- entry
	}
	return nil
}
// Stop requests pipeline shutdown by signalling the stop channel.
//
// NOTE(review): nothing visible in this file ever receives from l.stop,
// so this call blocks forever on the unbuffered channel — confirm against
// the rest of the codebase before relying on it.
func (l *Loghub) Stop() {
l.stop <- 0
}
// processTopicMsg uploads the logs buffered for one (logstore, topic)
// pair. Each successfully shipped log has its Kafka offset marked so it
// is not re-delivered after a restart; failed uploads are logged and the
// offset is left unmarked.
func (l *Loghub) processTopicMsg(topic string, logstoreName string) error {
	// BUG fix: the original read l.logsBuffer[logstoreName][topic] without
	// holding mlogsBuffer and before dispatchToTopic had created the entry,
	// yielding a nil channel that blocks this goroutine forever. Create the
	// channel here under the lock, exactly as dispatchToTopic does, so both
	// sides agree on the same channel instance.
	l.mlogsBuffer.Lock()
	byTopic := l.logsBuffer[logstoreName]
	if byTopic == nil {
		byTopic = map[string]chan *topicLog{}
		l.logsBuffer[logstoreName] = byTopic
	}
	cb := byTopic[topic]
	if cb == nil {
		cb = make(chan *topicLog, l.logsBufferSize)
		byTopic[topic] = cb
	}
	l.mlogsBuffer.Unlock()
	for log := range cb {
		loggroup := generateLoggroupByTopicLog(log, "")
		logstore, err := l.getLogstore(logstoreName)
		if err != nil {
			fmt.Println(err)
			continue
		}
		if err = putLogs(logstore, loggroup); err != nil {
			fmt.Println(err)
			continue
		}
		l.consumer.MarkOffset(log.cmsg, "loghub.processTopicMsg")
	}
	return nil
}
// getLogstore returns the (cached) handle for logstoreName, creating the
// logstore on the service side if it does not exist yet. It retries up to
// 6 attempts and fails fast when the whole project is missing.
func (l *Loghub) getLogstore(logstoreName string) (*sls.LogStore, error) {
	l.m.RLock()
	cached := l.logstores[logstoreName]
	l.m.RUnlock()
	if cached != nil {
		return cached, nil
	}
	for retryTimes := 0; retryTimes <= 5; retryTimes++ {
		logstore, err := l.logproject.GetLogStore(logstoreName)
		if err == nil {
			fmt.Printf("GetLogStore success, retry:%d, name: %s, ttl: %d, shardCount: %d, createTime: %d, lastModifyTime: %d\n", retryTimes, logstore.Name, logstore.TTL, logstore.ShardCount, logstore.CreateTime, logstore.LastModifyTime)
			l.m.Lock()
			l.logstores[logstoreName] = logstore
			l.m.Unlock()
			return logstore, nil
		}
		fmt.Printf("GetLogStore fail, retry:%d, err:%v\n", retryTimes, err)
		if strings.Contains(err.Error(), sls.PROJECT_NOT_EXIST) {
			// The project itself is missing; retrying cannot help.
			return nil, err
		}
		if strings.Contains(err.Error(), sls.LOGSTORE_NOT_EXIST) {
			if cerr := l.logproject.CreateLogStore(logstoreName, 1, 2); cerr != nil {
				// BUG fix: the original fmt.Printf had no format verb, so
				// the error text was never rendered.
				fmt.Printf("CreateLogStore fail, err: %v\n", cerr.Error())
				return nil, cerr
			}
			// BUG fix: the original cached and returned the still-nil
			// `logstore` right after creating the store. Loop again so the
			// next GetLogStore fetches the newly created store instead.
			fmt.Println("CreateLogStore success")
		}
	}
	return nil, errors.New("GetLogStore retry_times > 5")
}
// getLogstoreLogsBufferChannel returns the buffer channel for one
// logstore, creating it on first use.
func (l *Loghub) getLogstoreLogsBufferChannel(logstoreName string) chan *topicLog {
	// Fast path: channel already exists.
	l.mlogstoreLogsBuffer.RLock()
	ch := l.logsBuffer4Logstore[logstoreName]
	l.mlogstoreLogsBuffer.RUnlock()
	if ch != nil {
		return ch
	}
	// BUG fix: re-check under the write lock. The original created the
	// channel after dropping the read lock without re-checking, so two
	// goroutines could race and install different channels for the same
	// logstore, stranding any logs buffered in the losing channel.
	l.mlogstoreLogsBuffer.Lock()
	defer l.mlogstoreLogsBuffer.Unlock()
	if ch = l.logsBuffer4Logstore[logstoreName]; ch == nil {
		ch = make(chan *topicLog, l.logsBufferSize4Logstore)
		l.logsBuffer4Logstore[logstoreName] = ch
	}
	return ch
}
// generateLoggroupByTopicLog wraps a single topicLog in a LogGroup ready
// for upload; source may be empty.
func generateLoggroupByTopicLog(tlog *topicLog, source string) *sls.LogGroup {
	return &sls.LogGroup{
		Topic:  proto.String(tlog.topic),
		Source: proto.String(source),
		Logs:   []*sls.Log{tlog.log},
	}
}
// unserialize decodes a Kafka message value as a flat JSON object of
// string keys to string values.
func unserialize(msg *sarama.ConsumerMessage) (map[string]string, error) {
	decoded := map[string]string{}
	if err := json.Unmarshal(msg.Value, &decoded); err != nil {
		fmt.Printf("[unserialize sarama.ConsumerMessage] json.Unmarshal err: %v\n", err)
		return nil, err
	}
	return decoded, nil
}
// generateLog converts a decoded payload into a topicLog entry. Every
// key/value pair becomes one log content field; the log timestamp is
// taken from the "@timestamp" field when parseable, otherwise "now".
func generateLog(data map[string]string) (*topicLog, error) {
	contents := make([]*sls.LogContent, 0, len(data))
	for k, v := range data {
		contents = append(contents, &sls.LogContent{
			Key:   proto.String(k),
			Value: proto.String(v),
		})
	}
	// BUG fix: the original layout "2006-01-02T15:04:05+08:00" is not a
	// valid Go reference layout — "+08:00" is not a zone token, so it was
	// matched as literal text and the offset was never parsed (the time
	// was silently interpreted without its zone). time.RFC3339 accepts
	// the same input and parses the numeric offset correctly.
	t, err := time.Parse(time.RFC3339, data["@timestamp"])
	if err != nil {
		t = time.Now()
	}
	return &topicLog{
		topic: data["topic"],
		log: &sls.Log{
			Time:     proto.Uint32(uint32(t.Unix())),
			Contents: contents,
		},
	}, nil
}
// putLogs ships one LogGroup to the logstore with up to 10 attempts and a
// short linear backoff between them. Returns nil on the first success,
// otherwise the last error.
// PostLogStoreLogs API Ref: https://intl.aliyun.com/help/doc-detail/29026.htm
func putLogs(logstore *sls.LogStore, loggroup *sls.LogGroup) error {
	var err error
	for attempt := 0; attempt < 10; attempt++ {
		if err = logstore.PutLogs(loggroup); err == nil {
			fmt.Printf("PutLogs success, retry: %d\n", attempt)
			return nil
		}
		fmt.Printf("PutLogs fail, retry: %d, err: %s\n", attempt, err)
		if strings.Contains(err.Error(), sls.WRITE_QUOTA_EXCEED) ||
			strings.Contains(err.Error(), sls.PROJECT_QUOTA_EXCEED) ||
			strings.Contains(err.Error(), sls.SHARD_WRITE_QUOTA_EXCEED) {
			// TODO: split the shard — retrying alone cannot clear a quota error.
		}
		// BUG fix: the original retried 10 times in a tight loop with no
		// delay; back off a little so a busy server has a chance to recover.
		time.Sleep(time.Duration(attempt+1) * 100 * time.Millisecond)
	}
	return err
}
// func PutMsg(msg *sarama.ConsumerMessage) error {
// var err error
// data := map[string]string{}
// err = json.Unmarshal(msg.Value, &data)
// if err != nil {
// fmt.Printf("[PutMsg] json.Unmarshal err: ", err)
// return err
// }
// loggroup := generateLoggroupByTopic([]map[string]string{data}, msg.Topic, "")
// err = putLogs(logstore, loggroup)
// if err != nil {
// fmt.Printf("loghub putlogs failed. err:%v\n", err)
// return err
// }
// return nil
// }
// func PutMsgs(msgs []*sarama.ConsumerMessage) {
// fmt.Println("loghub sample begin")
// var err error
// // put logs to logstore
// datas := []map[string]string{
// {"topic": "gateway-error", "key": strconv.FormatInt(time.Now().UnixNano(), 10), "message": "hello world!"},
// {"topic": "gateway-error", "key": strconv.FormatInt(time.Now().UnixNano(), 10), "message": "hello world!"},
// {"topic": "gateway-error", "key": strconv.FormatInt(time.Now().UnixNano(), 10), "message": "hello world!"},
// }
// loggroup := generateLoggroupByTopic(datas, "gateway-error")
// err = putLogs(logstore, loggroup)
// if err != nil {
// fmt.Printf("loghub putlogs failed. err:%v\n", err)
// }
// fmt.Println("loghub sample end")
// }
// func generateLoggroupByTopic(datas []map[string]string, topic string, source string) *sls.LogGroup {
// logs := []*sls.Log{}
// for _, data := range datas {
// contents := []*sls.LogContent{}
// for k, v := range data {
// contents = append(contents, &sls.LogContent{
// Key: proto.String(k),
// Value: proto.String(v),
// })
// }
// log := &sls.Log{
// Time: proto.Uint32(uint32(time.Now().Unix())),
// Contents: contents,
// }
// logs = append(logs, log)
// }
// loggroup := &sls.LogGroup{
// Topic: proto.String(topic),
// Source: proto.String(source),
// Logs: logs,
// }
// return loggroup
// }
// func generateLog(msg *sarama.ConsumerMessage) (*sls.Log, error) {
// var err error
// data := map[string]string{}
// err = json.Unmarshal(msg.Value, &data)
// if err != nil {
// fmt.Printf("[generateLog] json.Unmarshal err: %v\n", err)
// return nil, err
// }
// contents := []*sls.LogContent{}
// for k, v := range data {
// contents = append(contents, &sls.LogContent{
// Key: proto.String(k),
// Value: proto.String(v),
// })
// }
// t, e := time.Parse("2006-01-02T15:04:05+08:00", data["@timestamp"])
// if e != nil {
// t = time.Now()
// }
// log := &sls.Log{
// Time: proto.Uint32(uint32(t.Unix())),
// Contents: contents,
// }
// return log, nil
// }
| {
fmt.Println("Recived kafka consumer stop signal...")
sig <- s
fmt.Println("kafka consumer stopped!!!")
} | identifier_body |
main.go | package main
import (
"crypto/tls"
"crypto/x509"
"encoding/json"
"errors"
"flag"
"fmt"
"io/ioutil"
"os"
"strings"
"sync"
"time"
"os/signal"
"go-learning/aliyunkafka"
"github.com/Shopify/sarama"
sls "github.com/aliyun/aliyun-log-go-sdk"
"github.com/bsm/sarama-cluster"
"github.com/gogo/protobuf/proto"
)
var cfg *configs.MqConfig
var consumer *cluster.Consumer
var sig chan os.Signal
var loghub *Loghub
func init() {
fmt.Println("init kafka consumer")
var err error
// loghub
lhcfg := &Config{}
flag.StringVar(&lhcfg.LogProject.Name, "projectname", "epaper", "loghub project name")
flag.StringVar(&lhcfg.LogProject.Endpoint, "endpoint", "cn-beijing.log.aliyuncs.com", "loghub endpoint")
flag.StringVar(&lhcfg.LogProject.AccessKeyID, "accesskeyid", "", "loghub AccessKeyID")
flag.StringVar(&lhcfg.LogProject.AccessKeySecret, "accesskeysecret", "", "loghub AccessKeySecret")
cfg := &configs.MqConfig{}
configs.LoadJsonConfig(cfg, "mq.json")
flag.StringVar(&cfg.Ak, "ak", "", "access key")
flag.StringVar(&cfg.Password, "password", "", "password")
flag.Parse()
fmt.Printf("load config: %v\n", cfg)
clusterCfg := cluster.NewConfig()
clusterCfg.Net.SASL.Enable = true
clusterCfg.Net.SASL.User = cfg.Ak
clusterCfg.Net.SASL.Password = cfg.Password
clusterCfg.Net.SASL.Handshake = true
certBytes, err := ioutil.ReadFile(configs.GetFullPath(cfg.CertFile))
clientCertPool := x509.NewCertPool()
ok := clientCertPool.AppendCertsFromPEM(certBytes)
if !ok {
panic("kafka consumer failed to parse root certificate")
}
clusterCfg.Net.TLS.Config = &tls.Config{
//Certificates: []tls.Certificate{},
RootCAs: clientCertPool,
InsecureSkipVerify: true,
}
clusterCfg.Net.TLS.Enable = true
clusterCfg.Consumer.Return.Errors = true
clusterCfg.Consumer.Offsets.Initial = sarama.OffsetOldest
clusterCfg.Group.Return.Notifications = true
clusterCfg.ChannelBufferSize = 1024
clusterCfg.Version = sarama.V0_10_0_0
if err = clusterCfg.Validate(); err != nil {
msg := fmt.Sprintf("Kafka consumer config invalidate. config: %v. err: %v", *clusterCfg, err)
fmt.Println(msg)
panic(msg)
}
consumer, err = cluster.NewConsumer(cfg.Servers, cfg.ConsumerId, cfg.Topics, clusterCfg)
if err != nil {
msg := fmt.Sprintf("Create kafka consumer error: %v. config: %v", err, clusterCfg)
fmt.Println(msg)
panic(msg)
}
sig = make(chan os.Signal, 1)
// loghub
loghub = NewLoghub(lhcfg, consumer)
loghub.Run()
}
func Start() {
go consume()
}
func consume() {
for {
select {
case msg, more := <-consumer.Messages():
if more {
fmt.Printf("kafka consumer msg: (topic:%s) (partition:%d) (offset:%d) (%s): (%s)\n", msg.Topic, msg.Partition, msg.Offset, msg.Key, msg.Value)
loghub.Input() <- msg
// consumer.MarkOffset(msg, "completed") // mark message as processed
// fmt.Println("kafka consumer HighWaterMarks", consumer.HighWaterMarks())
}
case err, more := <-consumer.Errors():
if more {
fmt.Printf("Kafka consumer error: %v\n", err.Error())
}
case ntf, more := <-consumer.Notifications():
if more {
fmt.Printf("Kafka consumer rebalance: %v\n", ntf)
}
case <-sig:
fmt.Errorf("Stop consumer server...")
consumer.Close()
return
}
}
}
// Stop forwards the received OS signal to the consume loop. Note that the
// actual consumer shutdown happens asynchronously inside consume(); this
// function only guarantees the signal was delivered.
func Stop(s os.Signal) {
	// BUG fix: the original messages had a typo ("Recived") and claimed the
	// consumer had "stopped!!!" when only the signal had been delivered.
	fmt.Println("Received kafka consumer stop signal...")
	sig <- s
	fmt.Println("kafka consumer stop signal delivered")
}
func main() {
signals := make(chan os.Signal, 1)
signal.Notify(signals, os.Interrupt)
Start()
select {
case s := <-signals:
Stop(s)
}
}
type Config struct {
Name string
LogProject struct {
Name string
Endpoint string
AccessKeyID string
AccessKeySecret string
}
MessageChannelBufferSize int
LogsBufferSize int
Topics []string
LogsBufferSize4Logstore int
Logstores []string
}
type Loghub struct {
Name string
*Config
consumer *cluster.Consumer
logproject *sls.LogProject
logstores map[string]*sls.LogStore
messages chan *sarama.ConsumerMessage
messageChannelBufferSize int
m sync.RWMutex
stop chan int
mlogstoreLogsBuffer sync.RWMutex
logsBuffer4Logstore map[string](chan *topicLog) // by logstore
logsBufferSize4Logstore int
mlogsBuffer sync.RWMutex
// logsBuffer map[string](chan *topicLog) // by topic and logstore
logsBuffer map[string]map[string]chan *topicLog // by topic and logstore
logsBufferSize int
topics []string
}
type topicLog struct {
topic string
log *sls.Log
cmsg *sarama.ConsumerMessage
}
func NewLoghub(cfg *Config, consumer *cluster.Consumer) *Loghub {
logproject := &sls.LogProject{
Name: cfg.LogProject.Name,
Endpoint: cfg.LogProject.Endpoint,
AccessKeyID: cfg.LogProject.AccessKeyID,
AccessKeySecret: cfg.LogProject.AccessKeySecret,
}
lbls := map[string](chan *topicLog){}
lbr := map[string]map[string](chan *topicLog){}
// lbr := map[string](chan *topicLog){}
// for _, tp := range cfg.Topics {
// lbr[tp] = make(chan *topicLog, cfg.LogsBufferSize)
// }
lh := &Loghub{
Name: cfg.Name,
Config: cfg,
consumer: consumer,
logproject: logproject,
messages: make(chan *sarama.ConsumerMessage, cfg.MessageChannelBufferSize),
messageChannelBufferSize: cfg.MessageChannelBufferSize,
stop: make(chan int),
logsBuffer4Logstore: lbls,
logsBufferSize4Logstore: cfg.LogsBufferSize4Logstore,
logstores: map[string]*sls.LogStore{},
logsBuffer: lbr,
logsBufferSize: cfg.LogsBufferSize,
topics: cfg.Topics,
}
return lh
}
func (l *Loghub) Run() {
// lss, err := l.logproject.ListLogStore()
// if err != nil {
// panic(err)
// }
// 开启日志库
for _, lsn := range l.Logstores {
_, err | .dispatch()
}
func (l *Loghub) Input() chan<- *sarama.ConsumerMessage {
return l.messages
}
// dispatchToTopic
func (l *Loghub) dispatchToTopic(logstoreName string) {
// TODO: 处理消息, 分配到不同的topic
channelBuffer := l.getLogstoreLogsBufferChannel(logstoreName)
for {
select {
case log := <-channelBuffer:
l.mlogsBuffer.Lock()
logsCB, ok := l.logsBuffer[logstoreName]
if !ok || logsCB == nil {
logsCB = map[string]chan *topicLog{}
l.logsBuffer[logstoreName] = logsCB
}
logsCBTopic, ok := logsCB[log.topic]
if !ok || logsCBTopic == nil {
logsCBTopic = make(chan *topicLog, l.logsBufferSize)
logsCB[log.topic] = logsCBTopic
}
l.mlogsBuffer.Unlock()
logsCBTopic <- log
}
}
}
// dispatch
// 分配消息
func (l *Loghub) dispatch() error {
// TODO: logproject, logstore, topic
// 指定logproject和logstore进行分配
for {
select {
case msg := <-l.messages:
data, err := unserialize(msg)
if err != nil {
fmt.Println(err)
continue
}
logprojectName, ok1 := data["logproject"]
if !ok1 || logprojectName == "" {
fmt.Println("loghub.dispatch: data[\"logproject\"] was empty")
continue
}
logstoreName, ok2 := data["logstore"]
if !ok2 || logstoreName == "" {
fmt.Println("loghub.dispatch: data[\"logstore\"] was empty")
continue
}
topic, ok3 := data["topic"]
if !ok3 || topic == "" {
fmt.Println("loghub.dispatch: data[\"topic\"] was empty")
continue
}
log, err := generateLog(data)
if err != nil {
fmt.Println(err)
continue
}
// logstore, err := l.getLogstore(logstoreName)
// if err != nil {
// fmt.Println(err)
// continue
// }
log.cmsg = msg
lblsc := l.getLogstoreLogsBufferChannel(logstoreName)
select {
// TODO: 考虑优化处理, lblsc如果满了的情况
case lblsc <- log:
}
}
}
}
func (l *Loghub) Stop() {
l.stop <- 0
}
func (l *Loghub) processTopicMsg(topic string, logstoreName string) error {
cb := l.logsBuffer[logstoreName][topic]
for {
select {
case log := <-cb:
loggroup := generateLoggroupByTopicLog(log, "")
logstore, err := l.getLogstore(logstoreName)
if err != nil {
fmt.Println(err)
continue
}
err = putLogs(logstore, loggroup)
if err != nil {
fmt.Println(err)
continue
}
l.consumer.MarkOffset(log.cmsg, "loghub.processTopicMsg")
}
}
}
// getLogstore returns the (cached) handle for logstoreName, creating the
// logstore on the service side if it does not exist yet. It retries up to
// 6 attempts and fails fast when the whole project is missing.
func (l *Loghub) getLogstore(logstoreName string) (*sls.LogStore, error) {
	l.m.RLock()
	cached := l.logstores[logstoreName]
	l.m.RUnlock()
	if cached != nil {
		return cached, nil
	}
	for retryTimes := 0; retryTimes <= 5; retryTimes++ {
		logstore, err := l.logproject.GetLogStore(logstoreName)
		if err == nil {
			fmt.Printf("GetLogStore success, retry:%d, name: %s, ttl: %d, shardCount: %d, createTime: %d, lastModifyTime: %d\n", retryTimes, logstore.Name, logstore.TTL, logstore.ShardCount, logstore.CreateTime, logstore.LastModifyTime)
			l.m.Lock()
			l.logstores[logstoreName] = logstore
			l.m.Unlock()
			return logstore, nil
		}
		fmt.Printf("GetLogStore fail, retry:%d, err:%v\n", retryTimes, err)
		if strings.Contains(err.Error(), sls.PROJECT_NOT_EXIST) {
			// The project itself is missing; retrying cannot help.
			return nil, err
		}
		if strings.Contains(err.Error(), sls.LOGSTORE_NOT_EXIST) {
			if cerr := l.logproject.CreateLogStore(logstoreName, 1, 2); cerr != nil {
				// BUG fix: the original fmt.Printf had no format verb, so
				// the error text was never rendered.
				fmt.Printf("CreateLogStore fail, err: %v\n", cerr.Error())
				return nil, cerr
			}
			// BUG fix: the original cached and returned the still-nil
			// `logstore` right after creating the store. Loop again so the
			// next GetLogStore fetches the newly created store instead.
			fmt.Println("CreateLogStore success")
		}
	}
	return nil, errors.New("GetLogStore retry_times > 5")
}
func (l *Loghub) getLogstoreLogsBufferChannel(logstoreName string) chan *topicLog {
l.mlogstoreLogsBuffer.RLock()
slslchan, ok := l.logsBuffer4Logstore[logstoreName]
l.mlogstoreLogsBuffer.RUnlock()
if !ok || slslchan == nil {
slslchan = make(chan *topicLog, l.logsBufferSize4Logstore)
l.mlogstoreLogsBuffer.Lock()
l.logsBuffer4Logstore[logstoreName] = slslchan
l.mlogstoreLogsBuffer.Unlock()
}
return slslchan
}
func generateLoggroupByTopicLog(tlog *topicLog, source string) *sls.LogGroup {
logs := []*sls.Log{tlog.log}
loggroup := &sls.LogGroup{
Topic: proto.String(tlog.topic),
Source: proto.String(source),
Logs: logs,
}
return loggroup
}
func unserialize(msg *sarama.ConsumerMessage) (map[string]string, error) {
var err error
data := map[string]string{}
err = json.Unmarshal(msg.Value, &data)
if err != nil {
fmt.Printf("[unserialize sarama.ConsumerMessage] json.Unmarshal err: %v\n", err)
return nil, err
}
return data, nil
}
func generateLog(data map[string]string) (*topicLog, error) {
contents := []*sls.LogContent{}
for k, v := range data {
contents = append(contents, &sls.LogContent{
Key: proto.String(k),
Value: proto.String(v),
})
}
t, e := time.Parse("2006-01-02T15:04:05+08:00", data["@timestamp"])
if e != nil {
t = time.Now()
}
log := &sls.Log{
Time: proto.Uint32(uint32(t.Unix())),
Contents: contents,
}
tplog := &topicLog{
topic: data["topic"],
log: log,
}
return tplog, nil
}
func putLogs(logstore *sls.LogStore, loggroup *sls.LogGroup) error {
var retry_times int
var err error
// PostLogStoreLogs API Ref: https://intl.aliyun.com/help/doc-detail/29026.htm
for retry_times = 0; retry_times < 10; retry_times++ {
err = logstore.PutLogs(loggroup)
if err == nil {
fmt.Printf("PutLogs success, retry: %d\n", retry_times)
return nil
}
fmt.Printf("PutLogs fail, retry: %d, err: %s\n", retry_times, err)
//handle exception here, you can add retryable erorrCode, set appropriate put_retry
if strings.Contains(err.Error(), sls.WRITE_QUOTA_EXCEED) || strings.Contains(err.Error(), sls.PROJECT_QUOTA_EXCEED) || strings.Contains(err.Error(), sls.SHARD_WRITE_QUOTA_EXCEED) {
//you should split shard
} else if strings.Contains(err.Error(), sls.INTERNAL_SERVER_ERROR) || strings.Contains(err.Error(), sls.SERVER_BUSY) {
}
continue
}
return err
}
// func PutMsg(msg *sarama.ConsumerMessage) error {
// var err error
// data := map[string]string{}
// err = json.Unmarshal(msg.Value, &data)
// if err != nil {
// fmt.Printf("[PutMsg] json.Unmarshal err: ", err)
// return err
// }
// loggroup := generateLoggroupByTopic([]map[string]string{data}, msg.Topic, "")
// err = putLogs(logstore, loggroup)
// if err != nil {
// fmt.Printf("loghub putlogs failed. err:%v\n", err)
// return err
// }
// return nil
// }
// func PutMsgs(msgs []*sarama.ConsumerMessage) {
// fmt.Println("loghub sample begin")
// var err error
// // put logs to logstore
// datas := []map[string]string{
// {"topic": "gateway-error", "key": strconv.FormatInt(time.Now().UnixNano(), 10), "message": "hello world!"},
// {"topic": "gateway-error", "key": strconv.FormatInt(time.Now().UnixNano(), 10), "message": "hello world!"},
// {"topic": "gateway-error", "key": strconv.FormatInt(time.Now().UnixNano(), 10), "message": "hello world!"},
// }
// loggroup := generateLoggroupByTopic(datas, "gateway-error")
// err = putLogs(logstore, loggroup)
// if err != nil {
// fmt.Printf("loghub putlogs failed. err:%v\n", err)
// }
// fmt.Println("loghub sample end")
// }
// func generateLoggroupByTopic(datas []map[string]string, topic string, source string) *sls.LogGroup {
// logs := []*sls.Log{}
// for _, data := range datas {
// contents := []*sls.LogContent{}
// for k, v := range data {
// contents = append(contents, &sls.LogContent{
// Key: proto.String(k),
// Value: proto.String(v),
// })
// }
// log := &sls.Log{
// Time: proto.Uint32(uint32(time.Now().Unix())),
// Contents: contents,
// }
// logs = append(logs, log)
// }
// loggroup := &sls.LogGroup{
// Topic: proto.String(topic),
// Source: proto.String(source),
// Logs: logs,
// }
// return loggroup
// }
// func generateLog(msg *sarama.ConsumerMessage) (*sls.Log, error) {
// var err error
// data := map[string]string{}
// err = json.Unmarshal(msg.Value, &data)
// if err != nil {
// fmt.Printf("[generateLog] json.Unmarshal err: %v\n", err)
// return nil, err
// }
// contents := []*sls.LogContent{}
// for k, v := range data {
// contents = append(contents, &sls.LogContent{
// Key: proto.String(k),
// Value: proto.String(v),
// })
// }
// t, e := time.Parse("2006-01-02T15:04:05+08:00", data["@timestamp"])
// if e != nil {
// t = time.Now()
// }
// log := &sls.Log{
// Time: proto.Uint32(uint32(t.Unix())),
// Contents: contents,
// }
// return log, nil
// }
| := l.getLogstore(lsn)
if err != nil {
fmt.Printf("Loghub Start failed (logstoreName=%s). err: %v\n", lsn, err)
panic(err)
}
// 分配到topic
go l.dispatchToTopic(lsn)
for _, tp := range l.topics {
go l.processTopicMsg(lsn, tp)
}
}
// 分配消息
go l | conditional_block |
projectstate.rs | use std::collections::HashMap;
use std::fmt;
use std::mem;
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use std::time::Instant;
use chrono::{DateTime, Utc};
use parking_lot::RwLock;
use serde::de::DeserializeOwned;
use url::Url;
use uuid::Uuid;
use config::AortaConfig;
use event::StoreChangeset;
use query::{AortaQuery, GetProjectConfigQuery, QueryError, RequestManager};
use semaphore_common::ProjectId;
use upstream::UpstreamDescriptor;
/// These are config values that the user can modify in the UI.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
#[serde(rename_all = "camelCase")]
pub struct ProjectConfig {
/// URLs that are permitted for cross original JavaScript requests.
pub allowed_domains: Vec<String>,
}
/// The project state snapshot represents a known server state of
/// a project.
///
/// This is generally used by an indirection of `ProjectState` which
/// manages a view over it which supports concurrent updates in the
/// background.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ProjectStateSnapshot {
/// The timestamp of when the snapshot was received.
pub last_fetch: DateTime<Utc>,
/// The timestamp of when the last snapshot was changed.
///
/// This might be `None` in some rare cases like where snapshots
/// are faked locally.
pub last_change: Option<DateTime<Utc>>,
/// Indicates that the project is disabled.
pub disabled: bool,
/// A container of known public keys in the project.
pub public_keys: HashMap<String, bool>,
/// The project's slug if available.
pub slug: Option<String>,
/// The project's current config
pub config: ProjectConfig,
/// The project state's revision id.
pub rev: Option<Uuid>,
}
/// A helper enum indicating the public key state.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum PublicKeyStatus {
/// The state of the public key is not known.
///
/// This can indicate that the key is not yet known or that the
/// key just does not exist. We can not tell these two cases
/// apart as there is always a lag since the last update from the
/// upstream server. As such the project state uses a heuristic
/// to decide if it should treat a key as not existing or just
/// not yet known.
Unknown,
/// This key is known but was disabled.
Disabled,
/// This key is known and is enabled.
Enabled,
}
/// Indicates what should happen to events based on the public key
/// of a project.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum PublicKeyEventAction {
/// Indicates that this event should be queued.
Queue,
/// Indicates that this event should be discarded.
Discard,
/// Indicates that this event should be sent now.
Send,
}
/// An event that was not sent yet.
#[derive(Debug)]
struct PendingStore {
added_at: Instant,
changeset: StoreChangeset,
}
/// Gives access to the project's remote state.
///
/// This wrapper is sync and can be updated concurrently. As the type is
/// sync all of the methods can be used on a shared instance. The type
/// internally locks automatically.
#[derive(Debug)]
pub struct ProjectState {
config: Arc<AortaConfig>,
project_id: ProjectId,
current_snapshot: RwLock<Option<Arc<ProjectStateSnapshot>>>,
pending_stores: RwLock<Vec<PendingStore>>,
requested_new_snapshot: AtomicBool,
request_manager: Arc<RequestManager>,
last_event: RwLock<Option<DateTime<Utc>>>,
}
impl ProjectStateSnapshot {
/// Returns the current status of a key.
pub fn get_public_key_status(&self, public_key: &str) -> PublicKeyStatus {
match self.public_keys.get(public_key) {
Some(&true) => PublicKeyStatus::Enabled,
Some(&false) => PublicKeyStatus::Disabled,
None => PublicKeyStatus::Unknown,
}
}
/// Checks if a public key is enabled.
pub fn public_key_is_enabled(&self, public_key: &str) -> bool {
self.get_public_key_status(public_key) == PublicKeyStatus::Enabled
}
/// Returns `true` if the entire project should be considered
/// disabled (blackholed, deleted etc.).
pub fn disabled(&self) -> bool {
self.disabled
}
/// Returns true if the snapshot is outdated.
pub fn outdated(&self, config: &AortaConfig) -> bool {
// TODO(armin): change this to a value from the config
self.last_fetch < Utc::now() - config.snapshot_expiry
}
/// Returns the project config.
pub fn config(&self) -> &ProjectConfig {
&self.config
}
}
impl ProjectState {
/// Creates a new project state.
///
/// The project state is created without storing a snapshot. This means
/// that accessing the snapshot will panic until the data becomes available.
///
/// The config is taken as `Arc` so we can share it effectively across
/// multiple project states and troves.
pub fn new(
project_id: ProjectId,
config: Arc<AortaConfig>,
request_manager: Arc<RequestManager>,
) -> ProjectState {
ProjectState {
project_id: project_id,
config: config,
current_snapshot: RwLock::new(None),
pending_stores: RwLock::new(Vec::new()),
requested_new_snapshot: AtomicBool::new(false),
request_manager: request_manager,
last_event: RwLock::new(None),
}
}
/// Adds a query that should be issued with the next heartbeat.
pub fn add_query<Q, R, F, E>(&self, query: Q, callback: F) -> Uuid
where
Q: AortaQuery<Response = R>,
R: DeserializeOwned + 'static,
F: FnMut(&ProjectState, Result<R, QueryError>) -> Result<(), E> + Sync + Send + 'static,
E: fmt::Debug,
{
self.request_manager
.add_query(self.project_id(), query, callback)
}
/// The project ID of this project.
pub fn project_id(&self) -> ProjectId {
self.project_id
}
/// The direct upstream that reported the snapshot.
///
/// Currently an relay only ever has one trove and that trove can only have
/// one upstream descriptor. As a result of this, this descriptor will always
/// match the one of the trove which holds the project state.
pub fn upstream(&self) -> &UpstreamDescriptor {
&self.config.upstream
}
/// Returns the time of the last event received (but not forwarded).
///
/// This timestamp is used to indicate that the project has activity
/// for the trove. As the trove needs to expire items it uses this
/// timestamp to expire.
pub fn last_event(&self) -> Option<DateTime<Utc>> |
/// Returns the time of the last config fetch.
pub fn last_config_fetch(&self) -> Option<DateTime<Utc>> {
self.current_snapshot
.read()
.as_ref()
.map(|x| x.last_fetch.clone())
}
/// Returns the time of the last config change.
pub fn last_config_change(&self) -> Option<DateTime<Utc>> {
self.current_snapshot
.read()
.as_ref()
.and_then(|x| x.last_change.clone())
}
/// Requests an update to the project config to be fetched.
pub fn request_updated_project_config(&self) {
if self.requested_new_snapshot
.compare_and_swap(false, true, Ordering::Relaxed)
{
return;
}
debug!("requesting updated project config for {}", self.project_id);
self.add_query(GetProjectConfigQuery, move |ps, rv| -> Result<(), ()> {
if let Ok(snapshot_opt) = rv {
ps.requested_new_snapshot.store(false, Ordering::Relaxed);
match snapshot_opt {
Some(snapshot) => ps.set_snapshot(snapshot),
None => ps.set_missing_snapshot(),
}
} else {
// TODO: error handling
rv.unwrap();
}
Ok(())
});
}
/// Checks if events should be buffered for a public key.
///
/// Events should be buffered until a key becomes available nor we
/// absolutely know that the key does not exist. There is some
/// internal logic here that based on the age of the snapshot and
/// some global disabled settings will indicate different behaviors.
pub fn get_public_key_event_action(&self, public_key: &str) -> PublicKeyEventAction {
match *self.current_snapshot.read() {
Some(ref snapshot) => {
// in case the entire project is disabled we always discard
// events unless the snapshot is outdated. In case the
// snapshot is outdated we might fall back to sending or
// discarding as well based on the key status.
if !snapshot.outdated(&self.config) && snapshot.disabled() {
return PublicKeyEventAction::Discard;
}
match snapshot.get_public_key_status(public_key) {
PublicKeyStatus::Enabled => PublicKeyEventAction::Send,
PublicKeyStatus::Disabled => PublicKeyEventAction::Discard,
PublicKeyStatus::Unknown => {
// we don't know the key yet, ensure we fetch it on the next
// heartbeat.
self.request_updated_project_config();
// if the last config fetch was more than a minute ago we just
// accept the event because at this point the dsn might have
// become available upstream.
if snapshot.outdated(&self.config) {
PublicKeyEventAction::Queue
// we just assume the config did not change in the last 60
// seconds and the dsn is indeed not seen yet.
} else {
PublicKeyEventAction::Discard
}
}
}
}
// in the absence of a snapshot we generally queue
None => {
self.request_updated_project_config();
PublicKeyEventAction::Queue
}
}
}
/// Validates the origin.
pub fn is_valid_origin(&self, origin: &Url) -> bool {
self.snapshot_opt().map_or(true, |snapshot| {
let allowed = &snapshot.config().allowed_domains;
!allowed.is_empty()
&& allowed
.iter()
.any(|x| x.as_str() == "*" || Some(x.as_str()) == origin.host_str())
})
}
/// Given a public key and an event this handles an event.
///
/// It either puts it into an internal queue, sends it or discards it. If the item
/// was discarded `false` is returned.
pub fn store_changeset<'a>(&self, changeset: StoreChangeset) -> bool {
if let Some(ref origin) = changeset.meta.origin {
if !self.is_valid_origin(origin) {
debug!(
"{}#{} -> access denied (bad origin {})",
self.project_id, changeset.event, origin
);
return false;
}
}
match self.get_public_key_event_action(&changeset.public_key) {
PublicKeyEventAction::Queue => {
debug!("{}#{} -> pending", self.project_id, changeset.event);
self.pending_stores.write().push(PendingStore {
added_at: Instant::now(),
changeset,
});
true
}
PublicKeyEventAction::Send => {
debug!("{}#{} -> changeset", self.project_id, changeset.event);
self.request_manager
.add_changeset(self.project_id, changeset);
true
}
PublicKeyEventAction::Discard => {
debug!("{}#{} -> discarded", self.project_id, changeset.event);
false
}
}
}
/// Returns `true` if the project state is available.
pub fn snapshot_available(&self) -> bool {
self.current_snapshot.read().is_some()
}
/// Returns the current project snapshot.
pub fn snapshot(&self) -> Arc<ProjectStateSnapshot> {
self.snapshot_opt().expect("Snapshot not yet available")
}
/// Returns the current project snapshot as option.
pub fn snapshot_opt(&self) -> Option<Arc<ProjectStateSnapshot>> {
match *self.current_snapshot.read() {
Some(ref arc) => Some(arc.clone()),
None => None,
}
}
/// Sets a new snapshot.
pub fn set_snapshot(&self, new_snapshot: ProjectStateSnapshot) {
*self.current_snapshot.write() = Some(Arc::new(new_snapshot));
self.retry_pending_events();
}
/// Attempts to send all pending requests now.
fn retry_pending_events(&self) {
let snapshot = self.snapshot();
let mut to_send = vec![];
let timeout = self.config.pending_events_timeout.to_std().unwrap();
// acquire the lock locally so we can then acquire the lock later on send
// without deadlocking
{
let mut lock = self.pending_stores.write();
let pending = mem::replace(&mut *lock, Vec::new());
lock.extend(pending.into_iter().filter_map(|pending_store| {
if pending_store.added_at.elapsed() > timeout {
return None;
}
match snapshot.get_public_key_status(&pending_store.changeset.public_key) {
PublicKeyStatus::Enabled => {
to_send.push(pending_store);
None
}
PublicKeyStatus::Disabled => None,
PublicKeyStatus::Unknown => Some(pending_store),
}
}));
}
for pending_store in to_send {
debug!(
"unpend {}#{}",
self.project_id, pending_store.changeset.event
);
self.store_changeset(pending_store.changeset);
}
}
/// Sets a "project does not exist" snapshot.
///
/// This is used when the server indicates that this project does not actually
/// exist or the relay has no permissions to work with it (these are both
/// reported as the same thing to the relay).
pub fn set_missing_snapshot(&self) {
self.set_snapshot(ProjectStateSnapshot {
last_fetch: Utc::now(),
last_change: None,
disabled: true,
public_keys: HashMap::new(),
slug: None,
config: Default::default(),
rev: None,
})
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_assert_sync() {
struct Assert<T: Sync> {
x: Option<T>,
}
let val: Assert<ProjectState> = Assert { x: None };
assert_eq!(val.x.is_none(), true);
}
}
| {
*self.last_event.read()
} | identifier_body |
projectstate.rs | use std::collections::HashMap;
use std::fmt;
use std::mem;
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use std::time::Instant;
use chrono::{DateTime, Utc};
use parking_lot::RwLock;
use serde::de::DeserializeOwned;
use url::Url;
use uuid::Uuid;
use config::AortaConfig;
use event::StoreChangeset;
use query::{AortaQuery, GetProjectConfigQuery, QueryError, RequestManager};
use semaphore_common::ProjectId;
use upstream::UpstreamDescriptor;
/// These are config values that the user can modify in the UI.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
#[serde(rename_all = "camelCase")]
pub struct | {
/// URLs that are permitted for cross original JavaScript requests.
pub allowed_domains: Vec<String>,
}
/// The project state snapshot represents a known server state of
/// a project.
///
/// This is generally used by an indirection of `ProjectState` which
/// manages a view over it which supports concurrent updates in the
/// background.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ProjectStateSnapshot {
/// The timestamp of when the snapshot was received.
pub last_fetch: DateTime<Utc>,
/// The timestamp of when the last snapshot was changed.
///
/// This might be `None` in some rare cases like where snapshots
/// are faked locally.
pub last_change: Option<DateTime<Utc>>,
/// Indicates that the project is disabled.
pub disabled: bool,
/// A container of known public keys in the project.
pub public_keys: HashMap<String, bool>,
/// The project's slug if available.
pub slug: Option<String>,
/// The project's current config
pub config: ProjectConfig,
/// The project state's revision id.
pub rev: Option<Uuid>,
}
/// A helper enum indicating the public key state.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum PublicKeyStatus {
/// The state of the public key is not known.
///
/// This can indicate that the key is not yet known or that the
/// key just does not exist. We can not tell these two cases
/// apart as there is always a lag since the last update from the
/// upstream server. As such the project state uses a heuristic
/// to decide if it should treat a key as not existing or just
/// not yet known.
Unknown,
/// This key is known but was disabled.
Disabled,
/// This key is known and is enabled.
Enabled,
}
/// Indicates what should happen to events based on the public key
/// of a project.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum PublicKeyEventAction {
/// Indicates that this event should be queued.
Queue,
/// Indicates that this event should be discarded.
Discard,
/// Indicates that this event should be sent now.
Send,
}
/// An event that was not sent yet.
#[derive(Debug)]
struct PendingStore {
added_at: Instant,
changeset: StoreChangeset,
}
/// Gives access to the project's remote state.
///
/// This wrapper is sync and can be updated concurrently. As the type is
/// sync all of the methods can be used on a shared instance. The type
/// internally locks automatically.
#[derive(Debug)]
pub struct ProjectState {
config: Arc<AortaConfig>,
project_id: ProjectId,
current_snapshot: RwLock<Option<Arc<ProjectStateSnapshot>>>,
pending_stores: RwLock<Vec<PendingStore>>,
requested_new_snapshot: AtomicBool,
request_manager: Arc<RequestManager>,
last_event: RwLock<Option<DateTime<Utc>>>,
}
impl ProjectStateSnapshot {
/// Returns the current status of a key.
pub fn get_public_key_status(&self, public_key: &str) -> PublicKeyStatus {
match self.public_keys.get(public_key) {
Some(&true) => PublicKeyStatus::Enabled,
Some(&false) => PublicKeyStatus::Disabled,
None => PublicKeyStatus::Unknown,
}
}
/// Checks if a public key is enabled.
pub fn public_key_is_enabled(&self, public_key: &str) -> bool {
self.get_public_key_status(public_key) == PublicKeyStatus::Enabled
}
/// Returns `true` if the entire project should be considered
/// disabled (blackholed, deleted etc.).
pub fn disabled(&self) -> bool {
self.disabled
}
/// Returns true if the snapshot is outdated.
pub fn outdated(&self, config: &AortaConfig) -> bool {
// TODO(armin): change this to a value from the config
self.last_fetch < Utc::now() - config.snapshot_expiry
}
/// Returns the project config.
pub fn config(&self) -> &ProjectConfig {
&self.config
}
}
impl ProjectState {
/// Creates a new project state.
///
/// The project state is created without storing a snapshot. This means
/// that accessing the snapshot will panic until the data becomes available.
///
/// The config is taken as `Arc` so we can share it effectively across
/// multiple project states and troves.
pub fn new(
project_id: ProjectId,
config: Arc<AortaConfig>,
request_manager: Arc<RequestManager>,
) -> ProjectState {
ProjectState {
project_id: project_id,
config: config,
current_snapshot: RwLock::new(None),
pending_stores: RwLock::new(Vec::new()),
requested_new_snapshot: AtomicBool::new(false),
request_manager: request_manager,
last_event: RwLock::new(None),
}
}
/// Adds a query that should be issued with the next heartbeat.
pub fn add_query<Q, R, F, E>(&self, query: Q, callback: F) -> Uuid
where
Q: AortaQuery<Response = R>,
R: DeserializeOwned + 'static,
F: FnMut(&ProjectState, Result<R, QueryError>) -> Result<(), E> + Sync + Send + 'static,
E: fmt::Debug,
{
self.request_manager
.add_query(self.project_id(), query, callback)
}
/// The project ID of this project.
pub fn project_id(&self) -> ProjectId {
self.project_id
}
/// The direct upstream that reported the snapshot.
///
/// Currently an relay only ever has one trove and that trove can only have
/// one upstream descriptor. As a result of this, this descriptor will always
/// match the one of the trove which holds the project state.
pub fn upstream(&self) -> &UpstreamDescriptor {
&self.config.upstream
}
/// Returns the time of the last event received (but not forwarded).
///
/// This timestamp is used to indicate that the project has activity
/// for the trove. As the trove needs to expire items it uses this
/// timestamp to expire.
pub fn last_event(&self) -> Option<DateTime<Utc>> {
*self.last_event.read()
}
/// Returns the time of the last config fetch.
pub fn last_config_fetch(&self) -> Option<DateTime<Utc>> {
self.current_snapshot
.read()
.as_ref()
.map(|x| x.last_fetch.clone())
}
/// Returns the time of the last config change.
pub fn last_config_change(&self) -> Option<DateTime<Utc>> {
self.current_snapshot
.read()
.as_ref()
.and_then(|x| x.last_change.clone())
}
/// Requests an update to the project config to be fetched.
pub fn request_updated_project_config(&self) {
if self.requested_new_snapshot
.compare_and_swap(false, true, Ordering::Relaxed)
{
return;
}
debug!("requesting updated project config for {}", self.project_id);
self.add_query(GetProjectConfigQuery, move |ps, rv| -> Result<(), ()> {
if let Ok(snapshot_opt) = rv {
ps.requested_new_snapshot.store(false, Ordering::Relaxed);
match snapshot_opt {
Some(snapshot) => ps.set_snapshot(snapshot),
None => ps.set_missing_snapshot(),
}
} else {
// TODO: error handling
rv.unwrap();
}
Ok(())
});
}
/// Checks if events should be buffered for a public key.
///
/// Events should be buffered until a key becomes available nor we
/// absolutely know that the key does not exist. There is some
/// internal logic here that based on the age of the snapshot and
/// some global disabled settings will indicate different behaviors.
pub fn get_public_key_event_action(&self, public_key: &str) -> PublicKeyEventAction {
match *self.current_snapshot.read() {
Some(ref snapshot) => {
// in case the entire project is disabled we always discard
// events unless the snapshot is outdated. In case the
// snapshot is outdated we might fall back to sending or
// discarding as well based on the key status.
if !snapshot.outdated(&self.config) && snapshot.disabled() {
return PublicKeyEventAction::Discard;
}
match snapshot.get_public_key_status(public_key) {
PublicKeyStatus::Enabled => PublicKeyEventAction::Send,
PublicKeyStatus::Disabled => PublicKeyEventAction::Discard,
PublicKeyStatus::Unknown => {
// we don't know the key yet, ensure we fetch it on the next
// heartbeat.
self.request_updated_project_config();
// if the last config fetch was more than a minute ago we just
// accept the event because at this point the dsn might have
// become available upstream.
if snapshot.outdated(&self.config) {
PublicKeyEventAction::Queue
// we just assume the config did not change in the last 60
// seconds and the dsn is indeed not seen yet.
} else {
PublicKeyEventAction::Discard
}
}
}
}
// in the absence of a snapshot we generally queue
None => {
self.request_updated_project_config();
PublicKeyEventAction::Queue
}
}
}
/// Validates the origin.
pub fn is_valid_origin(&self, origin: &Url) -> bool {
self.snapshot_opt().map_or(true, |snapshot| {
let allowed = &snapshot.config().allowed_domains;
!allowed.is_empty()
&& allowed
.iter()
.any(|x| x.as_str() == "*" || Some(x.as_str()) == origin.host_str())
})
}
/// Given a public key and an event this handles an event.
///
/// It either puts it into an internal queue, sends it or discards it. If the item
/// was discarded `false` is returned.
pub fn store_changeset<'a>(&self, changeset: StoreChangeset) -> bool {
if let Some(ref origin) = changeset.meta.origin {
if !self.is_valid_origin(origin) {
debug!(
"{}#{} -> access denied (bad origin {})",
self.project_id, changeset.event, origin
);
return false;
}
}
match self.get_public_key_event_action(&changeset.public_key) {
PublicKeyEventAction::Queue => {
debug!("{}#{} -> pending", self.project_id, changeset.event);
self.pending_stores.write().push(PendingStore {
added_at: Instant::now(),
changeset,
});
true
}
PublicKeyEventAction::Send => {
debug!("{}#{} -> changeset", self.project_id, changeset.event);
self.request_manager
.add_changeset(self.project_id, changeset);
true
}
PublicKeyEventAction::Discard => {
debug!("{}#{} -> discarded", self.project_id, changeset.event);
false
}
}
}
/// Returns `true` if the project state is available.
pub fn snapshot_available(&self) -> bool {
self.current_snapshot.read().is_some()
}
/// Returns the current project snapshot.
pub fn snapshot(&self) -> Arc<ProjectStateSnapshot> {
self.snapshot_opt().expect("Snapshot not yet available")
}
/// Returns the current project snapshot as option.
pub fn snapshot_opt(&self) -> Option<Arc<ProjectStateSnapshot>> {
match *self.current_snapshot.read() {
Some(ref arc) => Some(arc.clone()),
None => None,
}
}
/// Sets a new snapshot.
pub fn set_snapshot(&self, new_snapshot: ProjectStateSnapshot) {
*self.current_snapshot.write() = Some(Arc::new(new_snapshot));
self.retry_pending_events();
}
/// Attempts to send all pending requests now.
fn retry_pending_events(&self) {
let snapshot = self.snapshot();
let mut to_send = vec![];
let timeout = self.config.pending_events_timeout.to_std().unwrap();
// acquire the lock locally so we can then acquire the lock later on send
// without deadlocking
{
let mut lock = self.pending_stores.write();
let pending = mem::replace(&mut *lock, Vec::new());
lock.extend(pending.into_iter().filter_map(|pending_store| {
if pending_store.added_at.elapsed() > timeout {
return None;
}
match snapshot.get_public_key_status(&pending_store.changeset.public_key) {
PublicKeyStatus::Enabled => {
to_send.push(pending_store);
None
}
PublicKeyStatus::Disabled => None,
PublicKeyStatus::Unknown => Some(pending_store),
}
}));
}
for pending_store in to_send {
debug!(
"unpend {}#{}",
self.project_id, pending_store.changeset.event
);
self.store_changeset(pending_store.changeset);
}
}
/// Sets a "project does not exist" snapshot.
///
/// This is used when the server indicates that this project does not actually
/// exist or the relay has no permissions to work with it (these are both
/// reported as the same thing to the relay).
pub fn set_missing_snapshot(&self) {
self.set_snapshot(ProjectStateSnapshot {
last_fetch: Utc::now(),
last_change: None,
disabled: true,
public_keys: HashMap::new(),
slug: None,
config: Default::default(),
rev: None,
})
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_assert_sync() {
struct Assert<T: Sync> {
x: Option<T>,
}
let val: Assert<ProjectState> = Assert { x: None };
assert_eq!(val.x.is_none(), true);
}
}
| ProjectConfig | identifier_name |
projectstate.rs | use std::collections::HashMap;
use std::fmt;
use std::mem;
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use std::time::Instant;
use chrono::{DateTime, Utc};
use parking_lot::RwLock;
use serde::de::DeserializeOwned;
use url::Url;
use uuid::Uuid;
use config::AortaConfig;
use event::StoreChangeset;
use query::{AortaQuery, GetProjectConfigQuery, QueryError, RequestManager};
use semaphore_common::ProjectId;
use upstream::UpstreamDescriptor;
/// These are config values that the user can modify in the UI.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
#[serde(rename_all = "camelCase")]
pub struct ProjectConfig {
/// URLs that are permitted for cross original JavaScript requests.
pub allowed_domains: Vec<String>,
}
/// The project state snapshot represents a known server state of
/// a project.
///
/// This is generally used by an indirection of `ProjectState` which
/// manages a view over it which supports concurrent updates in the
/// background.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ProjectStateSnapshot {
    /// The timestamp of when the snapshot was received.
    pub last_fetch: DateTime<Utc>,
    /// The timestamp of when the last snapshot was changed.
    ///
    /// This might be `None` in some rare cases like where snapshots
    /// are faked locally.
    pub last_change: Option<DateTime<Utc>>,
    /// Indicates that the project is disabled.
    pub disabled: bool,
    /// A container of known public keys in the project.
    ///
    /// Maps each public key to its enabled flag (`true` = enabled,
    /// `false` = disabled); see `get_public_key_status`.
    pub public_keys: HashMap<String, bool>,
    /// The project's slug if available.
    pub slug: Option<String>,
    /// The project's current config
    pub config: ProjectConfig,
    /// The project state's revision id.
    pub rev: Option<Uuid>,
}
/// A helper enum indicating the public key state.
///
/// Produced by `ProjectStateSnapshot::get_public_key_status`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum PublicKeyStatus {
    /// The state of the public key is not known.
    ///
    /// This can indicate that the key is not yet known or that the
    /// key just does not exist. We can not tell these two cases
    /// apart as there is always a lag since the last update from the
    /// upstream server. As such the project state uses a heuristic
    /// to decide if it should treat a key as not existing or just
    /// not yet known.
    Unknown,
    /// This key is known but was disabled.
    Disabled,
    /// This key is known and is enabled.
    Enabled,
}
/// Indicates what should happen to events based on the public key
/// of a project.
///
/// Produced by `ProjectState::get_public_key_event_action`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum PublicKeyEventAction {
    /// Indicates that this event should be queued.
    Queue,
    /// Indicates that this event should be discarded.
    Discard,
    /// Indicates that this event should be sent now.
    Send,
}
/// An event that was not sent yet.
#[derive(Debug)]
struct PendingStore {
    /// When the changeset was queued; used to expire stale entries
    /// (see `retry_pending_events`).
    added_at: Instant,
    /// The buffered event payload awaiting public key resolution.
    changeset: StoreChangeset,
}
/// Gives access to the project's remote state.
///
/// This wrapper is sync and can be updated concurrently. As the type is
/// sync all of the methods can be used on a shared instance. The type
/// internally locks automatically.
#[derive(Debug)]
pub struct ProjectState {
    /// Shared aorta configuration (also holds the upstream descriptor).
    config: Arc<AortaConfig>,
    /// The id of the project this state belongs to.
    project_id: ProjectId,
    /// Latest known snapshot; `None` until the first fetch completes.
    current_snapshot: RwLock<Option<Arc<ProjectStateSnapshot>>>,
    /// Events buffered while their public key status is still unknown.
    pending_stores: RwLock<Vec<PendingStore>>,
    /// Set while a snapshot fetch is in flight to avoid duplicate queries.
    requested_new_snapshot: AtomicBool,
    /// Used to enqueue queries and changesets towards the upstream.
    request_manager: Arc<RequestManager>,
    /// Time of the last event received; read by `last_event()`.
    /// NOTE(review): not written anywhere in this file — presumably
    /// updated by callers elsewhere; confirm before relying on it.
    last_event: RwLock<Option<DateTime<Utc>>>,
}
impl ProjectStateSnapshot {
    /// Looks up the status of a single public key in this snapshot.
    pub fn get_public_key_status(&self, public_key: &str) -> PublicKeyStatus {
        match self.public_keys.get(public_key) {
            None => PublicKeyStatus::Unknown,
            Some(&enabled) => {
                if enabled {
                    PublicKeyStatus::Enabled
                } else {
                    PublicKeyStatus::Disabled
                }
            }
        }
    }

    /// Checks if a public key is enabled.
    pub fn public_key_is_enabled(&self, public_key: &str) -> bool {
        match self.get_public_key_status(public_key) {
            PublicKeyStatus::Enabled => true,
            _ => false,
        }
    }

    /// Returns `true` if the entire project should be considered
    /// disabled (blackholed, deleted etc.).
    pub fn disabled(&self) -> bool {
        self.disabled
    }

    /// Returns true if the snapshot is outdated.
    pub fn outdated(&self, config: &AortaConfig) -> bool {
        // TODO(armin): change this to a value from the config
        Utc::now() - config.snapshot_expiry > self.last_fetch
    }

    /// Returns the project config.
    pub fn config(&self) -> &ProjectConfig {
        &self.config
    }
}
impl ProjectState {
/// Creates a new project state.
///
/// The project state is created without storing a snapshot. This means
/// that accessing the snapshot will panic until the data becomes available.
///
/// The config is taken as `Arc` so we can share it effectively across
/// multiple project states and troves.
pub fn new(
project_id: ProjectId,
config: Arc<AortaConfig>,
request_manager: Arc<RequestManager>,
) -> ProjectState {
ProjectState {
project_id: project_id,
config: config,
current_snapshot: RwLock::new(None),
pending_stores: RwLock::new(Vec::new()),
requested_new_snapshot: AtomicBool::new(false),
request_manager: request_manager,
last_event: RwLock::new(None),
}
}
/// Adds a query that should be issued with the next heartbeat.
pub fn add_query<Q, R, F, E>(&self, query: Q, callback: F) -> Uuid
where
Q: AortaQuery<Response = R>,
R: DeserializeOwned + 'static,
F: FnMut(&ProjectState, Result<R, QueryError>) -> Result<(), E> + Sync + Send + 'static,
E: fmt::Debug,
{
self.request_manager
.add_query(self.project_id(), query, callback)
}
/// The project ID of this project.
pub fn project_id(&self) -> ProjectId {
self.project_id
}
/// The direct upstream that reported the snapshot.
///
/// Currently an relay only ever has one trove and that trove can only have
/// one upstream descriptor. As a result of this, this descriptor will always
/// match the one of the trove which holds the project state.
pub fn upstream(&self) -> &UpstreamDescriptor {
&self.config.upstream
}
/// Returns the time of the last event received (but not forwarded).
///
/// This timestamp is used to indicate that the project has activity
/// for the trove. As the trove needs to expire items it uses this
/// timestamp to expire.
pub fn last_event(&self) -> Option<DateTime<Utc>> {
*self.last_event.read()
}
/// Returns the time of the last config fetch.
pub fn last_config_fetch(&self) -> Option<DateTime<Utc>> {
self.current_snapshot
.read()
.as_ref()
.map(|x| x.last_fetch.clone())
}
/// Returns the time of the last config change.
pub fn last_config_change(&self) -> Option<DateTime<Utc>> {
self.current_snapshot
.read()
.as_ref() | .and_then(|x| x.last_change.clone())
}
/// Requests an update to the project config to be fetched.
pub fn request_updated_project_config(&self) {
if self.requested_new_snapshot
.compare_and_swap(false, true, Ordering::Relaxed)
{
return;
}
debug!("requesting updated project config for {}", self.project_id);
self.add_query(GetProjectConfigQuery, move |ps, rv| -> Result<(), ()> {
if let Ok(snapshot_opt) = rv {
ps.requested_new_snapshot.store(false, Ordering::Relaxed);
match snapshot_opt {
Some(snapshot) => ps.set_snapshot(snapshot),
None => ps.set_missing_snapshot(),
}
} else {
// TODO: error handling
rv.unwrap();
}
Ok(())
});
}
/// Checks if events should be buffered for a public key.
///
/// Events should be buffered until a key becomes available nor we
/// absolutely know that the key does not exist. There is some
/// internal logic here that based on the age of the snapshot and
/// some global disabled settings will indicate different behaviors.
pub fn get_public_key_event_action(&self, public_key: &str) -> PublicKeyEventAction {
match *self.current_snapshot.read() {
Some(ref snapshot) => {
// in case the entire project is disabled we always discard
// events unless the snapshot is outdated. In case the
// snapshot is outdated we might fall back to sending or
// discarding as well based on the key status.
if !snapshot.outdated(&self.config) && snapshot.disabled() {
return PublicKeyEventAction::Discard;
}
match snapshot.get_public_key_status(public_key) {
PublicKeyStatus::Enabled => PublicKeyEventAction::Send,
PublicKeyStatus::Disabled => PublicKeyEventAction::Discard,
PublicKeyStatus::Unknown => {
// we don't know the key yet, ensure we fetch it on the next
// heartbeat.
self.request_updated_project_config();
// if the last config fetch was more than a minute ago we just
// accept the event because at this point the dsn might have
// become available upstream.
if snapshot.outdated(&self.config) {
PublicKeyEventAction::Queue
// we just assume the config did not change in the last 60
// seconds and the dsn is indeed not seen yet.
} else {
PublicKeyEventAction::Discard
}
}
}
}
// in the absence of a snapshot we generally queue
None => {
self.request_updated_project_config();
PublicKeyEventAction::Queue
}
}
}
/// Validates the origin.
pub fn is_valid_origin(&self, origin: &Url) -> bool {
self.snapshot_opt().map_or(true, |snapshot| {
let allowed = &snapshot.config().allowed_domains;
!allowed.is_empty()
&& allowed
.iter()
.any(|x| x.as_str() == "*" || Some(x.as_str()) == origin.host_str())
})
}
/// Given a public key and an event this handles an event.
///
/// It either puts it into an internal queue, sends it or discards it. If the item
/// was discarded `false` is returned.
pub fn store_changeset<'a>(&self, changeset: StoreChangeset) -> bool {
if let Some(ref origin) = changeset.meta.origin {
if !self.is_valid_origin(origin) {
debug!(
"{}#{} -> access denied (bad origin {})",
self.project_id, changeset.event, origin
);
return false;
}
}
match self.get_public_key_event_action(&changeset.public_key) {
PublicKeyEventAction::Queue => {
debug!("{}#{} -> pending", self.project_id, changeset.event);
self.pending_stores.write().push(PendingStore {
added_at: Instant::now(),
changeset,
});
true
}
PublicKeyEventAction::Send => {
debug!("{}#{} -> changeset", self.project_id, changeset.event);
self.request_manager
.add_changeset(self.project_id, changeset);
true
}
PublicKeyEventAction::Discard => {
debug!("{}#{} -> discarded", self.project_id, changeset.event);
false
}
}
}
/// Returns `true` if the project state is available.
pub fn snapshot_available(&self) -> bool {
self.current_snapshot.read().is_some()
}
/// Returns the current project snapshot.
pub fn snapshot(&self) -> Arc<ProjectStateSnapshot> {
self.snapshot_opt().expect("Snapshot not yet available")
}
/// Returns the current project snapshot as option.
pub fn snapshot_opt(&self) -> Option<Arc<ProjectStateSnapshot>> {
match *self.current_snapshot.read() {
Some(ref arc) => Some(arc.clone()),
None => None,
}
}
/// Sets a new snapshot.
pub fn set_snapshot(&self, new_snapshot: ProjectStateSnapshot) {
*self.current_snapshot.write() = Some(Arc::new(new_snapshot));
self.retry_pending_events();
}
/// Attempts to send all pending requests now.
fn retry_pending_events(&self) {
let snapshot = self.snapshot();
let mut to_send = vec![];
let timeout = self.config.pending_events_timeout.to_std().unwrap();
// acquire the lock locally so we can then acquire the lock later on send
// without deadlocking
{
let mut lock = self.pending_stores.write();
let pending = mem::replace(&mut *lock, Vec::new());
lock.extend(pending.into_iter().filter_map(|pending_store| {
if pending_store.added_at.elapsed() > timeout {
return None;
}
match snapshot.get_public_key_status(&pending_store.changeset.public_key) {
PublicKeyStatus::Enabled => {
to_send.push(pending_store);
None
}
PublicKeyStatus::Disabled => None,
PublicKeyStatus::Unknown => Some(pending_store),
}
}));
}
for pending_store in to_send {
debug!(
"unpend {}#{}",
self.project_id, pending_store.changeset.event
);
self.store_changeset(pending_store.changeset);
}
}
/// Sets a "project does not exist" snapshot.
///
/// This is used when the server indicates that this project does not actually
/// exist or the relay has no permissions to work with it (these are both
/// reported as the same thing to the relay).
pub fn set_missing_snapshot(&self) {
self.set_snapshot(ProjectStateSnapshot {
last_fetch: Utc::now(),
last_change: None,
disabled: true,
public_keys: HashMap::new(),
slug: None,
config: Default::default(),
rev: None,
})
}
}
#[cfg(test)]
mod test {
    use super::*;

    /// Compile-time check that `ProjectState` is `Sync` (enforced by the
    /// `T: Sync` bound); the runtime assertion merely uses the value.
    #[test]
    fn test_assert_sync() {
        struct Assert<T: Sync> {
            x: Option<T>,
        }
        let val: Assert<ProjectState> = Assert { x: None };
        // `assert!` on the boolean instead of `assert_eq!(..., true)`
        // (clippy: bool_assert_comparison). Also drops the dataset
        // residue that was fused onto the closing brace.
        assert!(val.x.is_none());
    }
}
#!/usr/bin/env python3
# This is a tool to export the WA framework answers to a XLSX file
#
# This code is only for use in Well-Architected labs
# *** NOT FOR PRODUCTION USE ***
#
# Licensed under the Apache 2.0 and MITnoAttr License.
#
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
# https://aws.amazon.com/apache2.0/
import botocore
import boto3
import json
import datetime
import logging
import jmespath
import xlsxwriter
import argparse
from pkg_resources import packaging
import urllib.request
from bs4 import BeautifulSoup, NavigableString, Tag
# --- Module metadata ---------------------------------------------------
__author__ = "Eric Pullen"
__email__ = "eppullen@amazon.com"
__copyright__ = "Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved."
__credits__ = ["Eric Pullen"]
__version__ = "0.1"

# Default region listed here
REGION_NAME = "us-east-1"
# blankjson/response are module-level placeholders; presumably leftovers
# from earlier revisions — they are not read by the functions below.
blankjson = {}
response = ""

# Setup Logging
logging.basicConfig(
    level=logging.DEBUG,
    format='%(asctime)s.%(msecs)03d %(levelname)s %(module)s - %(funcName)s: %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S',
)
logger = logging.getLogger()
# Silence chatty AWS SDK/transport loggers so only this script's output shows.
logging.getLogger('boto3').setLevel(logging.CRITICAL)
logging.getLogger('botocore').setLevel(logging.CRITICAL)
logging.getLogger('s3transfer').setLevel(logging.CRITICAL)
logging.getLogger('urllib3').setLevel(logging.CRITICAL)

# --- Command line interface --------------------------------------------
PARSER = argparse.ArgumentParser(
    formatter_class=argparse.RawDescriptionHelpFormatter,
    description='''\
This utility has two options to run:
------------------------------------
1) If you provide a workloadid, this will gather all of the answers across all Well-Architected Lenss and export them to a spreadsheet.
2) If you do not provide a workloadid, the utility will generate a TEMP workload and auto-answer every question. It will then generate a spreadsheet with all of the questions, best practices, and even the improvement plan links for each.
'''
)
PARSER.add_argument('-p','--profile', required=False, default="default", help='AWS CLI Profile Name')
PARSER.add_argument('-r','--region', required=False, default="us-east-1", help='From Region Name. Example: us-east-1')
PARSER.add_argument('-w','--workloadid', required=False, default="", help='Workload Id to use instead of creating a TEMP workload')
PARSER.add_argument('-k','--keeptempworkload', action='store_true', help='If you want to keep the TEMP workload created at the end of the export')
PARSER.add_argument('-f','--fileName', required=True, default="./demo.xlsx", help='FileName to export XLSX')
PARSER.add_argument('-v','--debug', action='store_true', help='print debug messages to stderr')
ARGUMENTS = PARSER.parse_args()
PROFILE = ARGUMENTS.profile
FILENAME = ARGUMENTS.fileName
REGION_NAME = ARGUMENTS.region
WORKLOADID = ARGUMENTS.workloadid
KEEPTEMP = ARGUMENTS.keeptempworkload
if ARGUMENTS.debug:
    logger.setLevel(logging.DEBUG)
else:
    logger.setLevel(logging.INFO)

# To map our short hand names in the console to the API defined pillars
# Example: print(PILLAR_PARSE_MAP['performance'])
PILLAR_PARSE_MAP = {
    "operationalExcellence": "OPS",
    "security": "SEC",
    "reliability": "REL",
    "performance": "PERF",
    "costOptimization": "COST"
}
# Human readable pillar names used for the spreadsheet's "Pillar" column.
PILLAR_PROPER_NAME_MAP = {
    "operationalExcellence": "Operational Excellence",
    "security": "Security",
    "reliability": "Reliability",
    "performance": "Performance Efficiency",
    "costOptimization": "Cost Optimization"
}
class DateTimeEncoder(json.JSONEncoder):
    """JSON encoder that renders datetime objects via ``str()``.

    Any non-datetime value is delegated to the base encoder (which will
    raise ``TypeError`` for unsupported types, as usual).
    """

    def default(self, z):
        if isinstance(z, datetime.datetime):
            return str(z)
        return super().default(z)
def CreateNewWorkload(
    waclient,
    workloadName,
    description,
    reviewOwner,
    environment,
    awsRegions,
    lenses,
    tags,
    pillarPriorities,
    notes="",
    nonAwsRegions=[],
    architecturalDesign='',
    industryType='',
    industry='',
    accountIds=[]
):
    """Create a Well-Architected workload and return (workloadId, workloadArn).

    If a workload with the same name already exists, the existing one is
    looked up and returned instead.

    Note: ``tags`` and ``pillarPriorities`` are accepted but not passed to
    the API (kept for call-site compatibility with the original signature).
    API errors are logged and re-raised; previously the function fell
    through and crashed on an unbound ``response`` variable.
    """
    try:
        response = waclient.create_workload(
            WorkloadName=workloadName,
            Description=description,
            ReviewOwner=reviewOwner,
            Environment=environment,
            AwsRegions=awsRegions,
            Lenses=lenses,
            NonAwsRegions=nonAwsRegions,
            ArchitecturalDesign=architecturalDesign,
            IndustryType=industryType,
            Industry=industry,
            Notes=notes,
            AccountIds=accountIds
        )
    except waclient.exceptions.ConflictException:
        # Name collision: reuse the existing workload.
        workloadId, workloadARN = FindWorkload(waclient, workloadName)
        logger.error("ERROR - The workload name %s already exists as workloadId %s" % (workloadName, workloadId))
        return workloadId, workloadARN
    except botocore.exceptions.ParamValidationError as e:
        logger.error("ERROR - Parameter validation error: %s" % e)
        raise
    except botocore.exceptions.ClientError as e:
        logger.error("ERROR - Unexpected error: %s" % e)
        raise
    return response['WorkloadId'], response['WorkloadArn']
def FindWorkload(
    waclient,
    workloadName
):
    """Look up a workload by name prefix; return (workloadId, workloadArn).

    Raises ValueError when no workload matches (previously this was an
    opaque IndexError). API errors are logged and re-raised; previously
    they fell through to an unbound ``response`` variable.
    """
    try:
        response = waclient.list_workloads(
            WorkloadNamePrefix=workloadName
        )
    except botocore.exceptions.ParamValidationError as e:
        logger.error("ERROR - Parameter validation error: %s" % e)
        raise
    except botocore.exceptions.ClientError as e:
        logger.error("ERROR - Unexpected error: %s" % e)
        raise
    summaries = response['WorkloadSummaries']
    if not summaries:
        raise ValueError("No workload found with name prefix '%s'" % workloadName)
    # First match wins, mirroring the original behavior.
    return summaries[0]['WorkloadId'], summaries[0]['WorkloadArn']
def DeleteWorkload(
    waclient,
    workloadId
):
    """Delete the given workload.

    API errors are logged but deliberately not re-raised (best-effort
    cleanup, matching the original behavior). Returns None.
    """
    try:
        waclient.delete_workload(WorkloadId=workloadId)
    except botocore.exceptions.ParamValidationError as err:
        logger.error("ERROR - Parameter validation error: %s" % err)
    except botocore.exceptions.ClientError as err:
        logger.error("ERROR - Unexpected error: %s" % err)
def GetWorkload(
    waclient,
    workloadId
):
    """Fetch a workload by id and return its 'Workload' dict.

    API errors are logged and re-raised. Previously a ClientError called
    ``exit()`` and a ParamValidationError fell through to an unbound
    ``response`` variable; propagating the exception still terminates the
    script but with an actionable traceback.
    """
    try:
        response = waclient.get_workload(
            WorkloadId=workloadId
        )
    except botocore.exceptions.ParamValidationError as e:
        logger.error("ERROR - Parameter validation error: %s" % e)
        raise
    except botocore.exceptions.ClientError as e:
        logger.error("ERROR - Unexpected error: %s" % e)
        raise
    return response['Workload']
def listLens(
    waclient
):
    """Return the aliases of every lens the service currently offers."""
    try:
        response = waclient.list_lenses()
    except botocore.exceptions.ParamValidationError as err:
        logger.error("ERROR - Parameter validation error: %s" % err)
    except botocore.exceptions.ClientError as err:
        logger.error("ERROR - Unexpected error: %s" % err)
    return jmespath.search("LensSummaries[*].LensAlias", response)
def getCurrentLensVersion(
    waclient,
    lensAlias
):
    """Return the version string of the lens identified by ``lensAlias``."""
    try:
        response = waclient.list_lenses()
    except botocore.exceptions.ParamValidationError as err:
        logger.error("ERROR - Parameter validation error: %s" % err)
    except botocore.exceptions.ClientError as err:
        logger.error("ERROR - Unexpected error: %s" % err)
    query = "LensSummaries[?LensAlias==`{}`].LensVersion".format(lensAlias)
    return jmespath.search(query, response)[0]
def getAllAnswersForLens(
    waclient,
    workloadId,
    lensAlias
):
    """Return every AnswerSummary for a lens, collected across all pillars.

    NOTE(review): the original function name was lost in this copy of the
    file (`def | (`); if main() calls it under a different name, rename
    this definition to match the call site.
    """
    answers = []
    # Due to a bug in some lenses, I have to iterate over each pillar in order to
    # retrieve the correct results.
    for pillar in PILLAR_PARSE_MAP:
        logger.debug("Grabbing answers for %s %s" % (lensAlias, pillar))
        try:
            response = waclient.list_answers(
                WorkloadId=workloadId,
                LensAlias=lensAlias,
                PillarId=pillar
            )
        except botocore.exceptions.ParamValidationError as e:
            logger.error("ERROR - Parameter validation error: %s" % e)
        except botocore.exceptions.ClientError as e:
            logger.error("ERROR - Unexpected error: %s" % e)
        answers.extend(response["AnswerSummaries"])
        # Follow pagination until the service stops returning a NextToken.
        while "NextToken" in response:
            try:
                response = waclient.list_answers(
                    WorkloadId=workloadId,
                    LensAlias=lensAlias,
                    PillarId=pillar,
                    NextToken=response["NextToken"]
                )
            except botocore.exceptions.ParamValidationError as e:
                logger.error("ERROR - Parameter validation error: %s" % e)
            except botocore.exceptions.ClientError as e:
                logger.error("ERROR - Unexpected error: %s" % e)
            answers.extend(response["AnswerSummaries"])
    return answers
def getQuestionDetails(
    waclient,
    workloadId,
    lensAlias,
    questionId
):
    """Fetch one answer record and return its
    (description, improvementPlanUrl, helpfulResourceUrl, notes)."""
    try:
        response = waclient.get_answer(
            WorkloadId=workloadId,
            LensAlias=lensAlias,
            QuestionId=questionId
        )
    except botocore.exceptions.ParamValidationError as err:
        logger.error("ERROR - Parameter validation error: %s" % err)
    except botocore.exceptions.ClientError as err:
        logger.error("ERROR - Unexpected error: %s" % err)
    fields = ("QuestionDescription", "ImprovementPlanUrl", "HelpfulResourceUrl", "Notes")
    qDescription, qImprovementPlanUrl, qHelpfulResourceUrl, qNotes = (
        jmespath.search("Answer." + name, response) for name in fields
    )
    return qDescription, qImprovementPlanUrl, qHelpfulResourceUrl, qNotes
def updateAnswersForQuestion(
    waclient,
    workloadId,
    lensAlias,
    questionId,
    selectedChoices,
    notes
):
    """Record the selected choices and notes for a question; return the
    choice list the service stored."""
    try:
        response = waclient.update_answer(
            WorkloadId=workloadId,
            LensAlias=lensAlias,
            QuestionId=questionId,
            SelectedChoices=selectedChoices,
            Notes=notes
        )
    except botocore.exceptions.ParamValidationError as err:
        logger.error("ERROR - Parameter validation error: %s" % err)
    except botocore.exceptions.ClientError as err:
        logger.error("ERROR - Unexpected error: %s" % err)
    return jmespath.search("Answer.SelectedChoices", response)
def getImprovementPlanItems(
    waclient,
    workloadId,
    lensAlias,
    QuestionId,
    PillarId,
    ImprovementPlanUrl,
    ChoiceList
):
    """Scrape the improvement-plan docs page and map each choice id found
    on a page line to the first anchor href on that line.

    ``waclient``/``workloadId``/``lensAlias``/``QuestionId``/``PillarId``
    are unused but kept for call-site compatibility.
    Cleanup: removed dead locals (``response``, ``htmlString``) and
    commented-out code from the original.
    """
    urlresponse = urllib.request.urlopen(ImprovementPlanUrl)
    htmlStr = urlresponse.read().decode("utf8")
    ipHTMLList = {}
    for line in htmlStr.split('\n'):
        for uq in ChoiceList:
            if uq in line:
                parsed = BeautifulSoup(line, features="html.parser")
                # The first <a> on the matching line is the BP's doc link.
                ipHTMLList.update({uq: str(parsed.a['href'])})
    return ipHTMLList
def getImprovementPlanHTMLDescription(
    ImprovementPlanUrl,
    PillarId
):
    """Extract the HTML fragment for a single improvement-plan step from
    the public docs page, plus the question id text (e.g. "OPS 1").

    NOTE(review): this scraping is tightly coupled to the docs markup
    (step anchors named "stepN", one tag per line) — verify against the
    current pages before relying on it.
    """
    logger.debug("ImprovementPlanUrl: %s for pillar %s " % (ImprovementPlanUrl,PILLAR_PARSE_MAP[PillarId]))
    stepRaw = ImprovementPlanUrl.rsplit('#')[1]
    # Grab the number of the step we are referencing.
    # This will work as long as there are less than 100 steps.
    if len(stepRaw) <= 5:
        stepNumber = stepRaw[-1]
    else:
        # BUGFIX: was stepRaw[-2], which returned only the FIRST digit of a
        # two-digit step (e.g. "1" for "step12"); slice keeps both digits.
        stepNumber = stepRaw[-2:]
    # Generate the anchor names bracketing the step we want.
    firstItem = "step"+stepNumber
    secondItem = ("step"+str((int(stepNumber)+1)))
    logger.debug ("Going from %s to %s" % (firstItem, secondItem))
    urlresponse = urllib.request.urlopen(ImprovementPlanUrl)
    htmlBytes = urlresponse.read()
    htmlStr = htmlBytes.decode("utf8")
    htmlSplit = htmlStr.split('\n')
    foundit = 0
    ipString = ""
    questionIdText = ""
    # Accumulate every line between the target step anchor and the next
    # step anchor (or closing </div>); also pick up the question id text.
    for i in htmlSplit:
        if PILLAR_PARSE_MAP[PillarId] in i:
            bsparse = BeautifulSoup(i,features="html.parser")
            questionIdText = str(bsparse.text).split(':')[0].strip()
        if (secondItem in i) or ("</div>" in i):
            foundit = 0
        if firstItem in i:
            foundit = 1
            ipString+=i
        elif foundit:
            ipString+=i
    prettyHTML = BeautifulSoup(ipString,features="html.parser")
    # Need to remove all of the "local glossary links" since they point to relative paths
    for a in prettyHTML.findAll('a', 'glossref'):
        a.replaceWithChildren()
    return prettyHTML, questionIdText
def lensTabCreation(
    WACLIENT,
    workloadId,
    lens,
    workbook,
    allQuestionsForLens,
    workloadName="",
    AWSAccountId="",
    workloadDescription=""
):
    """Render one worksheet (tab) in the XLSX workbook for a single lens.

    For every pillar/question it writes one row per best-practice choice,
    merging the Explanation and Notes cells across the question's rows,
    alternating two background colors per question. When the module-level
    WORKLOADID is set, workload name/account/description and recorded
    notes/selections are filled in; otherwise those cells are left blank
    as a template.
    """
    # Setup some formatting for the workbook
    bold = workbook.add_format({'bold': True})
    bold_border = workbook.add_format({
        'border': 1,
        'border_color': 'black',
        'text_wrap': True
    })
    bold_border_bold = workbook.add_format({
        'border': 1,
        'border_color': 'black',
        'text_wrap': True,
        'font_size': 20,
        'bold': True
    })
    heading = workbook.add_format({
        'font_size': 24,
        'bold': True
    })
    # lineA/lineB: the two alternating row colors (blue / green).
    lineA = workbook.add_format({
        'border': 1,
        'border_color': 'black',
        'bg_color': '#E0EBF6',
        'align': 'top',
        'text_wrap': True
    })
    lineB = workbook.add_format({
        'border': 1,
        'border_color': 'black',
        'bg_color': '#E4EFDC',
        'align': 'top',
        'text_wrap': True
    })
    # "noborder" variants omit the bottom border so stacked cells of one
    # question read as a single region.
    lineAnoborder = workbook.add_format({
        'border': 0,
        'top': 1,
        'left': 1,
        'right': 1,
        'border_color': 'black',
        'bg_color': '#E0EBF6',
        'align': 'top',
        'text_wrap': True
    })
    lineBnoborder = workbook.add_format({
        'border': 0,
        'top': 1,
        'left': 1,
        'right': 1,
        'border_color': 'black',
        'bg_color': '#E4EFDC',
        'align': 'top',
        'text_wrap': True
    })
    # "hidden" variants use a huge indent so repeated pillar/question text
    # (written for autofilter correctness) is pushed out of sight.
    lineAhidden = workbook.add_format({
        'border': 0,
        'left': 1,
        'right': 1,
        'border_color': 'black',
        'bg_color': '#E0EBF6',
        'align': 'top',
        'text_wrap': False,
        'indent': 100
    })
    lineBhidden = workbook.add_format({
        'border': 0,
        'left': 1,
        'right': 1,
        'border_color': 'black',
        'bg_color': '#E4EFDC',
        'align': 'top',
        'text_wrap': False,
        'indent': 100
    })
    sub_heading = workbook.add_format()
    sub_heading.set_font_size(20)
    sub_heading.set_bold(True)
    small_font = workbook.add_format()
    small_font.set_font_size(9)
    # Get the current version of Lens
    logger.debug("Getting lens version for '"+lens+"'")
    versionString = getCurrentLensVersion(WACLIENT,lens)
    logger.debug("Adding worksheet using version "+versionString)
    # Truncate the alias so "name vVersion" fits Excel's 31-char tab limit.
    lensName = lens[0:18]
    worksheet = workbook.add_worksheet((lensName+' v'+versionString))
    # Print in landscape
    worksheet.set_landscape()
    # Set to 8.5x11 paper size
    worksheet.set_paper(1)
    # Set the column widths
    worksheet.set_column('A:A', 11)
    worksheet.set_column('B:B', 32)
    worksheet.set_column('C:C', 56)
    worksheet.set_column('D:D', 29)
    worksheet.set_column('E:E', 57)
    worksheet.set_column('F:F', 18)
    worksheet.set_column('G:G', 70)
    # Top of sheet
    worksheet.merge_range('A1:G1', 'Workload Overview', heading)
    worksheet.merge_range('A3:B3', 'Workload Name', bold_border_bold)
    worksheet.merge_range('A4:B4', 'AWS Account ID', bold_border_bold)
    worksheet.merge_range('A5:B5', 'Workload Description', bold_border_bold)
    # If we are using an existing workload, then display the Name, ID, and Description at the top
    # or else just make it blank
    if WORKLOADID:
        worksheet.write('C3', workloadName, bold_border)
        # The ARN's 5th component (index 4) is the account id.
        accountIdParsed = AWSAccountId.split(':')[4]
        worksheet.write('C4', accountIdParsed, bold_border)
        worksheet.write('C5', workloadDescription, bold_border)
    else:
        worksheet.write('C3', '', bold_border)
        worksheet.write('C4', '', bold_border)
        worksheet.write('C5', '', bold_border)
        worksheet.write('D3', 'Enter the name of system', small_font)
        worksheet.write('D4', 'Enter 12-degit AWS account ID', small_font)
        worksheet.write('D5', 'Briefly describe system architecture and workload, flow etc.', small_font)
    # Subheadings for columns
    worksheet.write('A8', 'Pillar', sub_heading)
    worksheet.write('B8', 'Question', sub_heading)
    worksheet.write('C8', 'Explanation', sub_heading)
    worksheet.write('D8', 'Choice (Best Practice)', sub_heading)
    worksheet.write('E8', 'Detail', sub_heading)
    worksheet.write('F8', 'Response', sub_heading)
    worksheet.write('G8', 'Notes (optional)', sub_heading)
    # Freeze the top of the sheet
    worksheet.freeze_panes(8,0)
    # AutoFilter on the first two columns
    worksheet.autofilter('A8:B8')
    # Make it easier to print
    worksheet.repeat_rows(1, 8)
    worksheet.fit_to_pages(1, 99)
    # Starting point for pillar questions
    cellPosition = 8
    # Starting cell look with lineA. Will switch back and forth
    myCell = lineA
    myCellhidden = lineAhidden
    myCellnoborder = lineAnoborder
    for pillar in PILLAR_PARSE_MAP:
        # This is the question number for each pillar (ex: OPS1, OPS2, etc)
        qNum = 1
        # The query will return all questions for a lens and pillar
        jmesquery = "[?PillarId=='"+pillar+"']"
        allQuestionsForPillar = jmespath.search(jmesquery, allQuestionsForLens)
        # For each of the possible answers, parse them and put into the Worksheet
        for answers in allQuestionsForPillar:
            # List all best practices
            questionTitle = PILLAR_PARSE_MAP[answers['PillarId']]+str(qNum)+" - "+answers['QuestionTitle']
            qDescription, qImprovementPlanUrl, qHelpfulResourceUrl, qNotes = getQuestionDetails(WACLIENT,workloadId,lens,answers['QuestionId'])
            # Some of the questions have extra whitespaces and I need to remove those to fit into the cell
            qDescription = qDescription.replace('\n                ','').replace('  ','').replace('\t', '').replace('\n', '')
            qDescription = qDescription.rstrip()
            qDescription = qDescription.strip()
            logger.debug("Working on '"+questionTitle+"'")
            logger.debug("It has answers of: "+json.dumps(answers['SelectedChoices']))
            cellID = cellPosition + 1
            # If the question has been answered (which we do for the TEMP workload) we grab the URL and parse for the HTML content
            if qImprovementPlanUrl:
                jmesquery = "[?QuestionId=='"+answers['QuestionId']+"'].Choices[].ChoiceId"
                choiceList = jmespath.search(jmesquery, allQuestionsForLens)
                ipList = getImprovementPlanItems(WACLIENT,workloadId,lens,answers['QuestionId'],answers['PillarId'],qImprovementPlanUrl,choiceList)
            else:
                ipList = []
            startingCellID=cellID
            # If its the first time through this particular pillar question:
            # I want to only write the name once, but I need to fill in
            # each cell with the same data so the autosort works properly
            # (else it will only show the first best practice)
            firstTimePillar=True
            for choices in answers['Choices']:
                # Write the pillar name and question in every cell for autosort, but only show the first one
                cell = 'A'+str(cellID)
                if firstTimePillar:
                    worksheet.write(cell, PILLAR_PROPER_NAME_MAP[pillar], myCellnoborder)
                    cell = 'B'+str(cellID)
                    worksheet.write(cell, questionTitle, myCellnoborder)
                    firstTimePillar=False
                else:
                    worksheet.write(cell, PILLAR_PROPER_NAME_MAP[pillar], myCellhidden)
                    cell = 'B'+str(cellID)
                    worksheet.write(cell, questionTitle, myCellhidden)
                # Start writing each of the BP's, details, etc
                cell = 'D'+str(cellID)
                Title = choices['Title'].replace('  ','').replace('\t', '').replace('\n', '')
                # NOTE(review): `any(... in d ...)` is a substring match on
                # the ipList keys while the index below is an exact lookup;
                # confirm choice ids never substring-collide.
                if any(choices['ChoiceId'] in d for d in ipList):
                    worksheet.write_url(cell, ipList[choices['ChoiceId']], myCell, string=Title)
                    #ipItemHTML, questionIdText = getImprovementPlanHTMLDescription(ipList[choices['ChoiceId']],answers['PillarId'])
                    #htmlString = ipItemHTML.text
                    htmlString = ""
                    htmlString = htmlString.replace('\n                ','').replace('  ','').replace('\t', '').strip().rstrip()
                    # print(htmlString)
                    worksheet.write_comment(cell, htmlString, {'author': 'Improvement Plan'})
                else:
                    worksheet.write(cell,Title,myCell)
                # Add all Details for each best practice/choice
                cell = 'E'+str(cellID)
                # Remove all of the extra spaces in the description field
                Description = choices['Description'].replace('\n                ','')
                Description = Description.replace('\n            ','')
                Description = Description.replace('  ','').replace('\t', '').replace('\n', '')
                Description = Description.rstrip()
                Description = Description.strip()
                worksheet.write(cell, Description ,myCell)
                # If this is an existing workload, we will show SELECTED if the have it checked
                # I would love to use a XLSX checkbox, but this library doesn't support it
                cell = 'F'+str(cellID)
                responseText = ""
                if choices['ChoiceId'] in answers['SelectedChoices']:
                    responseText = "SELECTED"
                else:
                    responseText = ""
                worksheet.write(cell, responseText ,myCell)
                cellID+=1
            # We are out of the choice/detail/response loop, so know how many rows were consumed
            # and we can create the explanation and notes field to span all of them
            # Explanantion field
            cellMerge = 'C'+str(startingCellID)+':C'+str(cellID-1)
            worksheet.merge_range(cellMerge, qDescription,myCell)
            # Notes field
            cellMerge = 'G'+str(startingCellID)+':G'+str(cellID-1)
            if WORKLOADID:
                worksheet.merge_range(cellMerge, qNotes, myCell)
            else:
                worksheet.merge_range(cellMerge, "", myCell)
            cellID-=1
            # Increase the question number
            qNum += 1
            # Reset the starting cellPosition to the last cellID
            cellPosition = cellID
            # Reset the cell formatting to alternate between the two colors
            if myCell == lineA:
                myCell = lineB
                myCellhidden = lineBhidden
                myCellnoborder = lineBnoborder
            else:
                myCell = lineA
                myCellhidden = lineAhidden
                myCellnoborder = lineAnoborder
def main():
    """Entry point: export Well-Architected answers to the XLSX file FILENAME.

    Two modes (driven by the module-level WORKLOADID parsed from the CLI):
    - WORKLOADID set: read the existing workload's lenses/answers.
    - WORKLOADID empty: create a TEMP workload, auto-answer every question,
      export, then delete it unless KEEPTEMP is set.
    """
    boto3_min_version = "1.16.38"
    # Verify if the version of Boto3 we are running has the wellarchitected APIs included
    if (packaging.version.parse(boto3.__version__) < packaging.version.parse(boto3_min_version)):
        logger.error("Your Boto3 version (%s) is less than %s. You must ugprade to run this script (pip3 upgrade boto3)" % (boto3.__version__, boto3_min_version))
        exit()
    logger.info("Script version %s" % __version__)
    logger.info("Starting Boto %s Session" % boto3.__version__)
    # Create a new boto3 session
    SESSION1 = boto3.session.Session(profile_name=PROFILE)
    # Initiate the well-architected session using the region defined above
    WACLIENT = SESSION1.client(
        service_name='wellarchitected',
        region_name=REGION_NAME,
    )
    # If this is an existing workload, we need to query for the various workload properties
    if WORKLOADID:
        logger.info("User specified workload id of %s" % WORKLOADID)
        workloadJson = GetWorkload(WACLIENT, WORKLOADID)
        LENSES = workloadJson['Lenses']
        logger.info("Lenses for %s: %s" % (WORKLOADID, json.dumps(LENSES)))
        WORKLOADNAME = workloadJson['WorkloadName']
        DESCRIPTION = workloadJson['Description']
        REVIEWOWNER = workloadJson['ReviewOwner']
        ENVIRONMENT = workloadJson['Environment']
        AWSREGIONS = workloadJson['AwsRegions']
        workloadId = WORKLOADID
        workloadARN = workloadJson['WorkloadArn']
    else:
        # In order to gather all of the questions, you must create a TEMP Workload
        logger.info("No workload ID specified, we will create a TEMP workload")
        # Grab all lenses that are currently available
        LENSES = listLens(WACLIENT)
        logger.info("Lenses available: "+json.dumps(LENSES))
        # Set the needed workload variables before we create it
        WORKLOADNAME = 'TEMP DO NOT USE WORKLOAD'
        DESCRIPTION = 'TEMP DO NOT USE WORKLOAD'
        REVIEWOWNER = 'WA Python Script'
        ENVIRONMENT = 'PRODUCTION'
        AWSREGIONS = [REGION_NAME]
        # Creating the TEMP workload
        logger.info("Creating a new workload to gather questions and answers")
        workloadId, workloadARN = CreateNewWorkload(WACLIENT,WORKLOADNAME,DESCRIPTION,REVIEWOWNER,ENVIRONMENT,AWSREGIONS,LENSES,"[]","[]")
    # Create an new xlsx file and add a worksheet.
    logger.info("Creating xlsx file '"+FILENAME+"'")
    workbook = xlsxwriter.Workbook(FILENAME)
    workbook.set_size(2800, 1600)
    # Simple hack to get Wellarchitected base framework first (reverse sort)
    # This will no longer work if we ever have a lens that starts with WB*, X, Y, or Z :)
    LENSES.sort(reverse=True)
    # Iterate over each lens that we either have added or is in the workload
    for lens in LENSES:
        # Grab all questions for a particular lens
        allQuestions = findAllQuestionId(WACLIENT, workloadId, lens)
        if WORKLOADID:
            # If this is an existing workload, just go ahead and create the Tab and cells.
            # workloadARN is passed as the AWSAccountId argument; lensTabCreation
            # extracts the 12-digit account id from the ARN.
            logger.debug("Not answering questions for existing workload")
            lensTabCreation(WACLIENT,workloadId,lens,workbook,allQuestions,WORKLOADNAME,workloadARN,DESCRIPTION)
        else:
            # If this is the TEMP workload, we need to first gather all of the questionIDs possible
            jmesquery = "[*].{QuestionId: QuestionId, PillarId: PillarId, Choices: Choices[].ChoiceId}"
            allQuestionIds = jmespath.search(jmesquery, allQuestions)
            # Next we answer all of the questions across all lenses in the TEMP workload
            for question in allQuestionIds:
                logger.debug("Answering question %s in the %s lens" % (question['QuestionId'], lens))
                updateAnswersForQuestion(WACLIENT,workloadId,lens,question['QuestionId'],question['Choices'],'TEMP WORKLOAD - Added by export script')
            # Once the questions have been answered, we go ahead and create the tab for each
            lensTabCreation(WACLIENT,workloadId,lens,workbook,allQuestions)
    # Close out the workbook file
    logger.info("Closing Workbook File")
    workbook.close()
    # If this is TEMP workload, we may remove it if it has not been set to keep
    if not WORKLOADID:
        if not KEEPTEMP:
            logger.info("Removing TEMP Workload")
            DeleteWorkload(WACLIENT, workloadId)
    logger.info("Done")

if __name__ == "__main__":
    main()
| findAllQuestionId | identifier_name |
#!/usr/bin/env python3
# This is a tool to export the WA framework answers to a XLSX file
#
# This code is only for use in Well-Architected labs
# *** NOT FOR PRODUCTION USE ***
#
# Licensed under the Apache 2.0 and MITnoAttr License.
#
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
# https://aws.amazon.com/apache2.0/
import botocore
import boto3
import json
import datetime
import logging
import jmespath
import xlsxwriter
import argparse
from pkg_resources import packaging
import urllib.request
from bs4 import BeautifulSoup, NavigableString, Tag
# Script metadata (used in the version banner logged at startup).
__author__ = "Eric Pullen"
__email__ = "eppullen@amazon.com"
__copyright__ = "Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved."
__credits__ = ["Eric Pullen"]
__version__ = "0.1"
# Default region listed here
REGION_NAME = "us-east-1"
blankjson = {}
# Module-level fallback; the API wrapper functions assign their own local `response`.
response = ""
# Setup Logging
logging.basicConfig(
    level=logging.DEBUG,
    format='%(asctime)s.%(msecs)03d %(levelname)s %(module)s - %(funcName)s: %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S',
)
logger = logging.getLogger()
# Silence chatty AWS/HTTP libraries so only this script's messages are shown.
logging.getLogger('boto3').setLevel(logging.CRITICAL)
logging.getLogger('botocore').setLevel(logging.CRITICAL)
logging.getLogger('s3transfer').setLevel(logging.CRITICAL)
logging.getLogger('urllib3').setLevel(logging.CRITICAL)
PARSER = argparse.ArgumentParser(
    formatter_class=argparse.RawDescriptionHelpFormatter,
    description='''\
This utility has two options to run:
------------------------------------
1) If you provide a workloadid, this will gather all of the answers across all Well-Architected Lenss and export them to a spreadsheet.
2) If you do not provide a workloadid, the utility will generate a TEMP workload and auto-answer every question. It will then generate a spreadsheet with all of the questions, best practices, and even the improvement plan links for each.
'''
)
PARSER.add_argument('-p','--profile', required=False, default="default", help='AWS CLI Profile Name')
PARSER.add_argument('-r','--region', required=False, default="us-east-1", help='From Region Name. Example: us-east-1')
PARSER.add_argument('-w','--workloadid', required=False, default="", help='Workload Id to use instead of creating a TEMP workload')
PARSER.add_argument('-k','--keeptempworkload', action='store_true', help='If you want to keep the TEMP workload created at the end of the export')
PARSER.add_argument('-f','--fileName', required=True, default="./demo.xlsx", help='FileName to export XLSX')
PARSER.add_argument('-v','--debug', action='store_true', help='print debug messages to stderr')
# NOTE: argument parsing happens at import time; importing this module without
# the required -f argument will terminate the process.
ARGUMENTS = PARSER.parse_args()
PROFILE = ARGUMENTS.profile
FILENAME = ARGUMENTS.fileName
REGION_NAME = ARGUMENTS.region
WORKLOADID = ARGUMENTS.workloadid
KEEPTEMP = ARGUMENTS.keeptempworkload
if ARGUMENTS.debug:
    logger.setLevel(logging.DEBUG)
else:
    logger.setLevel(logging.INFO)
# To map our short hand names in the console to the API defined pillars
# Example: print(PILLAR_PARSE_MAP['performance'])
PILLAR_PARSE_MAP = {
    "operationalExcellence": "OPS",
    "security": "SEC",
    "reliability": "REL",
    "performance": "PERF",
    "costOptimization": "COST"
}
# API pillar id -> human-readable pillar name used in the spreadsheet.
PILLAR_PROPER_NAME_MAP = {
    "operationalExcellence": "Operational Excellence",
    "security": "Security",
    "reliability": "Reliability",
    "performance": "Performance Efficiency",
    "costOptimization": "Cost Optimization"
}
# Helper class to convert a datetime item to JSON.
class DateTimeEncoder(json.JSONEncoder):
    """JSON encoder that renders datetime objects via str()."""

    def default(self, obj):
        # Anything that is not a datetime is deferred to the base encoder,
        # which raises TypeError for unsupported types.
        if not isinstance(obj, datetime.datetime):
            return super().default(obj)
        return str(obj)
def CreateNewWorkload(
    waclient,
    workloadName,
    description,
    reviewOwner,
    environment,
    awsRegions,
    lenses,
    tags,
    pillarPriorities,
    notes="",
    nonAwsRegions=[],
    architecturalDesign='',
    industryType='',
    industry='',
    accountIds=[]
):
    """Create a Well-Architected workload and return (workloadId, workloadARN).

    If a workload with the same name already exists, the existing workload's
    id and ARN are returned instead (after logging an error).

    NOTE(review): `tags` and `pillarPriorities` are accepted but never
    forwarded to the API call below - kept only for caller compatibility.
    API errors are re-raised after logging; previously the code fell through
    and crashed on an unbound `response` with a misleading secondary error.
    """
    try:
        response = waclient.create_workload(
            WorkloadName=workloadName,
            Description=description,
            ReviewOwner=reviewOwner,
            Environment=environment,
            AwsRegions=awsRegions,
            Lenses=lenses,
            NonAwsRegions=nonAwsRegions,
            ArchitecturalDesign=architecturalDesign,
            IndustryType=industryType,
            Industry=industry,
            Notes=notes,
            AccountIds=accountIds
        )
    except waclient.exceptions.ConflictException:
        # Name collision: look up and reuse the existing workload.
        workloadId, workloadARN = FindWorkload(waclient, workloadName)
        logger.error("ERROR - The workload name %s already exists as workloadId %s" % (workloadName, workloadId))
        return workloadId, workloadARN
    except botocore.exceptions.ParamValidationError as e:
        logger.error("ERROR - Parameter validation error: %s" % e)
        raise
    except botocore.exceptions.ClientError as e:
        logger.error("ERROR - Unexpected error: %s" % e)
        raise
    return response['WorkloadId'], response['WorkloadArn']
def FindWorkload(
    waclient,
    workloadName
):
    """Return (workloadId, workloadArn) of the first workload matching the name prefix.

    Raises IndexError when nothing matches. API errors are re-raised after
    logging; previously the code fell through and crashed on an unbound
    `response` with a misleading secondary error.
    """
    try:
        response = waclient.list_workloads(
            WorkloadNamePrefix=workloadName
        )
    except botocore.exceptions.ParamValidationError as e:
        logger.error("ERROR - Parameter validation error: %s" % e)
        raise
    except botocore.exceptions.ClientError as e:
        logger.error("ERROR - Unexpected error: %s" % e)
        raise
    # Prefix search: the first summary is assumed to be the workload we want.
    workloadId = response['WorkloadSummaries'][0]['WorkloadId']
    workloadArn = response['WorkloadSummaries'][0]['WorkloadArn']
    return workloadId, workloadArn
def DeleteWorkload(
    waclient,
    workloadId
):
    """Delete the given workload. API errors are logged and otherwise ignored
    (best-effort cleanup of the TEMP workload)."""
    try:
        waclient.delete_workload(WorkloadId=workloadId)
    except botocore.exceptions.ParamValidationError as err:
        logger.error("ERROR - Parameter validation error: %s" % err)
    except botocore.exceptions.ClientError as err:
        logger.error("ERROR - Unexpected error: %s" % err)
def GetWorkload(
    waclient,
    workloadId
):
    """Fetch and return the full workload record for `workloadId`.

    ClientError keeps the original behavior of logging and exiting the
    script. ParamValidationError is now re-raised; previously it fell
    through and crashed on an unbound `response`.
    """
    try:
        response = waclient.get_workload(
            WorkloadId=workloadId
        )
    except botocore.exceptions.ParamValidationError as e:
        logger.error("ERROR - Parameter validation error: %s" % e)
        raise
    except botocore.exceptions.ClientError as e:
        logger.error("ERROR - Unexpected error: %s" % e)
        exit()
    return response['Workload']
def listLens(
    waclient
):
    """Return the list of lens aliases currently available.

    The trivial jmespath projection is replaced with a plain comprehension
    (no third-party dependency needed for this extraction). API errors are
    re-raised after logging; previously the code fell through and crashed
    on an unbound `response`.
    """
    try:
        response = waclient.list_lenses()
    except botocore.exceptions.ParamValidationError as e:
        logger.error("ERROR - Parameter validation error: %s" % e)
        raise
    except botocore.exceptions.ClientError as e:
        logger.error("ERROR - Unexpected error: %s" % e)
        raise
    summaries = response.get('LensSummaries') or []
    return [s['LensAlias'] for s in summaries if 'LensAlias' in s]
def getCurrentLensVersion(
    waclient,
    lensAlias
):
    """Return the current LensVersion string for `lensAlias`.

    The string-built jmespath query is replaced with a plain comprehension.
    Raises IndexError for an unknown alias (same as the original
    `lenses[0]`). API errors are re-raised after logging; previously the
    code fell through and crashed on an unbound `response`.
    """
    try:
        response = waclient.list_lenses()
    except botocore.exceptions.ParamValidationError as e:
        logger.error("ERROR - Parameter validation error: %s" % e)
        raise
    except botocore.exceptions.ClientError as e:
        logger.error("ERROR - Unexpected error: %s" % e)
        raise
    versions = [
        s['LensVersion']
        for s in (response.get('LensSummaries') or [])
        if s.get('LensAlias') == lensAlias
    ]
    # IndexError here mirrors the original behavior for unknown aliases.
    return versions[0]
def findAllQuestionId(
    waclient,
    workloadId,
    lensAlias
):
    """Return every AnswerSummary for the lens, across all pillars and pages.

    API errors are re-raised after logging; previously the code logged and
    fell through to use a possibly-unbound `response`, producing a
    misleading secondary crash.
    """
    answers = []
    # Due to a bug in some lenses, I have to iterate over each pillar in order to
    # retrieve the correct results.
    for pillar in PILLAR_PARSE_MAP:
        logger.debug("Grabbing answers for %s %s" % (lensAlias, pillar))
        try:
            response = waclient.list_answers(
                WorkloadId=workloadId,
                LensAlias=lensAlias,
                PillarId=pillar
            )
        except botocore.exceptions.ParamValidationError as e:
            logger.error("ERROR - Parameter validation error: %s" % e)
            raise
        except botocore.exceptions.ClientError as e:
            logger.error("ERROR - Unexpected error: %s" % e)
            raise
        answers.extend(response["AnswerSummaries"])
        # Follow NextToken pagination until this pillar is exhausted.
        while "NextToken" in response:
            try:
                response = waclient.list_answers(WorkloadId=workloadId, LensAlias=lensAlias, PillarId=pillar, NextToken=response["NextToken"])
            except botocore.exceptions.ParamValidationError as e:
                logger.error("ERROR - Parameter validation error: %s" % e)
                raise
            except botocore.exceptions.ClientError as e:
                logger.error("ERROR - Unexpected error: %s" % e)
                raise
            answers.extend(response["AnswerSummaries"])
    return answers
def getQuestionDetails(
    waclient,
    workloadId,
    lensAlias,
    questionId
):
    """Return (description, improvementPlanUrl, helpfulResourceUrl, notes) for a question.

    Missing fields come back as None, matching the old jmespath lookups.
    API errors are re-raised after logging; previously the code fell
    through and crashed on an unbound `response`.
    """
    try:
        response = waclient.get_answer(
            WorkloadId=workloadId,
            LensAlias=lensAlias,
            QuestionId=questionId
        )
    except botocore.exceptions.ParamValidationError as e:
        logger.error("ERROR - Parameter validation error: %s" % e)
        raise
    except botocore.exceptions.ClientError as e:
        logger.error("ERROR - Unexpected error: %s" % e)
        raise
    answer = response.get('Answer') or {}
    qDescription = answer.get('QuestionDescription')
    qImprovementPlanUrl = answer.get('ImprovementPlanUrl')
    qHelpfulResourceUrl = answer.get('HelpfulResourceUrl')
    qNotes = answer.get('Notes')
    return qDescription, qImprovementPlanUrl, qHelpfulResourceUrl, qNotes
def updateAnswersForQuestion(
    waclient,
    workloadId,
    lensAlias,
    questionId,
    selectedChoices,
    notes
):
    """Record `selectedChoices` for a question; return the API's SelectedChoices echo.

    Returns None if the response carries no Answer (matching the old
    jmespath lookup). API errors are re-raised after logging; previously
    the code fell through and crashed on an unbound `response`.
    """
    try:
        response = waclient.update_answer(
            WorkloadId=workloadId,
            LensAlias=lensAlias,
            QuestionId=questionId,
            SelectedChoices=selectedChoices,
            Notes=notes
        )
    except botocore.exceptions.ParamValidationError as e:
        logger.error("ERROR - Parameter validation error: %s" % e)
        raise
    except botocore.exceptions.ClientError as e:
        logger.error("ERROR - Unexpected error: %s" % e)
        raise
    return (response.get('Answer') or {}).get('SelectedChoices')
def getImprovementPlanItems(
    waclient,
    workloadId,
    lensAlias,
    QuestionId,
    PillarId,
    ImprovementPlanUrl,
    ChoiceList
):
    """Scrape the improvement-plan page and map each ChoiceId to its docs link.

    Fetches ImprovementPlanUrl over the network, scans each HTML source line
    for any of the given ChoiceIds, and records the href of the first <a>
    tag on a matching line. Returns a dict {choiceId: href}.
    NOTE(review): waclient/workloadId/lensAlias/QuestionId/PillarId are not
    used in this body - presumably kept for interface symmetry; confirm.
    """
    # This will parse the IP Items to gather the links we need
    response = {}
    htmlString = ""
    # unanswered = getUnansweredForQuestion(waclient,workloadId,'wellarchitected',QuestionId)
    urlresponse = urllib.request.urlopen(ImprovementPlanUrl)
    htmlBytes = urlresponse.read()
    htmlStr = htmlBytes.decode("utf8")
    # Scan the page line by line; choice ids appear inline in the HTML source.
    htmlSplit = htmlStr.split('\n')
    ipHTMLList = {}
    for line in htmlSplit:
        for uq in ChoiceList:
            if uq in line:
                # First anchor on the matching line is the improvement-plan link.
                parsed = BeautifulSoup(line,features="html.parser")
                ipHTMLList.update({uq: str(parsed.a['href'])})
    return ipHTMLList
def getImprovementPlanHTMLDescription(
    ImprovementPlanUrl,
    PillarId
):
    """Fetch the improvement-plan page and extract one step's HTML description.

    The URL fragment (after '#') names the step (e.g. '#step3'); the HTML
    between that step's anchor and the next step (or a closing </div>) is
    collected and returned as a BeautifulSoup object, together with the
    question-id text found near the pillar shorthand on the page.
    """
    logger.debug("ImprovementPlanUrl: %s for pillar %s " % (ImprovementPlanUrl,PILLAR_PARSE_MAP[PillarId]))
    stepRaw = ImprovementPlanUrl.rsplit('#')[1]
    # Grab the number of the step we are referencing
    # This will work as long as their are less than 99 steps.
    if len(stepRaw) <= 5:
        stepNumber = stepRaw[-1]
    else:
        # NOTE(review): for a two-digit step like 'step12' this grabs only
        # '1' (the tens digit) - looks suspicious; confirm intent.
        stepNumber = stepRaw[-2]
    #Generate the string for the step number
    firstItem = "step"+stepNumber
    secondItem = ("step"+str((int(stepNumber)+1)))
    logger.debug ("Going from %s to %s" % (firstItem, secondItem))
    urlresponse = urllib.request.urlopen(ImprovementPlanUrl)
    htmlBytes = urlresponse.read()
    htmlStr = htmlBytes.decode("utf8")
    htmlSplit = htmlStr.split('\n')
    # State machine over the page's source lines: copy everything between
    # the firstItem anchor and the secondItem anchor (or a closing </div>).
    foundit = 0
    ipString = ""
    questionIdText = ""
    for i in htmlSplit:
        if PILLAR_PARSE_MAP[PillarId] in i:
            bsparse = BeautifulSoup(i,features="html.parser")
            questionIdText = str(bsparse.text).split(':')[0].strip()
        if (secondItem in i) or ("</div>" in i):
            foundit = 0
        if firstItem in i:
            foundit = 1
            ipString+=i
        elif foundit:
            ipString+=i
    prettyHTML = BeautifulSoup(ipString,features="html.parser")
    # Need to remove all of the "local glossary links" since they point to relative paths
    for a in prettyHTML.findAll('a', 'glossref'):
        a.replaceWithChildren()
    return prettyHTML, questionIdText
def lensTabCreation(
    WACLIENT,
    workloadId,
    lens,
    workbook,
    allQuestionsForLens,
    workloadName="",
    AWSAccountId="",
    workloadDescription=""
):
    """Create one worksheet ("tab") in `workbook` for a single lens.

    Writes a workload-overview header, then one row per best-practice choice
    (grouped per question and pillar), and merges the explanation/notes cells
    across each question's rows. Row colors alternate per question. Reads the
    module-level WORKLOADID to decide between existing-workload and blank
    template rendering.
    NOTE(review): AWSAccountId is expected to be a workload ARN - the account
    id is extracted with split(':')[4] below.
    """
    # Setup some formatting for the workbook
    bold = workbook.add_format({'bold': True})
    bold_border = workbook.add_format({
        'border': 1,
        'border_color': 'black',
        'text_wrap': True
    })
    bold_border_bold = workbook.add_format({
        'border': 1,
        'border_color': 'black',
        'text_wrap': True,
        'font_size': 20,
        'bold': True
    })
    heading = workbook.add_format({
        'font_size': 24,
        'bold': True
    })
    # Two alternating row styles (blue/green) plus no-border and "hidden"
    # variants; 'indent': 100 pushes duplicated text out of the visible cell.
    lineA = workbook.add_format({
        'border': 1,
        'border_color': 'black',
        'bg_color': '#E0EBF6',
        'align': 'top',
        'text_wrap': True
    })
    lineB = workbook.add_format({
        'border': 1,
        'border_color': 'black',
        'bg_color': '#E4EFDC',
        'align': 'top',
        'text_wrap': True
    })
    lineAnoborder = workbook.add_format({
        'border': 0,
        'top': 1,
        'left': 1,
        'right': 1,
        'border_color': 'black',
        'bg_color': '#E0EBF6',
        'align': 'top',
        'text_wrap': True
    })
    lineBnoborder = workbook.add_format({
        'border': 0,
        'top': 1,
        'left': 1,
        'right': 1,
        'border_color': 'black',
        'bg_color': '#E4EFDC',
        'align': 'top',
        'text_wrap': True
    })
    lineAhidden = workbook.add_format({
        'border': 0,
        'left': 1,
        'right': 1,
        'border_color': 'black',
        'bg_color': '#E0EBF6',
        'align': 'top',
        'text_wrap': False,
        'indent': 100
    })
    lineBhidden = workbook.add_format({
        'border': 0,
        'left': 1,
        'right': 1,
        'border_color': 'black',
        'bg_color': '#E4EFDC',
        'align': 'top',
        'text_wrap': False,
        'indent': 100
    })
    sub_heading = workbook.add_format()
    sub_heading.set_font_size(20)
    sub_heading.set_bold(True)
    small_font = workbook.add_format()
    small_font.set_font_size(9)
    # Get the current version of Lens
    logger.debug("Getting lens version for '"+lens+"'")
    versionString = getCurrentLensVersion(WACLIENT,lens)
    logger.debug("Adding worksheet using version "+versionString)
    # Truncate the alias - presumably to keep the sheet name within Excel's
    # 31-character limit once the version suffix is appended; confirm.
    lensName = lens[0:18]
    worksheet = workbook.add_worksheet((lensName+' v'+versionString))
    # Print in landscape
    worksheet.set_landscape()
    # Set to 8.5x11 paper size
    worksheet.set_paper(1)
    # Set the column widths
    worksheet.set_column('A:A', 11)
    worksheet.set_column('B:B', 32)
    worksheet.set_column('C:C', 56)
    worksheet.set_column('D:D', 29)
    worksheet.set_column('E:E', 57)
    worksheet.set_column('F:F', 18)
    worksheet.set_column('G:G', 70)
    # Top of sheet
    worksheet.merge_range('A1:G1', 'Workload Overview', heading)
    worksheet.merge_range('A3:B3', 'Workload Name', bold_border_bold)
    worksheet.merge_range('A4:B4', 'AWS Account ID', bold_border_bold)
    worksheet.merge_range('A5:B5', 'Workload Description', bold_border_bold)
    # If we are using an existing workload, then display the Name, ID, and Description at the top
    # or else just make it blank
    if WORKLOADID:
        worksheet.write('C3', workloadName, bold_border)
        # Account id is the 5th ':'-separated field of the workload ARN.
        accountIdParsed = AWSAccountId.split(':')[4]
        worksheet.write('C4', accountIdParsed, bold_border)
        worksheet.write('C5', workloadDescription, bold_border)
    else:
        worksheet.write('C3', '', bold_border)
        worksheet.write('C4', '', bold_border)
        worksheet.write('C5', '', bold_border)
        worksheet.write('D3', 'Enter the name of system', small_font)
        worksheet.write('D4', 'Enter 12-degit AWS account ID', small_font)
        worksheet.write('D5', 'Briefly describe system architecture and workload, flow etc.', small_font)
    # Subheadings for columns
    worksheet.write('A8', 'Pillar', sub_heading)
    worksheet.write('B8', 'Question', sub_heading)
    worksheet.write('C8', 'Explanation', sub_heading)
    worksheet.write('D8', 'Choice (Best Practice)', sub_heading)
    worksheet.write('E8', 'Detail', sub_heading)
    worksheet.write('F8', 'Response', sub_heading)
    worksheet.write('G8', 'Notes (optional)', sub_heading)
    # Freeze the top of the sheet
    worksheet.freeze_panes(8,0)
    # AutoFilter on the first two columns
    worksheet.autofilter('A8:B8')
    # Make it easier to print
    worksheet.repeat_rows(1, 8)
    worksheet.fit_to_pages(1, 99)
    # Starting point for pillar questions
    cellPosition = 8
    # Starting cell look with lineA. Will switch back and forth
    myCell = lineA
    myCellhidden = lineAhidden
    myCellnoborder = lineAnoborder
    for pillar in PILLAR_PARSE_MAP:
        # This is the question number for each pillar (ex: OPS1, OPS2, etc)
        qNum = 1
        # The query will return all questions for a lens and pillar
        jmesquery = "[?PillarId=='"+pillar+"']"
        allQuestionsForPillar = jmespath.search(jmesquery, allQuestionsForLens)
        # For each of the possible answers, parse them and put into the Worksheet
        for answers in allQuestionsForPillar:
            # List all best practices
            questionTitle = PILLAR_PARSE_MAP[answers['PillarId']]+str(qNum)+" - "+answers['QuestionTitle']
            qDescription, qImprovementPlanUrl, qHelpfulResourceUrl, qNotes = getQuestionDetails(WACLIENT,workloadId,lens,answers['QuestionId'])
            # Some of the questions have extra whitespaces and I need to remove those to fit into the cell
            qDescription = qDescription.replace('\n ','').replace(' ','').replace('\t', '').replace('\n', '')
            qDescription = qDescription.rstrip()
            qDescription = qDescription.strip()
            logger.debug("Working on '"+questionTitle+"'")
            logger.debug("It has answers of: "+json.dumps(answers['SelectedChoices']))
            cellID = cellPosition + 1
            # If the question has been answered (which we do for the TEMP workload) we grab the URL and parse for the HTML content
            if qImprovementPlanUrl:
                jmesquery = "[?QuestionId=='"+answers['QuestionId']+"'].Choices[].ChoiceId"
                choiceList = jmespath.search(jmesquery, allQuestionsForLens)
                ipList = getImprovementPlanItems(WACLIENT,workloadId,lens,answers['QuestionId'],answers['PillarId'],qImprovementPlanUrl,choiceList)
            else:
                ipList = []
            startingCellID=cellID
            # If its the first time through this particular pillar question:
            # I want to only write the name once, but I need to fill in
            # each cell with the same data so the autosort works properly
            # (else it will only show the first best practice)
            firstTimePillar=True
            for choices in answers['Choices']:
                # Write the pillar name and question in every cell for autosort, but only show the first one
                cell = 'A'+str(cellID)
                if firstTimePillar:
                    worksheet.write(cell, PILLAR_PROPER_NAME_MAP[pillar], myCellnoborder)
                    cell = 'B'+str(cellID)
                    worksheet.write(cell, questionTitle, myCellnoborder)
                    firstTimePillar=False
                else:
                    worksheet.write(cell, PILLAR_PROPER_NAME_MAP[pillar], myCellhidden)
                    cell = 'B'+str(cellID)
                    worksheet.write(cell, questionTitle, myCellhidden)
                # Start writing each of the BP's, details, etc
                cell = 'D'+str(cellID)
                Title = choices['Title'].replace(' ','').replace('\t', '').replace('\n', '')
                if any(choices['ChoiceId'] in d for d in ipList):
                    # Choice has an improvement-plan link: write it as a hyperlink
                    worksheet.write_url(cell, ipList[choices['ChoiceId']], myCell, string=Title)
                    #ipItemHTML, questionIdText = getImprovementPlanHTMLDescription(ipList[choices['ChoiceId']],answers['PillarId'])
                    #htmlString = ipItemHTML.text
                    htmlString = ""
                    htmlString = htmlString.replace('\n ','').replace(' ','').replace('\t', '').strip().rstrip()
                    # print(htmlString)
                    worksheet.write_comment(cell, htmlString, {'author': 'Improvement Plan'})
                else:
                    worksheet.write(cell,Title,myCell)
                # Add all Details for each best practice/choice
                cell = 'E'+str(cellID)
                # Remove all of the extra spaces in the description field
                Description = choices['Description'].replace('\n ','')
                Description = Description.replace('\n ','')
                Description = Description.replace(' ','').replace('\t', '').replace('\n', '')
                Description = Description.rstrip()
                Description = Description.strip()
                worksheet.write(cell, Description ,myCell)
                # If this is an existing workload, we will show SELECTED if the have it checked
                # I would love to use a XLSX checkbox, but this library doesn't support it
                cell = 'F'+str(cellID)
                responseText = ""
                if choices['ChoiceId'] in answers['SelectedChoices']:
                    responseText = "SELECTED"
                else:
                    responseText = ""
                worksheet.write(cell, responseText ,myCell)
                cellID+=1
            # We are out of the choice/detail/response loop, so know how many rows were consumed
            # and we can create the explanation and notes field to span all of them
            # Explanantion field
            cellMerge = 'C'+str(startingCellID)+':C'+str(cellID-1)
            worksheet.merge_range(cellMerge, qDescription,myCell)
            # Notes field
            cellMerge = 'G'+str(startingCellID)+':G'+str(cellID-1)
            if WORKLOADID:
                worksheet.merge_range(cellMerge, qNotes, myCell)
            else:
                worksheet.merge_range(cellMerge, "", myCell)
            cellID-=1
            # Increase the question number
            qNum += 1
            # Reset the starting cellPosition to the last cellID
            cellPosition = cellID
            # Reset the cell formatting to alternate between the two colors
            if myCell == lineA:
                myCell = lineB
                myCellhidden = lineBhidden
                myCellnoborder = lineBnoborder
            else:
                myCell = lineA
                myCellhidden = lineAhidden
                myCellnoborder = lineAnoborder
def main():
    """Entry point: export Well-Architected answers to the XLSX file FILENAME.

    Two modes (driven by the module-level WORKLOADID parsed from the CLI):
    - WORKLOADID set: read the existing workload's lenses/answers.
    - WORKLOADID empty: create a TEMP workload, auto-answer every question,
      export, then delete it unless KEEPTEMP is set.
    """
    boto3_min_version = "1.16.38"
    # Verify if the version of Boto3 we are running has the wellarchitected APIs included
    if (packaging.version.parse(boto3.__version__) < packaging.version.parse(boto3_min_version)):
        logger.error("Your Boto3 version (%s) is less than %s. You must ugprade to run this script (pip3 upgrade boto3)" % (boto3.__version__, boto3_min_version))
        exit()
    logger.info("Script version %s" % __version__)
    logger.info("Starting Boto %s Session" % boto3.__version__)
    # Create a new boto3 session
    SESSION1 = boto3.session.Session(profile_name=PROFILE)
    # Initiate the well-architected session using the region defined above
    WACLIENT = SESSION1.client(
        service_name='wellarchitected',
        region_name=REGION_NAME,
    )
    # If this is an existing workload, we need to query for the various workload properties
    if WORKLOADID:
        logger.info("User specified workload id of %s" % WORKLOADID)
        workloadJson = GetWorkload(WACLIENT, WORKLOADID)
        LENSES = workloadJson['Lenses']
        logger.info("Lenses for %s: %s" % (WORKLOADID, json.dumps(LENSES)))
        WORKLOADNAME = workloadJson['WorkloadName']
        DESCRIPTION = workloadJson['Description']
        REVIEWOWNER = workloadJson['ReviewOwner']
        ENVIRONMENT = workloadJson['Environment']
        AWSREGIONS = workloadJson['AwsRegions']
        workloadId = WORKLOADID
        workloadARN = workloadJson['WorkloadArn']
    else:
        # In order to gather all of the questions, you must create a TEMP Workload
        logger.info("No workload ID specified, we will create a TEMP workload")
        # Grab all lenses that are currently available
        LENSES = listLens(WACLIENT)
        logger.info("Lenses available: "+json.dumps(LENSES))
        # Set the needed workload variables before we create it
        WORKLOADNAME = 'TEMP DO NOT USE WORKLOAD'
        DESCRIPTION = 'TEMP DO NOT USE WORKLOAD'
        REVIEWOWNER = 'WA Python Script'
        ENVIRONMENT = 'PRODUCTION'
        AWSREGIONS = [REGION_NAME]
        # Creating the TEMP workload
        logger.info("Creating a new workload to gather questions and answers")
        workloadId, workloadARN = CreateNewWorkload(WACLIENT,WORKLOADNAME,DESCRIPTION,REVIEWOWNER,ENVIRONMENT,AWSREGIONS,LENSES,"[]","[]")
    # Create an new xlsx file and add a worksheet.
    logger.info("Creating xlsx file '"+FILENAME+"'")
    workbook = xlsxwriter.Workbook(FILENAME)
    workbook.set_size(2800, 1600)
    # Simple hack to get Wellarchitected base framework first (reverse sort)
    # This will no longer work if we ever have a lens that starts with WB*, X, Y, or Z :)
    LENSES.sort(reverse=True)
    # Iterate over each lens that we either have added or is in the workload
    for lens in LENSES:
        # Grab all questions for a particular lens
        allQuestions = findAllQuestionId(WACLIENT, workloadId, lens)
        if WORKLOADID:
            # If this is an existing workload, just go ahead and create the Tab and cells.
            # workloadARN is passed as the AWSAccountId argument; lensTabCreation
            # extracts the 12-digit account id from the ARN.
            logger.debug("Not answering questions for existing workload")
            lensTabCreation(WACLIENT,workloadId,lens,workbook,allQuestions,WORKLOADNAME,workloadARN,DESCRIPTION)
        else:
            # If this is the TEMP workload, we need to first gather all of the questionIDs possible
            jmesquery = "[*].{QuestionId: QuestionId, PillarId: PillarId, Choices: Choices[].ChoiceId}"
            allQuestionIds = jmespath.search(jmesquery, allQuestions)
            # Next we answer all of the questions across all lenses in the TEMP workload
            for question in allQuestionIds:
                logger.debug("Answering question %s in the %s lens" % (question['QuestionId'], lens))
                updateAnswersForQuestion(WACLIENT,workloadId,lens,question['QuestionId'],question['Choices'],'TEMP WORKLOAD - Added by export script')
            # Once the questions have been answered, we go ahead and create the tab for each
            lensTabCreation(WACLIENT,workloadId,lens,workbook,allQuestions)
    # Close out the workbook file
    logger.info("Closing Workbook File")
    workbook.close()
    # If this is TEMP workload, we may remove it if it has not been set to keep
    if not WORKLOADID:
        if not KEEPTEMP:
            logger.info("Removing TEMP Workload")
            DeleteWorkload(WACLIENT, workloadId)
    logger.info("Done")
if __name__ == "__main__":
    main()
#!/usr/bin/env python3
# This is a tool to export the WA framework answers to a XLSX file
#
# This code is only for use in Well-Architected labs
# *** NOT FOR PRODUCTION USE ***
#
# Licensed under the Apache 2.0 and MITnoAttr License.
#
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
# https://aws.amazon.com/apache2.0/
import botocore
import boto3
import json
import datetime
import logging
import jmespath
import xlsxwriter
import argparse
from pkg_resources import packaging
import urllib.request
from bs4 import BeautifulSoup, NavigableString, Tag
# Script metadata (used in the version banner logged at startup).
__author__ = "Eric Pullen"
__email__ = "eppullen@amazon.com"
__copyright__ = "Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved."
__credits__ = ["Eric Pullen"]
__version__ = "0.1"
# Default region listed here
REGION_NAME = "us-east-1"
blankjson = {}
# Module-level fallback; the API wrapper functions assign their own local `response`.
response = ""
# Setup Logging
logging.basicConfig(
    level=logging.DEBUG,
    format='%(asctime)s.%(msecs)03d %(levelname)s %(module)s - %(funcName)s: %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S',
)
logger = logging.getLogger()
# Silence chatty AWS/HTTP libraries so only this script's messages are shown.
logging.getLogger('boto3').setLevel(logging.CRITICAL)
logging.getLogger('botocore').setLevel(logging.CRITICAL)
logging.getLogger('s3transfer').setLevel(logging.CRITICAL)
logging.getLogger('urllib3').setLevel(logging.CRITICAL)
PARSER = argparse.ArgumentParser(
    formatter_class=argparse.RawDescriptionHelpFormatter,
    description='''\
This utility has two options to run:
------------------------------------
1) If you provide a workloadid, this will gather all of the answers across all Well-Architected Lenss and export them to a spreadsheet.
2) If you do not provide a workloadid, the utility will generate a TEMP workload and auto-answer every question. It will then generate a spreadsheet with all of the questions, best practices, and even the improvement plan links for each.
'''
)
PARSER.add_argument('-p','--profile', required=False, default="default", help='AWS CLI Profile Name')
PARSER.add_argument('-r','--region', required=False, default="us-east-1", help='From Region Name. Example: us-east-1')
PARSER.add_argument('-w','--workloadid', required=False, default="", help='Workload Id to use instead of creating a TEMP workload')
PARSER.add_argument('-k','--keeptempworkload', action='store_true', help='If you want to keep the TEMP workload created at the end of the export')
PARSER.add_argument('-f','--fileName', required=True, default="./demo.xlsx", help='FileName to export XLSX')
PARSER.add_argument('-v','--debug', action='store_true', help='print debug messages to stderr')
# NOTE: argument parsing happens at import time; importing this module without
# the required -f argument will terminate the process.
ARGUMENTS = PARSER.parse_args()
PROFILE = ARGUMENTS.profile
FILENAME = ARGUMENTS.fileName
REGION_NAME = ARGUMENTS.region
WORKLOADID = ARGUMENTS.workloadid
KEEPTEMP = ARGUMENTS.keeptempworkload
if ARGUMENTS.debug:
    logger.setLevel(logging.DEBUG)
else:
    logger.setLevel(logging.INFO)
# To map our short hand names in the console to the API defined pillars
# Example: print(PILLAR_PARSE_MAP['performance'])
PILLAR_PARSE_MAP = {
    "operationalExcellence": "OPS",
    "security": "SEC",
    "reliability": "REL",
    "performance": "PERF",
    "costOptimization": "COST"
}
# API pillar id -> human-readable pillar name used in the spreadsheet.
PILLAR_PROPER_NAME_MAP = {
    "operationalExcellence": "Operational Excellence",
    "security": "Security",
    "reliability": "Reliability",
    "performance": "Performance Efficiency",
    "costOptimization": "Cost Optimization"
}
# Helper class to convert a datetime item to JSON.
class DateTimeEncoder(json.JSONEncoder):
    """JSON encoder that renders datetime objects via str()."""

    def default(self, obj):
        # Anything that is not a datetime is deferred to the base encoder,
        # which raises TypeError for unsupported types.
        if not isinstance(obj, datetime.datetime):
            return super().default(obj)
        return str(obj)
def CreateNewWorkload(
    waclient,
    workloadName,
    description,
    reviewOwner,
    environment,
    awsRegions,
    lenses,
    tags,
    pillarPriorities,
    notes="",
    nonAwsRegions=[],
    architecturalDesign='',
    industryType='',
    industry='',
    accountIds=[]
    ):
    """Create a new Well-Architected workload and return (workloadId, workloadARN).

    If a workload with the same name already exists, the conflict is logged and
    the existing workload's id/ARN are returned instead. `tags` and
    `pillarPriorities` are accepted for interface compatibility but are not
    forwarded to the API (matching the original call).

    Raises:
        botocore.exceptions.ParamValidationError, botocore.exceptions.ClientError:
            logged and re-raised. (Previously these were only logged and the
            function fell through to an unbound ``response``, crashing with a
            NameError instead of surfacing the real error.)
    """
    try:
        response = waclient.create_workload(
            WorkloadName=workloadName,
            Description=description,
            ReviewOwner=reviewOwner,
            Environment=environment,
            AwsRegions=awsRegions,
            Lenses=lenses,
            NonAwsRegions=nonAwsRegions,
            ArchitecturalDesign=architecturalDesign,
            IndustryType=industryType,
            Industry=industry,
            Notes=notes,
            AccountIds=accountIds
        )
    except waclient.exceptions.ConflictException:
        # The workload name is already taken: reuse the existing workload.
        workloadId, workloadARN = FindWorkload(waclient, workloadName)
        logger.error("ERROR - The workload name %s already exists as workloadId %s" % (workloadName, workloadId))
        return workloadId, workloadARN
    except botocore.exceptions.ParamValidationError as e:
        logger.error("ERROR - Parameter validation error: %s" % e)
        raise
    except botocore.exceptions.ClientError as e:
        logger.error("ERROR - Unexpected error: %s" % e)
        raise
    return response['WorkloadId'], response['WorkloadArn']
def FindWorkload(
    waclient,
    workloadName
    ):
    """Look up a workload by name prefix and return (workloadId, workloadArn).

    The first summary returned by the prefix search wins.
    """
    try:
        response = waclient.list_workloads(
            WorkloadNamePrefix=workloadName
        )
    except botocore.exceptions.ParamValidationError as e:
        logger.error("ERROR - Parameter validation error: %s" % e)
    except botocore.exceptions.ClientError as e:
        logger.error("ERROR - Unexpected error: %s" % e)
    first = response['WorkloadSummaries'][0]
    return first['WorkloadId'], first['WorkloadArn']
def DeleteWorkload(
    waclient,
    workloadId
    ):
    """Delete the given workload. Errors are logged, not raised; returns None."""
    try:
        waclient.delete_workload(WorkloadId=workloadId)
    except botocore.exceptions.ParamValidationError as e:
        logger.error("ERROR - Parameter validation error: %s" % e)
    except botocore.exceptions.ClientError as e:
        logger.error("ERROR - Unexpected error: %s" % e)
def GetWorkload(
    waclient,
    workloadId
    ):
    """Fetch and return the full workload record for workloadId.

    A ClientError is treated as fatal: it is logged and the process exits,
    since nothing downstream can run without the workload.
    """
    try:
        response = waclient.get_workload(WorkloadId=workloadId)
    except botocore.exceptions.ParamValidationError as e:
        logger.error("ERROR - Parameter validation error: %s" % e)
    except botocore.exceptions.ClientError as e:
        logger.error("ERROR - Unexpected error: %s" % e)
        exit()
    return response['Workload']
def listLens(
    waclient
    ):
    """Return the alias of every lens the Well-Architected API currently offers."""
    try:
        response = waclient.list_lenses()
    except botocore.exceptions.ParamValidationError as e:
        logger.error("ERROR - Parameter validation error: %s" % e)
    except botocore.exceptions.ClientError as e:
        logger.error("ERROR - Unexpected error: %s" % e)
    # Project just the alias field out of each lens summary.
    return jmespath.search("LensSummaries[*].LensAlias", response)
def getCurrentLensVersion(
    waclient,
    lensAlias
    ):
    """Return the current version string of the lens identified by lensAlias."""
    try:
        response = waclient.list_lenses()
    except botocore.exceptions.ParamValidationError as e:
        logger.error("ERROR - Parameter validation error: %s" % e)
    except botocore.exceptions.ClientError as e:
        logger.error("ERROR - Unexpected error: %s" % e)
    # Filter the summaries down to the one matching alias and take its version.
    query = "LensSummaries[?LensAlias==`" + lensAlias + "`].LensVersion"
    matches = jmespath.search(query, response)
    return matches[0]
def findAllQuestionId(
    waclient,
    workloadId,
    lensAlias
    ):
    """Return every AnswerSummary for the given workload+lens, across all five
    pillars in PILLAR_PARSE_MAP, following NextToken pagination."""
    answers = []
    # Due to a bug in some lenses, I have to iterate over each pillar in order to
    # retrieve the correct results.
    for pillar in PILLAR_PARSE_MAP:
        logger.debug("Grabbing answers for %s %s" % (lensAlias, pillar))
        # Find a questionID using the questionTitle
        try:
            response=waclient.list_answers(
                WorkloadId=workloadId,
                LensAlias=lensAlias,
                PillarId=pillar
            )
        except botocore.exceptions.ParamValidationError as e:
            logger.error("ERROR - Parameter validation error: %s" % e)
        except botocore.exceptions.ClientError as e:
            logger.error("ERROR - Unexpected error: %s" % e)
        answers.extend(response["AnswerSummaries"])
        # Keep requesting pages until the service stops returning a NextToken.
        while "NextToken" in response:
            try:
                response = waclient.list_answers(WorkloadId=workloadId,LensAlias=lensAlias,PillarId=pillar,NextToken=response["NextToken"])
            except botocore.exceptions.ParamValidationError as e:
                logger.error("ERROR - Parameter validation error: %s" % e)
            except botocore.exceptions.ClientError as e:
                logger.error("ERROR - Unexpected error: %s" % e)
            answers.extend(response["AnswerSummaries"])
    return answers
def getQuestionDetails(
    waclient,
    workloadId,
    lensAlias,
    questionId
    ):
    """Return (description, improvementPlanUrl, helpfulResourceUrl, notes)
    for a single question's answer record."""
    try:
        response = waclient.get_answer(
            WorkloadId=workloadId,
            LensAlias=lensAlias,
            QuestionId=questionId
        )
    except botocore.exceptions.ParamValidationError as e:
        logger.error("ERROR - Parameter validation error: %s" % e)
    except botocore.exceptions.ClientError as e:
        logger.error("ERROR - Unexpected error: %s" % e)
    # Pull each field of interest out of the Answer structure, in order.
    fields = ("QuestionDescription", "ImprovementPlanUrl", "HelpfulResourceUrl", "Notes")
    return tuple(jmespath.search("Answer." + field, response) for field in fields)
def updateAnswersForQuestion(
    waclient,
    workloadId,
    lensAlias,
    questionId,
    selectedChoices,
    notes
    ):
    """Record selectedChoices (and notes) as the answer to one question.

    Returns the list of selected choices as stored by the service.
    """
    try:
        response = waclient.update_answer(
            WorkloadId=workloadId,
            LensAlias=lensAlias,
            QuestionId=questionId,
            SelectedChoices=selectedChoices,
            Notes=notes
        )
    except botocore.exceptions.ParamValidationError as e:
        logger.error("ERROR - Parameter validation error: %s" % e)
    except botocore.exceptions.ClientError as e:
        logger.error("ERROR - Unexpected error: %s" % e)
    # Echo back what the service recorded as selected.
    return jmespath.search("Answer.SelectedChoices", response)
def getImprovementPlanItems(
    waclient,
    workloadId,
    lensAlias,
    QuestionId,
    PillarId,
    ImprovementPlanUrl,
    ChoiceList
    ):
    """Scrape the improvement-plan page and map each choice id to its doc link.

    Downloads ImprovementPlanUrl and, for every choice id in ChoiceList that
    appears on a line of the page, records the href of the first anchor found
    on that line. Returns a dict {choiceId: href}.

    Fixes: the HTTP response is now closed deterministically via a context
    manager (it was previously left open, leaking the socket), and the unused
    ``response``/``htmlString`` locals were removed.
    """
    # waclient/workloadId/lensAlias/QuestionId/PillarId are unused here but
    # kept for interface compatibility with existing call sites.
    with urllib.request.urlopen(ImprovementPlanUrl) as urlresponse:
        htmlStr = urlresponse.read().decode("utf8")
    ipHTMLList = {}
    for line in htmlStr.split('\n'):
        for uq in ChoiceList:
            if uq in line:
                # The first anchor on the matching line is the doc link.
                parsed = BeautifulSoup(line, features="html.parser")
                ipHTMLList.update({uq: str(parsed.a['href'])})
    return ipHTMLList
def getImprovementPlanHTMLDescription(
    ImprovementPlanUrl,
    PillarId
    ):
    """Scrape the improvement-plan page and return (prettyHTML, questionIdText):
    the HTML fragment for the single step that ImprovementPlanUrl's #fragment
    points at, and the question-id text found on the page.

    NOTE(review): only referenced from a commented-out call site in
    lensTabCreation — confirm before relying on it.
    """
    logger.debug("ImprovementPlanUrl: %s for pillar %s " % (ImprovementPlanUrl,PILLAR_PARSE_MAP[PillarId]))
    # The URL fragment names the step, e.g. "...#step5".
    stepRaw = ImprovementPlanUrl.rsplit('#')[1]
    # Grab the number of the step we are referencing
    # This will work as long as there are fewer than 99 steps.
    # NOTE(review): for a two-digit step ("step12", len 6) this keeps only
    # stepRaw[-2] == '1', i.e. the tens digit — looks suspicious; confirm intent.
    if len(stepRaw) <= 5:
        stepNumber = stepRaw[-1]
    else:
        stepNumber = stepRaw[-2]
    # Generate the anchor ids for this step and the next one (the next step's
    # anchor marks where this step's HTML ends).
    firstItem = "step"+stepNumber
    secondItem = ("step"+str((int(stepNumber)+1)))
    logger.debug ("Going from %s to %s" % (firstItem, secondItem))
    urlresponse = urllib.request.urlopen(ImprovementPlanUrl)
    htmlBytes = urlresponse.read()
    htmlStr = htmlBytes.decode("utf8")
    htmlSplit = htmlStr.split('\n')
    # State machine over the page's lines: foundit=1 while we are inside the
    # target step's markup; collect those lines into ipString.
    foundit = 0
    ipString = ""
    questionIdText = ""
    for i in htmlSplit:
        # Any line containing the pillar's short code (e.g. "OPS") yields the
        # question-id text (text before the first ':'); last match wins.
        if PILLAR_PARSE_MAP[PillarId] in i:
            bsparse = BeautifulSoup(i,features="html.parser")
            questionIdText = str(bsparse.text).split(':')[0].strip()
        if (secondItem in i) or ("</div>" in i):
            foundit = 0
        if firstItem in i:
            foundit = 1
            ipString+=i
        elif foundit:
            ipString+=i
    prettyHTML = BeautifulSoup(ipString,features="html.parser")
    # Need to remove all of the "local glossary links" since they point to relative paths
    for a in prettyHTML.findAll('a', 'glossref'):
        a.replaceWithChildren()
    return prettyHTML, questionIdText
def lensTabCreation(
    WACLIENT,
    workloadId,
    lens,
    workbook,
    allQuestionsForLens,
    workloadName="",
    AWSAccountId="",
    workloadDescription=""
    ):
    """Create one worksheet in `workbook` for `lens`.

    Writes a workload-overview header, then one row per best-practice choice
    (grouped by pillar and question) with the question explanation, an
    improvement-plan link on the choice where available, a SELECTED/blank
    response column, and a notes column.

    Fixes: the `lineAhidden` format dict had been corrupted (it lost its
    bg_color/align keys); the account-ID hint read "12-degit"; the unused
    `bold` format was removed.
    """
    # Setup some formatting for the workbook
    bold_border = workbook.add_format({
        'border': 1,
        'border_color': 'black',
        'text_wrap': True
    })
    bold_border_bold = workbook.add_format({
        'border': 1,
        'border_color': 'black',
        'text_wrap': True,
        'font_size': 20,
        'bold': True
    })
    heading = workbook.add_format({
        'font_size': 24,
        'bold': True
    })
    # Alternating row colors: lineA (blue) / lineB (green). Each has a
    # "noborder" variant for the first row of a question and a "hidden"
    # variant (the huge indent pushes the duplicated text out of view) for
    # the repeated pillar/question cells that exist only so autofilter works.
    lineA = workbook.add_format({
        'border': 1,
        'border_color': 'black',
        'bg_color': '#E0EBF6',
        'align': 'top',
        'text_wrap': True
    })
    lineB = workbook.add_format({
        'border': 1,
        'border_color': 'black',
        'bg_color': '#E4EFDC',
        'align': 'top',
        'text_wrap': True
    })
    lineAnoborder = workbook.add_format({
        'border': 0,
        'top': 1,
        'left': 1,
        'right': 1,
        'border_color': 'black',
        'bg_color': '#E0EBF6',
        'align': 'top',
        'text_wrap': True
    })
    lineBnoborder = workbook.add_format({
        'border': 0,
        'top': 1,
        'left': 1,
        'right': 1,
        'border_color': 'black',
        'bg_color': '#E4EFDC',
        'align': 'top',
        'text_wrap': True
    })
    lineAhidden = workbook.add_format({
        'border': 0,
        'left': 1,
        'right': 1,
        'border_color': 'black',
        'bg_color': '#E0EBF6',
        'align': 'top',
        'text_wrap': False,
        'indent': 100
    })
    lineBhidden = workbook.add_format({
        'border': 0,
        'left': 1,
        'right': 1,
        'border_color': 'black',
        'bg_color': '#E4EFDC',
        'align': 'top',
        'text_wrap': False,
        'indent': 100
    })
    sub_heading = workbook.add_format()
    sub_heading.set_font_size(20)
    sub_heading.set_bold(True)
    small_font = workbook.add_format()
    small_font.set_font_size(9)
    # Get the current version of Lens
    logger.debug("Getting lens version for '"+lens+"'")
    versionString = getCurrentLensVersion(WACLIENT, lens)
    logger.debug("Adding worksheet using version "+versionString)
    # Truncate the alias so the worksheet name stays within Excel's limit.
    lensName = lens[0:18]
    worksheet = workbook.add_worksheet((lensName+' v'+versionString))
    # Print in landscape
    worksheet.set_landscape()
    # Set to 8.5x11 paper size
    worksheet.set_paper(1)
    # Set the column widths
    worksheet.set_column('A:A', 11)
    worksheet.set_column('B:B', 32)
    worksheet.set_column('C:C', 56)
    worksheet.set_column('D:D', 29)
    worksheet.set_column('E:E', 57)
    worksheet.set_column('F:F', 18)
    worksheet.set_column('G:G', 70)
    # Top of sheet
    worksheet.merge_range('A1:G1', 'Workload Overview', heading)
    worksheet.merge_range('A3:B3', 'Workload Name', bold_border_bold)
    worksheet.merge_range('A4:B4', 'AWS Account ID', bold_border_bold)
    worksheet.merge_range('A5:B5', 'Workload Description', bold_border_bold)
    # If we are using an existing workload, then display the Name, ID, and
    # Description at the top; or else leave them blank with fill-in hints.
    if WORKLOADID:
        worksheet.write('C3', workloadName, bold_border)
        # The account id is the 5th colon-separated field of the workload ARN.
        accountIdParsed = AWSAccountId.split(':')[4]
        worksheet.write('C4', accountIdParsed, bold_border)
        worksheet.write('C5', workloadDescription, bold_border)
    else:
        worksheet.write('C3', '', bold_border)
        worksheet.write('C4', '', bold_border)
        worksheet.write('C5', '', bold_border)
        worksheet.write('D3', 'Enter the name of system', small_font)
        worksheet.write('D4', 'Enter 12-digit AWS account ID', small_font)
        worksheet.write('D5', 'Briefly describe system architecture and workload, flow etc.', small_font)
    # Subheadings for columns
    worksheet.write('A8', 'Pillar', sub_heading)
    worksheet.write('B8', 'Question', sub_heading)
    worksheet.write('C8', 'Explanation', sub_heading)
    worksheet.write('D8', 'Choice (Best Practice)', sub_heading)
    worksheet.write('E8', 'Detail', sub_heading)
    worksheet.write('F8', 'Response', sub_heading)
    worksheet.write('G8', 'Notes (optional)', sub_heading)
    # Freeze the top of the sheet
    worksheet.freeze_panes(8, 0)
    # AutoFilter on the first two columns
    worksheet.autofilter('A8:B8')
    # Make it easier to print
    worksheet.repeat_rows(1, 8)
    worksheet.fit_to_pages(1, 99)
    # Starting point for pillar questions
    cellPosition = 8
    # Starting cell look with lineA. Will switch back and forth
    myCell = lineA
    myCellhidden = lineAhidden
    myCellnoborder = lineAnoborder
    for pillar in PILLAR_PARSE_MAP:
        # This is the question number for each pillar (ex: OPS1, OPS2, etc)
        qNum = 1
        # The query will return all questions for a lens and pillar
        jmesquery = "[?PillarId=='"+pillar+"']"
        allQuestionsForPillar = jmespath.search(jmesquery, allQuestionsForLens)
        # For each of the possible answers, parse them and put into the Worksheet
        for answers in allQuestionsForPillar:
            # List all best practices
            questionTitle = PILLAR_PARSE_MAP[answers['PillarId']]+str(qNum)+" - "+answers['QuestionTitle']
            qDescription, qImprovementPlanUrl, qHelpfulResourceUrl, qNotes = getQuestionDetails(WACLIENT, workloadId, lens, answers['QuestionId'])
            # Some of the questions have extra whitespace; remove it so the
            # text fits into the cell.
            qDescription = qDescription.replace('\n ', '').replace('    ', '').replace('\t', '').replace('\n', '')
            qDescription = qDescription.rstrip()
            qDescription = qDescription.strip()
            logger.debug("Working on '"+questionTitle+"'")
            logger.debug("It has answers of: "+json.dumps(answers['SelectedChoices']))
            cellID = cellPosition + 1
            # If the question has been answered (which we do for the TEMP workload)
            # we grab the URL and parse it for the per-choice doc links.
            if qImprovementPlanUrl:
                jmesquery = "[?QuestionId=='"+answers['QuestionId']+"'].Choices[].ChoiceId"
                choiceList = jmespath.search(jmesquery, allQuestionsForLens)
                ipList = getImprovementPlanItems(WACLIENT, workloadId, lens, answers['QuestionId'], answers['PillarId'], qImprovementPlanUrl, choiceList)
            else:
                ipList = []
            startingCellID = cellID
            # If its the first time through this particular pillar question:
            # I want to only write the name once, but I need to fill in
            # each cell with the same data so the autosort works properly
            # (else it will only show the first best practice)
            firstTimePillar = True
            for choices in answers['Choices']:
                # Write the pillar name and question in every cell for autosort, but only show the first one
                cell = 'A'+str(cellID)
                if firstTimePillar:
                    worksheet.write(cell, PILLAR_PROPER_NAME_MAP[pillar], myCellnoborder)
                    cell = 'B'+str(cellID)
                    worksheet.write(cell, questionTitle, myCellnoborder)
                    firstTimePillar = False
                else:
                    worksheet.write(cell, PILLAR_PROPER_NAME_MAP[pillar], myCellhidden)
                    cell = 'B'+str(cellID)
                    worksheet.write(cell, questionTitle, myCellhidden)
                # Start writing each of the BP's, details, etc
                cell = 'D'+str(cellID)
                Title = choices['Title'].replace('    ', '').replace('\t', '').replace('\n', '')
                # ipList is a dict keyed by choice id when we have a plan URL,
                # else an empty list (so the any() guard is simply False).
                if any(choices['ChoiceId'] in d for d in ipList):
                    worksheet.write_url(cell, ipList[choices['ChoiceId']], myCell, string=Title)
                    #ipItemHTML, questionIdText = getImprovementPlanHTMLDescription(ipList[choices['ChoiceId']],answers['PillarId'])
                    #htmlString = ipItemHTML.text
                    htmlString = ""
                    htmlString = htmlString.replace('\n ', '').replace('    ', '').replace('\t', '').strip().rstrip()
                    worksheet.write_comment(cell, htmlString, {'author': 'Improvement Plan'})
                else:
                    worksheet.write(cell, Title, myCell)
                # Add all Details for each best practice/choice
                cell = 'E'+str(cellID)
                # Remove all of the extra spaces in the description field
                Description = choices['Description'].replace('\n ', '')
                Description = Description.replace('\n ', '')
                Description = Description.replace('    ', '').replace('\t', '').replace('\n', '')
                Description = Description.rstrip()
                Description = Description.strip()
                worksheet.write(cell, Description, myCell)
                # If this is an existing workload, we will show SELECTED if they have it checked
                # I would love to use a XLSX checkbox, but this library doesn't support it
                cell = 'F'+str(cellID)
                responseText = ""
                if choices['ChoiceId'] in answers['SelectedChoices']:
                    responseText = "SELECTED"
                else:
                    responseText = ""
                worksheet.write(cell, responseText, myCell)
                cellID += 1
            # We are out of the choice/detail/response loop, so we know how many
            # rows were consumed and we can create the explanation and notes
            # fields to span all of them.
            # Explanation field
            cellMerge = 'C'+str(startingCellID)+':C'+str(cellID-1)
            worksheet.merge_range(cellMerge, qDescription, myCell)
            # Notes field
            cellMerge = 'G'+str(startingCellID)+':G'+str(cellID-1)
            if WORKLOADID:
                worksheet.merge_range(cellMerge, qNotes, myCell)
            else:
                worksheet.merge_range(cellMerge, "", myCell)
            cellID -= 1
            # Increase the question number
            qNum += 1
            # Reset the starting cellPosition to the last cellID
            cellPosition = cellID
            # Reset the cell formatting to alternate between the two colors
            if myCell == lineA:
                myCell = lineB
                myCellhidden = lineBhidden
                myCellnoborder = lineBnoborder
            else:
                myCell = lineA
                myCellhidden = lineAhidden
                myCellnoborder = lineAnoborder
def main():
    """Entry point: export Well-Architected answers to an XLSX file.

    With -w/--workloadid, exports that workload's answers; otherwise creates a
    TEMP workload, auto-answers every question in every lens, exports, and then
    deletes the TEMP workload unless -k/--keeptempworkload was given.

    Fix: the "must ugprade" typo in the boto3-version error message.
    """
    boto3_min_version = "1.16.38"
    # Verify if the version of Boto3 we are running has the wellarchitected APIs included
    if (packaging.version.parse(boto3.__version__) < packaging.version.parse(boto3_min_version)):
        logger.error("Your Boto3 version (%s) is less than %s. You must upgrade to run this script (pip3 upgrade boto3)" % (boto3.__version__, boto3_min_version))
        exit()
    logger.info("Script version %s" % __version__)
    logger.info("Starting Boto %s Session" % boto3.__version__)
    # Create a new boto3 session
    SESSION1 = boto3.session.Session(profile_name=PROFILE)
    # Initiate the well-architected session using the region defined above
    WACLIENT = SESSION1.client(
        service_name='wellarchitected',
        region_name=REGION_NAME,
    )
    # If this is an existing workload, we need to query for the various workload properties
    if WORKLOADID:
        logger.info("User specified workload id of %s" % WORKLOADID)
        workloadJson = GetWorkload(WACLIENT, WORKLOADID)
        LENSES = workloadJson['Lenses']
        logger.info("Lenses for %s: %s" % (WORKLOADID, json.dumps(LENSES)))
        WORKLOADNAME = workloadJson['WorkloadName']
        DESCRIPTION = workloadJson['Description']
        REVIEWOWNER = workloadJson['ReviewOwner']
        ENVIRONMENT = workloadJson['Environment']
        AWSREGIONS = workloadJson['AwsRegions']
        workloadId = WORKLOADID
        workloadARN = workloadJson['WorkloadArn']
    else:
        # In order to gather all of the questions, you must create a TEMP Workload
        logger.info("No workload ID specified, we will create a TEMP workload")
        # Grab all lenses that are currently available
        LENSES = listLens(WACLIENT)
        logger.info("Lenses available: "+json.dumps(LENSES))
        # Set the needed workload variables before we create it
        WORKLOADNAME = 'TEMP DO NOT USE WORKLOAD'
        DESCRIPTION = 'TEMP DO NOT USE WORKLOAD'
        REVIEWOWNER = 'WA Python Script'
        ENVIRONMENT = 'PRODUCTION'
        AWSREGIONS = [REGION_NAME]
        # Creating the TEMP workload
        logger.info("Creating a new workload to gather questions and answers")
        workloadId, workloadARN = CreateNewWorkload(WACLIENT, WORKLOADNAME, DESCRIPTION, REVIEWOWNER, ENVIRONMENT, AWSREGIONS, LENSES, "[]", "[]")
    # Create an new xlsx file and add a worksheet.
    logger.info("Creating xlsx file '"+FILENAME+"'")
    workbook = xlsxwriter.Workbook(FILENAME)
    workbook.set_size(2800, 1600)
    # Simple hack to get Wellarchitected base framework first (reverse sort)
    # This will no longer work if we ever have a lens that starts with WB*, X, Y, or Z :)
    LENSES.sort(reverse=True)
    # Iterate over each lens that we either have added or is in the workload
    for lens in LENSES:
        # Grab all questions for a particular lens
        allQuestions = findAllQuestionId(WACLIENT, workloadId, lens)
        if WORKLOADID:
            # If this is an existing workload, just go ahead and create the Tab and cells
            logger.debug("Not answering questions for existing workload")
            lensTabCreation(WACLIENT, workloadId, lens, workbook, allQuestions, WORKLOADNAME, workloadARN, DESCRIPTION)
        else:
            # If this is the TEMP workload, we need to first gather all of the questionIDs possible
            jmesquery = "[*].{QuestionId: QuestionId, PillarId: PillarId, Choices: Choices[].ChoiceId}"
            allQuestionIds = jmespath.search(jmesquery, allQuestions)
            # Next we answer all of the questions across all lenses in the TEMP workload
            for question in allQuestionIds:
                logger.debug("Answering question %s in the %s lens" % (question['QuestionId'], lens))
                updateAnswersForQuestion(WACLIENT, workloadId, lens, question['QuestionId'], question['Choices'], 'TEMP WORKLOAD - Added by export script')
            # Once the questions have been answered, we go ahead and create the tab for each
            lensTabCreation(WACLIENT, workloadId, lens, workbook, allQuestions)
    # Close out the workbook file
    logger.info("Closing Workbook File")
    workbook.close()
    # If this is TEMP workload, we may remove it if it has not been set to keep
    if not WORKLOADID:
        if not KEEPTEMP:
            logger.info("Removing TEMP Workload")
            DeleteWorkload(WACLIENT, workloadId)
    logger.info("Done")
# Run the export when invoked as a script (the guard had been corrupted by an
# extraction artifact that fused unrelated text onto these lines).
if __name__ == "__main__":
    main()
exportAnswersToXLSX.py | #!/usr/bin/env python3
# This is a tool to export the WA framework answers to a XLSX file
#
# This code is only for use in Well-Architected labs
# *** NOT FOR PRODUCTION USE ***
#
# Licensed under the Apache 2.0 and MITnoAttr License.
#
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
# https://aws.amazon.com/apache2.0/
import botocore
import boto3
import json
import datetime
import logging
import jmespath
import xlsxwriter
import argparse
from pkg_resources import packaging
import urllib.request
from bs4 import BeautifulSoup, NavigableString, Tag
__author__ = "Eric Pullen"
__email__ = "eppullen@amazon.com"
__copyright__ = "Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved."
__credits__ = ["Eric Pullen"]
__version__ = "0.1"
# Default region listed here
REGION_NAME = "us-east-1"
blankjson = {}
response = ""
# Setup Logging
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s.%(msecs)03d %(levelname)s %(module)s - %(funcName)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
)
logger = logging.getLogger()
logging.getLogger('boto3').setLevel(logging.CRITICAL)
logging.getLogger('botocore').setLevel(logging.CRITICAL)
logging.getLogger('s3transfer').setLevel(logging.CRITICAL)
logging.getLogger('urllib3').setLevel(logging.CRITICAL)
PARSER = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description='''\
This utility has two options to run:
------------------------------------
1) If you provide a workloadid, this will gather all of the answers across all Well-Architected Lenss and export them to a spreadsheet.
2) If you do not provide a workloadid, the utility will generate a TEMP workload and auto-answer every question. It will then generate a spreadsheet with all of the questions, best practices, and even the improvement plan links for each.
'''
)
PARSER.add_argument('-p','--profile', required=False, default="default", help='AWS CLI Profile Name')
PARSER.add_argument('-r','--region', required=False, default="us-east-1", help='From Region Name. Example: us-east-1')
PARSER.add_argument('-w','--workloadid', required=False, default="", help='Workload Id to use instead of creating a TEMP workload')
PARSER.add_argument('-k','--keeptempworkload', action='store_true', help='If you want to keep the TEMP workload created at the end of the export')
PARSER.add_argument('-f','--fileName', required=True, default="./demo.xlsx", help='FileName to export XLSX')
PARSER.add_argument('-v','--debug', action='store_true', help='print debug messages to stderr')
ARGUMENTS = PARSER.parse_args()
PROFILE = ARGUMENTS.profile
FILENAME = ARGUMENTS.fileName
REGION_NAME = ARGUMENTS.region
WORKLOADID = ARGUMENTS.workloadid
KEEPTEMP = ARGUMENTS.keeptempworkload
if ARGUMENTS.debug:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
# To map our short hand names in the console to the API defined pillars
# Example: print(PILLAR_PARSE_MAP['performance'])
PILLAR_PARSE_MAP = {
"operationalExcellence": "OPS",
"security": "SEC",
"reliability": "REL",
"performance": "PERF",
"costOptimization": "COST"
}
PILLAR_PROPER_NAME_MAP = {
"operationalExcellence": "Operational Excellence",
"security": "Security",
"reliability": "Reliability",
"performance": "Performance Efficiency",
"costOptimization": "Cost Optimization"
}
# Helper class to convert a datetime item to JSON.
class DateTimeEncoder(json.JSONEncoder):
def default(self, z):
if isinstance(z, datetime.datetime):
return (str(z))
else:
return super().default(z)
def CreateNewWorkload(
waclient,
workloadName,
description,
reviewOwner,
environment,
awsRegions,
lenses,
tags,
pillarPriorities,
notes="",
nonAwsRegions=[],
architecturalDesign='',
industryType='',
industry='',
accountIds=[]
):
# Create your workload
|
def FindWorkload(
waclient,
workloadName
):
# Finding your WorkloadId
try:
response=waclient.list_workloads(
WorkloadNamePrefix=workloadName
)
except botocore.exceptions.ParamValidationError as e:
logger.error("ERROR - Parameter validation error: %s" % e)
except botocore.exceptions.ClientError as e:
logger.error("ERROR - Unexpected error: %s" % e)
# print("Full JSON:",json.dumps(response['WorkloadSummaries'], cls=DateTimeEncoder))
workloadId = response['WorkloadSummaries'][0]['WorkloadId']
workloadArn = response['WorkloadSummaries'][0]['WorkloadArn']
# print("WorkloadId",workloadId)
return workloadId, workloadArn
def DeleteWorkload(
waclient,
workloadId
):
# Delete the WorkloadId
try:
response=waclient.delete_workload(
WorkloadId=workloadId
)
except botocore.exceptions.ParamValidationError as e:
logger.error("ERROR - Parameter validation error: %s" % e)
except botocore.exceptions.ClientError as e:
logger.error("ERROR - Unexpected error: %s" % e)
def GetWorkload(
waclient,
workloadId
):
# Get the WorkloadId
try:
response=waclient.get_workload(
WorkloadId=workloadId
)
except botocore.exceptions.ParamValidationError as e:
logger.error("ERROR - Parameter validation error: %s" % e)
except botocore.exceptions.ClientError as e:
logger.error("ERROR - Unexpected error: %s" % e)
exit()
# print("Full JSON:",json.dumps(response['Workload'], cls=DateTimeEncoder))
workload = response['Workload']
# print("WorkloadId",workloadId)
return workload
def listLens(
waclient
):
# List all lenses currently available
try:
response=waclient.list_lenses()
except botocore.exceptions.ParamValidationError as e:
logger.error("ERROR - Parameter validation error: %s" % e)
except botocore.exceptions.ClientError as e:
logger.error("ERROR - Unexpected error: %s" % e)
# print(json.dumps(response))
lenses = jmespath.search("LensSummaries[*].LensAlias", response)
return lenses
def getCurrentLensVersion(
waclient,
lensAlias
):
# List all lenses currently available
try:
response=waclient.list_lenses()
except botocore.exceptions.ParamValidationError as e:
logger.error("ERROR - Parameter validation error: %s" % e)
except botocore.exceptions.ClientError as e:
logger.error("ERROR - Unexpected error: %s" % e)
# print(json.dumps(response))
searchString = "LensSummaries[?LensAlias==`"+lensAlias+"`].LensVersion"
lenses = jmespath.search(searchString, response)
return lenses[0]
def findAllQuestionId(
waclient,
workloadId,
lensAlias
):
answers = []
# Due to a bug in some lenses, I have to iterate over each pillar in order to
# retrieve the correct results.
for pillar in PILLAR_PARSE_MAP:
logger.debug("Grabbing answers for %s %s" % (lensAlias, pillar))
# Find a questionID using the questionTitle
try:
response=waclient.list_answers(
WorkloadId=workloadId,
LensAlias=lensAlias,
PillarId=pillar
)
except botocore.exceptions.ParamValidationError as e:
logger.error("ERROR - Parameter validation error: %s" % e)
except botocore.exceptions.ClientError as e:
logger.error("ERROR - Unexpected error: %s" % e)
answers.extend(response["AnswerSummaries"])
while "NextToken" in response:
try:
response = waclient.list_answers(WorkloadId=workloadId,LensAlias=lensAlias,PillarId=pillar,NextToken=response["NextToken"])
except botocore.exceptions.ParamValidationError as e:
logger.error("ERROR - Parameter validation error: %s" % e)
except botocore.exceptions.ClientError as e:
logger.error("ERROR - Unexpected error: %s" % e)
answers.extend(response["AnswerSummaries"])
return answers
def getQuestionDetails(
waclient,
workloadId,
lensAlias,
questionId
):
# Find a answer for a questionId
try:
response=waclient.get_answer(
WorkloadId=workloadId,
LensAlias=lensAlias,
QuestionId=questionId
)
except botocore.exceptions.ParamValidationError as e:
logger.error("ERROR - Parameter validation error: %s" % e)
except botocore.exceptions.ClientError as e:
logger.error("ERROR - Unexpected error: %s" % e)
qDescription = jmespath.search("Answer.QuestionDescription", response)
qImprovementPlanUrl = jmespath.search("Answer.ImprovementPlanUrl", response)
qHelpfulResourceUrl = jmespath.search("Answer.HelpfulResourceUrl", response)
qNotes = jmespath.search("Answer.Notes", response)
return qDescription, qImprovementPlanUrl, qHelpfulResourceUrl, qNotes
def updateAnswersForQuestion(
waclient,
workloadId,
lensAlias,
questionId,
selectedChoices,
notes
):
# Update a answer to a question
try:
response=waclient.update_answer(
WorkloadId=workloadId,
LensAlias=lensAlias,
QuestionId=questionId,
SelectedChoices=selectedChoices,
Notes=notes
)
except botocore.exceptions.ParamValidationError as e:
logger.error("ERROR - Parameter validation error: %s" % e)
except botocore.exceptions.ClientError as e:
logger.error("ERROR - Unexpected error: %s" % e)
# print(json.dumps(response))
jmesquery = "Answer.SelectedChoices"
answers = jmespath.search(jmesquery, response)
return answers
def getImprovementPlanItems(
waclient,
workloadId,
lensAlias,
QuestionId,
PillarId,
ImprovementPlanUrl,
ChoiceList
):
# This will parse the IP Items to gather the links we need
response = {}
htmlString = ""
# unanswered = getUnansweredForQuestion(waclient,workloadId,'wellarchitected',QuestionId)
urlresponse = urllib.request.urlopen(ImprovementPlanUrl)
htmlBytes = urlresponse.read()
htmlStr = htmlBytes.decode("utf8")
htmlSplit = htmlStr.split('\n')
ipHTMLList = {}
for line in htmlSplit:
for uq in ChoiceList:
if uq in line:
parsed = BeautifulSoup(line,features="html.parser")
ipHTMLList.update({uq: str(parsed.a['href'])})
return ipHTMLList
def getImprovementPlanHTMLDescription(
ImprovementPlanUrl,
PillarId
):
logger.debug("ImprovementPlanUrl: %s for pillar %s " % (ImprovementPlanUrl,PILLAR_PARSE_MAP[PillarId]))
stepRaw = ImprovementPlanUrl.rsplit('#')[1]
# Grab the number of the step we are referencing
# This will work as long as their are less than 99 steps.
if len(stepRaw) <= 5:
stepNumber = stepRaw[-1]
else:
stepNumber = stepRaw[-2]
#Generate the string for the step number
firstItem = "step"+stepNumber
secondItem = ("step"+str((int(stepNumber)+1)))
logger.debug ("Going from %s to %s" % (firstItem, secondItem))
urlresponse = urllib.request.urlopen(ImprovementPlanUrl)
htmlBytes = urlresponse.read()
htmlStr = htmlBytes.decode("utf8")
htmlSplit = htmlStr.split('\n')
foundit = 0
ipString = ""
questionIdText = ""
for i in htmlSplit:
if PILLAR_PARSE_MAP[PillarId] in i:
bsparse = BeautifulSoup(i,features="html.parser")
questionIdText = str(bsparse.text).split(':')[0].strip()
if (secondItem in i) or ("</div>" in i):
foundit = 0
if firstItem in i:
foundit = 1
ipString+=i
elif foundit:
ipString+=i
prettyHTML = BeautifulSoup(ipString,features="html.parser")
# Need to remove all of the "local glossary links" since they point to relative paths
for a in prettyHTML.findAll('a', 'glossref'):
a.replaceWithChildren()
return prettyHTML, questionIdText
def lensTabCreation(
    WACLIENT,
    workloadId,
    lens,
    workbook,
    allQuestionsForLens,
    workloadName="",
    AWSAccountId="",
    workloadDescription=""
):
    """Render one worksheet (tab) in the xlsx workbook for a single lens.

    Builds the cell formats, writes the workload header, then walks every
    pillar/question/choice for the lens, writing one row per best-practice
    choice with pillar, question, explanation, choice link, detail, response,
    and notes columns. Alternates row colors per question.

    Args:
        WACLIENT: Well-Architected boto3 client.
        workloadId: id of the (possibly TEMP) workload being exported.
        lens: lens alias for this tab.
        workbook: open xlsxwriter Workbook to add the worksheet to.
        allQuestionsForLens: list of question dicts for the lens.
        workloadName: displayed name (only when exporting an existing workload).
        AWSAccountId: workload ARN -- the account id is parsed out of it
            (callers pass workloadARN here; see main()).
        workloadDescription: displayed description for existing workloads.
    """
    # Setup some formatting for the workbook
    bold = workbook.add_format({'bold': True})
    bold_border = workbook.add_format({
        'border': 1,
        'border_color': 'black',
        'text_wrap': True
    })
    bold_border_bold = workbook.add_format({
        'border': 1,
        'border_color': 'black',
        'text_wrap': True,
        'font_size': 20,
        'bold': True
    })
    heading = workbook.add_format({
        'font_size': 24,
        'bold': True
    })
    lineA = workbook.add_format({
        'border': 1,
        'border_color': 'black',
        'bg_color': '#E0EBF6',
        'align': 'top',
        'text_wrap': True
    })
    lineB = workbook.add_format({
        'border': 1,
        'border_color': 'black',
        'bg_color': '#E4EFDC',
        'align': 'top',
        'text_wrap': True
    })
    lineAnoborder = workbook.add_format({
        'border': 0,
        'top': 1,
        'left': 1,
        'right': 1,
        'border_color': 'black',
        'bg_color': '#E0EBF6',
        'align': 'top',
        'text_wrap': True
    })
    lineBnoborder = workbook.add_format({
        'border': 0,
        'top': 1,
        'left': 1,
        'right': 1,
        'border_color': 'black',
        'bg_color': '#E4EFDC',
        'align': 'top',
        'text_wrap': True
    })
    # "hidden" variants use a huge indent so duplicated autosort text is pushed
    # out of view while still being present for filtering.
    lineAhidden = workbook.add_format({
        'border': 0,
        'left': 1,
        'right': 1,
        'border_color': 'black',
        'bg_color': '#E0EBF6',
        'align': 'top',
        'text_wrap': False,
        'indent': 100
    })
    lineBhidden = workbook.add_format({
        'border': 0,
        'left': 1,
        'right': 1,
        'border_color': 'black',
        'bg_color': '#E4EFDC',
        'align': 'top',
        'text_wrap': False,
        'indent': 100
    })
    sub_heading = workbook.add_format()
    sub_heading.set_font_size(20)
    sub_heading.set_bold(True)
    small_font = workbook.add_format()
    small_font.set_font_size(9)
    # Get the current version of Lens
    logger.debug("Getting lens version for '"+lens+"'")
    versionString = getCurrentLensVersion(WACLIENT,lens)
    logger.debug("Adding worksheet using version "+versionString)
    # Worksheet names are length-limited, so truncate the lens alias.
    lensName = lens[0:18]
    worksheet = workbook.add_worksheet((lensName+' v'+versionString))
    # Print in landscape
    worksheet.set_landscape()
    # Set to 8.5x11 paper size
    worksheet.set_paper(1)
    # Set the column widths
    worksheet.set_column('A:A', 11)
    worksheet.set_column('B:B', 32)
    worksheet.set_column('C:C', 56)
    worksheet.set_column('D:D', 29)
    worksheet.set_column('E:E', 57)
    worksheet.set_column('F:F', 18)
    worksheet.set_column('G:G', 70)
    # Top of sheet
    worksheet.merge_range('A1:G1', 'Workload Overview', heading)
    worksheet.merge_range('A3:B3', 'Workload Name', bold_border_bold)
    worksheet.merge_range('A4:B4', 'AWS Account ID', bold_border_bold)
    worksheet.merge_range('A5:B5', 'Workload Description', bold_border_bold)
    # If we are using an existing workload, then display the Name, ID, and Description at the top
    # or else just make it blank
    if WORKLOADID:
        worksheet.write('C3', workloadName, bold_border)
        # AWSAccountId is an ARN; colon-separated field 4 is the account id.
        accountIdParsed = AWSAccountId.split(':')[4]
        worksheet.write('C4', accountIdParsed, bold_border)
        worksheet.write('C5', workloadDescription, bold_border)
    else:
        worksheet.write('C3', '', bold_border)
        worksheet.write('C4', '', bold_border)
        worksheet.write('C5', '', bold_border)
        worksheet.write('D3', 'Enter the name of system', small_font)
        worksheet.write('D4', 'Enter 12-degit AWS account ID', small_font)
        worksheet.write('D5', 'Briefly describe system architecture and workload, flow etc.', small_font)
    # Subheadings for columns
    worksheet.write('A8', 'Pillar', sub_heading)
    worksheet.write('B8', 'Question', sub_heading)
    worksheet.write('C8', 'Explanation', sub_heading)
    worksheet.write('D8', 'Choice (Best Practice)', sub_heading)
    worksheet.write('E8', 'Detail', sub_heading)
    worksheet.write('F8', 'Response', sub_heading)
    worksheet.write('G8', 'Notes (optional)', sub_heading)
    # Freeze the top of the sheet
    worksheet.freeze_panes(8,0)
    # AutoFilter on the first two columns
    worksheet.autofilter('A8:B8')
    # Make it easier to print
    worksheet.repeat_rows(1, 8)
    worksheet.fit_to_pages(1, 99)
    # Starting point for pillar questions
    cellPosition = 8
    # Starting cell look with lineA. Will switch back and forth
    myCell = lineA
    myCellhidden = lineAhidden
    myCellnoborder = lineAnoborder
    for pillar in PILLAR_PARSE_MAP:
        # This is the question number for each pillar (ex: OPS1, OPS2, etc)
        qNum = 1
        # The query will return all questions for a lens and pillar
        jmesquery = "[?PillarId=='"+pillar+"']"
        allQuestionsForPillar = jmespath.search(jmesquery, allQuestionsForLens)
        # For each of the possible answers, parse them and put into the Worksheet
        for answers in allQuestionsForPillar:
            # List all best practices
            questionTitle = PILLAR_PARSE_MAP[answers['PillarId']]+str(qNum)+" - "+answers['QuestionTitle']
            qDescription, qImprovementPlanUrl, qHelpfulResourceUrl, qNotes = getQuestionDetails(WACLIENT,workloadId,lens,answers['QuestionId'])
            # Some of the questions have extra whitespaces and I need to remove those to fit into the cell
            qDescription = qDescription.replace('\n ','').replace(' ','').replace('\t', '').replace('\n', '')
            qDescription = qDescription.rstrip()
            qDescription = qDescription.strip()
            logger.debug("Working on '"+questionTitle+"'")
            logger.debug("It has answers of: "+json.dumps(answers['SelectedChoices']))
            cellID = cellPosition + 1
            # If the question has been answered (which we do for the TEMP workload) we grab the URL and parse for the HTML content
            if qImprovementPlanUrl:
                jmesquery = "[?QuestionId=='"+answers['QuestionId']+"'].Choices[].ChoiceId"
                choiceList = jmespath.search(jmesquery, allQuestionsForLens)
                ipList = getImprovementPlanItems(WACLIENT,workloadId,lens,answers['QuestionId'],answers['PillarId'],qImprovementPlanUrl,choiceList)
            else:
                ipList = []
            startingCellID=cellID
            # If its the first time through this particular pillar question:
            # I want to only write the name once, but I need to fill in
            # each cell with the same data so the autosort works properly
            # (else it will only show the first best practice)
            firstTimePillar=True
            for choices in answers['Choices']:
                # Write the pillar name and question in every cell for autosort, but only show the first one
                cell = 'A'+str(cellID)
                if firstTimePillar:
                    worksheet.write(cell, PILLAR_PROPER_NAME_MAP[pillar], myCellnoborder)
                    cell = 'B'+str(cellID)
                    worksheet.write(cell, questionTitle, myCellnoborder)
                    firstTimePillar=False
                else:
                    worksheet.write(cell, PILLAR_PROPER_NAME_MAP[pillar], myCellhidden)
                    cell = 'B'+str(cellID)
                    worksheet.write(cell, questionTitle, myCellhidden)
                # Start writing each of the BP's, details, etc
                cell = 'D'+str(cellID)
                Title = choices['Title'].replace(' ','').replace('\t', '').replace('\n', '')
                # ipList is a dict keyed by ChoiceId (or [] when unanswered);
                # write the choice as a hyperlink when we scraped a link for it.
                if any(choices['ChoiceId'] in d for d in ipList):
                    worksheet.write_url(cell, ipList[choices['ChoiceId']], myCell, string=Title)
                    #ipItemHTML, questionIdText = getImprovementPlanHTMLDescription(ipList[choices['ChoiceId']],answers['PillarId'])
                    #htmlString = ipItemHTML.text
                    htmlString = ""
                    htmlString = htmlString.replace('\n ','').replace(' ','').replace('\t', '').strip().rstrip()
                    # print(htmlString)
                    worksheet.write_comment(cell, htmlString, {'author': 'Improvement Plan'})
                else:
                    worksheet.write(cell,Title,myCell)
                # Add all Details for each best practice/choice
                cell = 'E'+str(cellID)
                # Remove all of the extra spaces in the description field
                Description = choices['Description'].replace('\n ','')
                Description = Description.replace('\n ','')
                Description = Description.replace(' ','').replace('\t', '').replace('\n', '')
                Description = Description.rstrip()
                Description = Description.strip()
                worksheet.write(cell, Description ,myCell)
                # If this is an existing workload, we will show SELECTED if the have it checked
                # I would love to use a XLSX checkbox, but this library doesn't support it
                cell = 'F'+str(cellID)
                responseText = ""
                if choices['ChoiceId'] in answers['SelectedChoices']:
                    responseText = "SELECTED"
                else:
                    responseText = ""
                worksheet.write(cell, responseText ,myCell)
                cellID+=1
            # We are out of the choice/detail/response loop, so know how many rows were consumed
            # and we can create the explanation and notes field to span all of them
            # Explanantion field
            cellMerge = 'C'+str(startingCellID)+':C'+str(cellID-1)
            worksheet.merge_range(cellMerge, qDescription,myCell)
            # Notes field
            cellMerge = 'G'+str(startingCellID)+':G'+str(cellID-1)
            if WORKLOADID:
                worksheet.merge_range(cellMerge, qNotes, myCell)
            else:
                worksheet.merge_range(cellMerge, "", myCell)
            cellID-=1
            # Increase the question number
            qNum += 1
            # Reset the starting cellPosition to the last cellID
            cellPosition = cellID
            # Reset the cell formatting to alternate between the two colors
            if myCell == lineA:
                myCell = lineB
                myCellhidden = lineBhidden
                myCellnoborder = lineBnoborder
            else:
                myCell = lineA
                myCellhidden = lineAhidden
                myCellnoborder = lineAnoborder
def main():
    """Entry point: export Well-Architected questions/answers to an xlsx file.

    Verifies the boto3 version, opens a Well-Architected client, then either
    loads an existing workload (WORKLOADID set) or creates a TEMP workload and
    answers every question in it so the improvement-plan links can be scraped.
    One worksheet is written per lens; the TEMP workload is deleted afterwards
    unless KEEPTEMP is set. Reads module-level config: PROFILE, REGION_NAME,
    WORKLOADID, FILENAME, KEEPTEMP.
    """
    boto3_min_version = "1.16.38"
    # Verify if the version of Boto3 we are running has the wellarchitected APIs included
    if (packaging.version.parse(boto3.__version__) < packaging.version.parse(boto3_min_version)):
        logger.error("Your Boto3 version (%s) is less than %s. You must ugprade to run this script (pip3 upgrade boto3)" % (boto3.__version__, boto3_min_version))
        exit()
    logger.info("Script version %s" % __version__)
    logger.info("Starting Boto %s Session" % boto3.__version__)
    # Create a new boto3 session
    SESSION1 = boto3.session.Session(profile_name=PROFILE)
    # Initiate the well-architected session using the region defined above
    WACLIENT = SESSION1.client(
        service_name='wellarchitected',
        region_name=REGION_NAME,
    )
    # If this is an existing workload, we need to query for the various workload properties
    if WORKLOADID:
        logger.info("User specified workload id of %s" % WORKLOADID)
        workloadJson = GetWorkload(WACLIENT,WORKLOADID)
        LENSES = workloadJson['Lenses']
        logger.info("Lenses for %s: %s" % (WORKLOADID, json.dumps(LENSES)))
        WORKLOADNAME = workloadJson['WorkloadName']
        DESCRIPTION = workloadJson['Description']
        REVIEWOWNER = workloadJson['ReviewOwner']
        ENVIRONMENT= workloadJson['Environment']
        AWSREGIONS = workloadJson['AwsRegions']
        workloadId = WORKLOADID
        workloadARN = workloadJson['WorkloadArn']
    else:
        # In order to gather all of the questions, you must create a TEMP Workload
        logger.info("No workload ID specified, we will create a TEMP workload")
        # Grab all lenses that are currently available
        LENSES = listLens(WACLIENT)
        logger.info("Lenses available: "+json.dumps(LENSES))
        # Set the needed workload variables before we create it
        WORKLOADNAME = 'TEMP DO NOT USE WORKLOAD'
        DESCRIPTION = 'TEMP DO NOT USE WORKLOAD'
        REVIEWOWNER = 'WA Python Script'
        ENVIRONMENT= 'PRODUCTION'
        AWSREGIONS = [REGION_NAME]
        # Creating the TEMP workload
        logger.info("Creating a new workload to gather questions and answers")
        workloadId, workloadARN = CreateNewWorkload(WACLIENT,WORKLOADNAME,DESCRIPTION,REVIEWOWNER,ENVIRONMENT,AWSREGIONS,LENSES,"[]","[]")
    # Create an new xlsx file and add a worksheet.
    logger.info("Creating xlsx file '"+FILENAME+"'")
    workbook = xlsxwriter.Workbook(FILENAME)
    workbook.set_size(2800, 1600)
    # Simple hack to get Wellarchitected base framework first (reverse sort)
    # This will no longer work if we ever have a lens that starts with WB*, X, Y, or Z :)
    LENSES.sort(reverse=True)
    # Iterate over each lens that we either have added or is in the workload
    for lens in LENSES:
        # Grab all questions for a particular lens
        allQuestions = findAllQuestionId(WACLIENT,workloadId,lens)
        if WORKLOADID:
            # If this is an existing workload, just go ahead and create the Tab and cells
            logger.debug("Not answering questions for existing workload")
            lensTabCreation(WACLIENT,workloadId,lens,workbook,allQuestions,WORKLOADNAME,workloadARN,DESCRIPTION)
        else:
            # If this is the TEMP workload, we need to first gather all of the questionIDs possible
            jmesquery = "[*].{QuestionId: QuestionId, PillarId: PillarId, Choices: Choices[].ChoiceId}"
            allQuestionIds = jmespath.search(jmesquery, allQuestions)
            # Next we answer all of the questions across all lenses in the TEMP workload
            for question in allQuestionIds:
                logger.debug("Answering question %s in the %s lens" % (question['QuestionId'], lens))
                updateAnswersForQuestion(WACLIENT,workloadId,lens,question['QuestionId'],question['Choices'],'TEMP WORKLOAD - Added by export script')
            # Once the questions have been answered, we go ahead and create the tab for each
            lensTabCreation(WACLIENT,workloadId,lens,workbook,allQuestions)
    # Close out the workbook file
    logger.info("Closing Workbook File")
    workbook.close()
    # If this is TEMP workload, we may remove it if it has not been set to keep
    if not WORKLOADID:
        if not KEEPTEMP:
            logger.info("Removing TEMP Workload")
            DeleteWorkload(WACLIENT, workloadId)
    logger.info("Done")
# Run only when executed as a script (not when imported as a module).
if __name__ == "__main__":
    main()
| try:
response=waclient.create_workload(
WorkloadName=workloadName,
Description=description,
ReviewOwner=reviewOwner,
Environment=environment,
AwsRegions=awsRegions,
Lenses=lenses,
NonAwsRegions=nonAwsRegions,
ArchitecturalDesign=architecturalDesign,
IndustryType=industryType,
Industry=industry,
Notes=notes,
AccountIds=accountIds
)
except waclient.exceptions.ConflictException as e:
workloadId,workloadARN = FindWorkload(waclient,workloadName)
logger.error("ERROR - The workload name %s already exists as workloadId %s" % (workloadName, workloadId))
return workloadId, workloadARN
except botocore.exceptions.ParamValidationError as e:
logger.error("ERROR - Parameter validation error: %s" % e)
except botocore.exceptions.ClientError as e:
logger.error("ERROR - Unexpected error: %s" % e)
workloadId = response['WorkloadId']
workloadARN = response['WorkloadArn']
return workloadId, workloadARN | identifier_body |
network_context.rs | // Copyright 2019-2023 ChainSafe Systems
// SPDX-License-Identifier: Apache-2.0, MIT
use std::{
convert::TryFrom,
sync::{
atomic::{AtomicU64, Ordering},
Arc,
},
time::{Duration, SystemTime},
};
use crate::blocks::{FullTipset, Tipset, TipsetKeys};
use crate::libp2p::{
chain_exchange::{
ChainExchangeRequest, ChainExchangeResponse, CompactedMessages, TipsetBundle, HEADERS,
MESSAGES,
},
hello::{HelloRequest, HelloResponse},
rpc::RequestResponseError,
NetworkMessage, PeerId, PeerManager, BITSWAP_TIMEOUT,
};
use anyhow::Context;
use cid::Cid;
use fvm_ipld_blockstore::Blockstore;
use fvm_ipld_encoding::CborStore;
use serde::de::DeserializeOwned;
use std::future::Future;
use tokio::sync::Semaphore;
use tokio::task::JoinSet;
use tracing::{debug, trace, warn};
/// Timeout for the response to an RPC request.
// TODO this value can be tweaked, this is just set pretty low to avoid peers
// timing out requests from slowing the node down. If increased, a
// countermeasure for slow peers should be created.
const CHAIN_EXCHANGE_TIMEOUT: Duration = Duration::from_secs(5);
/// Maximum number of concurrent chain-exchange requests being sent to the
/// network at any one time.
const MAX_CONCURRENT_CHAIN_EXCHANGE_REQUESTS: usize = 2;
/// Context used in chain sync to handle network requests.
/// This contains the peer manager, P2P service interface, and [`Blockstore`]
/// required to make network requests.
pub(in crate::chain_sync) struct SyncNetworkContext<DB> {
    /// Channel to send network messages through P2P service
    network_send: flume::Sender<NetworkMessage>,
    /// Manages peers to send requests to and updates request stats for the
    /// respective peers.
    peer_manager: Arc<PeerManager>,
    /// Local blockstore, consulted before fetching content over bitswap.
    db: Arc<DB>,
}
// Manual `Clone` impl so that `DB` itself is not required to be `Clone`;
// every field is a channel or `Arc`, which clone cheaply by reference count.
impl<DB> Clone for SyncNetworkContext<DB> {
    fn clone(&self) -> Self {
        Self {
            network_send: self.network_send.clone(),
            peer_manager: self.peer_manager.clone(),
            db: self.db.clone(),
        }
    }
}
/// Race tasks to completion while limiting the number of tasks that may execute concurrently.
/// Once a task finishes without error, the rest of the tasks are canceled.
struct RaceBatch<T> {
    /// Tasks spawned so far; each resolves to `Ok` on success or an error message.
    tasks: JoinSet<Result<T, String>>,
    /// Bounds how many spawned tasks may make progress at the same time.
    semaphore: Arc<Semaphore>,
}
impl<T> RaceBatch<T>
where
    T: Send + 'static,
{
    /// Create an empty batch allowing at most `max_concurrent_jobs`
    /// futures to execute concurrently.
    pub fn new(max_concurrent_jobs: usize) -> Self {
        RaceBatch {
            tasks: JoinSet::new(),
            semaphore: Arc::new(Semaphore::new(max_concurrent_jobs)),
        }
    }
    /// Spawn `future` into the batch. Each task first acquires a semaphore
    /// permit, so no more than the configured number run at once.
    pub fn add(&mut self, future: impl Future<Output = Result<T, String>> + Send + 'static) {
        let sem = self.semaphore.clone();
        self.tasks.spawn(async move {
            let permit = sem
                .acquire_owned()
                .await
                .map_err(|_| "Semaphore unexpectedly closed")?;
            let result = future.await;
            // Release the permit promptly so a queued task can start.
            drop(permit);
            result
        });
    }
    /// Return first finishing `Ok` future else return `None` if all jobs failed.
    /// Returning (and thus dropping the `JoinSet`) cancels any still-running tasks.
    pub async fn get_ok(mut self) -> Option<T> {
        while let Some(result) = self.tasks.join_next().await {
            if let Ok(Ok(value)) = result {
                return Some(value);
            }
        }
        // So far every task has failed
        None
    }
}
impl<DB> SyncNetworkContext<DB>
where
DB: Blockstore,
{
/// Build a new sync network context from the P2P send channel, the peer
/// manager, and the local blockstore.
pub fn new(
    network_send: flume::Sender<NetworkMessage>,
    peer_manager: Arc<PeerManager>,
    db: Arc<DB>,
) -> Self {
    Self {
        network_send,
        peer_manager,
        db,
    }
}
/// Returns a shared reference to the peer manager of the network context
/// (borrowed out of the internal `Arc`).
pub fn peer_manager(&self) -> &PeerManager {
    self.peer_manager.as_ref()
}
/// Send a `chain_exchange` request for only block headers (ignore
/// messages). If `peer_id` is `None`, requests will be sent to a set of
/// shuffled peers. Returns the tipsets, or an error string when every
/// attempted peer failed.
pub async fn chain_exchange_headers(
    &self,
    peer_id: Option<PeerId>,
    tsk: &TipsetKeys,
    count: u64,
) -> Result<Vec<Arc<Tipset>>, String> {
    self.handle_chain_exchange_request(peer_id, tsk, count, HEADERS)
        .await
}
/// Send a `chain_exchange` request for only messages (ignore block
/// headers). If `peer_id` is `None`, requests will be sent to a set of
/// shuffled peers. Returns the compacted messages, or an error string
/// when every attempted peer failed.
pub async fn chain_exchange_messages(
    &self,
    peer_id: Option<PeerId>,
    tsk: &TipsetKeys,
    count: u64,
) -> Result<Vec<CompactedMessages>, String> {
    self.handle_chain_exchange_request(peer_id, tsk, count, MESSAGES)
        .await
}
/// Send a `chain_exchange` request for a single full tipset (includes
/// messages) If `peer_id` is `None`, requests will be sent to a set of
/// shuffled peers.
pub async fn chain_exchange_fts(
    &self,
    peer_id: Option<PeerId>,
    tsk: &TipsetKeys,
) -> Result<FullTipset, String> {
    let mut fts = self
        .handle_chain_exchange_request(peer_id, tsk, 1, HEADERS | MESSAGES)
        .await?;
    // Exactly one tipset was requested; anything else is treated as an error.
    if fts.len() != 1 {
        return Err(format!(
            "Full tipset request returned {} tipsets",
            fts.len()
        ));
    }
    Ok(fts.remove(0))
}
/// Requests that some content with a particular `Cid` get fetched over
/// `Bitswap` if it doesn't exist in the `BlockStore`.
pub async fn bitswap_get<TMessage: DeserializeOwned>(
&self,
content: Cid,
) -> Result<TMessage, String> |
/// Helper function to handle the peer retrieval if no peer supplied as well
/// as the logging and updating of the peer info in the `PeerManager`.
///
/// With a specific `peer_id`, exactly one request is sent. Otherwise the
/// request is raced across shuffled top peers (bounded by
/// `MAX_CONCURRENT_CHAIN_EXCHANGE_REQUESTS`), taking the first success.
async fn handle_chain_exchange_request<T>(
    &self,
    peer_id: Option<PeerId>,
    tsk: &TipsetKeys,
    request_len: u64,
    options: u64,
) -> Result<Vec<T>, String>
where
    T: TryFrom<TipsetBundle, Error = String> + Send + Sync + 'static,
{
    let request = ChainExchangeRequest {
        start: tsk.cids().to_vec(),
        request_len,
        options,
    };
    let global_pre_time = SystemTime::now();
    // Failure counters, shared across the racing tasks for the final report.
    let network_failures = Arc::new(AtomicU64::new(0));
    let lookup_failures = Arc::new(AtomicU64::new(0));
    let chain_exchange_result = match peer_id {
        // Specific peer is given to send request, send specifically to that peer.
        Some(id) => Self::chain_exchange_request(
            self.peer_manager.clone(),
            self.network_send.clone(),
            id,
            request,
        )
        .await?
        .into_result()?,
        None => {
            // No specific peer set, send requests to a shuffled set of top peers until
            // a request succeeds.
            let peers = self.peer_manager.top_peers_shuffled().await;
            let mut batch = RaceBatch::new(MAX_CONCURRENT_CHAIN_EXCHANGE_REQUESTS);
            for peer_id in peers.into_iter() {
                let peer_manager = self.peer_manager.clone();
                let network_send = self.network_send.clone();
                let request = request.clone();
                let network_failures = network_failures.clone();
                let lookup_failures = lookup_failures.clone();
                batch.add(async move {
                    match Self::chain_exchange_request(
                        peer_manager,
                        network_send,
                        peer_id,
                        request,
                    )
                    .await
                    {
                        Ok(chain_exchange_result) => {
                            match chain_exchange_result.into_result::<T>() {
                                Ok(r) => Ok(r),
                                Err(e) => {
                                    // Peer responded, but the payload failed conversion.
                                    lookup_failures.fetch_add(1, Ordering::Relaxed);
                                    debug!("Failed chain_exchange response: {e}");
                                    Err(e)
                                }
                            }
                        }
                        Err(e) => {
                            network_failures.fetch_add(1, Ordering::Relaxed);
                            debug!("Failed chain_exchange request to peer {peer_id:?}: {e}");
                            Err(e)
                        }
                    }
                });
            }
            // Built lazily: only evaluated if every racing request failed.
            let make_failure_message = || {
                let mut message = String::new();
                message.push_str("ChainExchange request failed for all top peers. ");
                message.push_str(&format!(
                    "{} network failures, ",
                    network_failures.load(Ordering::Relaxed)
                ));
                message.push_str(&format!(
                    "{} lookup failures, ",
                    lookup_failures.load(Ordering::Relaxed)
                ));
                message.push_str(&format!("request:\n{request:?}",));
                message
            };
            let v = batch.get_ok().await.ok_or_else(make_failure_message)?;
            debug!("Succeed: handle_chain_exchange_request");
            v
        }
    };
    // Log success for the global request with the latency from before sending.
    match SystemTime::now().duration_since(global_pre_time) {
        Ok(t) => self.peer_manager.log_global_success(t).await,
        Err(e) => {
            warn!("logged time less than before request: {}", e);
        }
    }
    Ok(chain_exchange_result)
}
/// Send a `chain_exchange` request to the network and await response.
/// Records success/failure latency with the `PeerManager`, and marks the
/// peer bad on connection-level libp2p errors.
async fn chain_exchange_request(
    peer_manager: Arc<PeerManager>,
    network_send: flume::Sender<NetworkMessage>,
    peer_id: PeerId,
    request: ChainExchangeRequest,
) -> Result<ChainExchangeResponse, String> {
    debug!("Sending ChainExchange Request to {peer_id}");
    let req_pre_time = SystemTime::now();
    let (tx, rx) = flume::bounded(1);
    if network_send
        .send_async(NetworkMessage::ChainExchangeRequest {
            peer_id,
            request,
            response_channel: tx,
        })
        .await
        .is_err()
    {
        return Err("Failed to send chain exchange request to network".to_string());
    };
    // Add timeout to receiving response from p2p service to avoid stalling.
    // There is also a timeout inside the request-response calls, but this ensures
    // this. The blocking recv is moved off the async runtime via spawn_blocking.
    let res =
        tokio::task::spawn_blocking(move || rx.recv_timeout(CHAIN_EXCHANGE_TIMEOUT)).await;
    let res_duration = SystemTime::now()
        .duration_since(req_pre_time)
        .unwrap_or_default();
    match res {
        Ok(Ok(Ok(bs_res))) => {
            // Successful response
            peer_manager.log_success(peer_id, res_duration).await;
            debug!("Succeeded: ChainExchange Request to {peer_id}");
            Ok(bs_res)
        }
        Ok(Ok(Err(e))) => {
            // Internal libp2p error, score failure for peer and potentially disconnect
            match e {
                RequestResponseError::ConnectionClosed
                | RequestResponseError::DialFailure
                | RequestResponseError::UnsupportedProtocols => {
                    peer_manager.mark_peer_bad(peer_id).await;
                }
                // Ignore dropping peer on timeout for now. Can't be confident yet that the
                // specified timeout is adequate time.
                RequestResponseError::Timeout => {
                    peer_manager.log_failure(peer_id, res_duration).await;
                }
            }
            debug!("Failed: ChainExchange Request to {peer_id}");
            Err(format!("Internal libp2p error: {e:?}"))
        }
        Ok(Err(_)) | Err(_) => {
            // Sender channel internally dropped or timeout, both should log failure which
            // will negatively score the peer, but not drop yet.
            peer_manager.log_failure(peer_id, res_duration).await;
            debug!("Timeout: ChainExchange Request to {peer_id}");
            Err(format!("Chain exchange request to {peer_id} timed out"))
        }
    }
}
/// Send a hello request to the network (does not immediately await
/// response). Returns the peer, the send timestamp, and the response if
/// one arrived within the timeout (`None` on timeout or dropped channel).
pub async fn hello_request(
    &self,
    peer_id: PeerId,
    request: HelloRequest,
) -> anyhow::Result<(PeerId, SystemTime, Option<HelloResponse>)> {
    trace!("Sending Hello Message to {}", peer_id);
    // Create oneshot channel for receiving response from sent hello.
    let (tx, rx) = flume::bounded(1);
    // Send request into libp2p service
    self.network_send
        .send_async(NetworkMessage::HelloRequest {
            peer_id,
            request,
            response_channel: tx,
        })
        .await
        .context("Failed to send hello request: receiver dropped")?;
    const HELLO_TIMEOUT: Duration = Duration::from_secs(5);
    let sent = SystemTime::now();
    // Blocking recv is moved off the async runtime; a timed-out or dropped
    // response becomes `None` rather than an error.
    let res = tokio::task::spawn_blocking(move || rx.recv_timeout(HELLO_TIMEOUT))
        .await?
        .ok();
    Ok((peer_id, sent, res))
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::sync::atomic::{AtomicBool, AtomicUsize};
    // A successful task wins even when another task fails.
    #[tokio::test]
    async fn race_batch_ok() {
        let mut batch = RaceBatch::new(3);
        batch.add(async move { Ok(1) });
        batch.add(async move { Err("kaboom".into()) });
        assert_eq!(batch.get_ok().await, Some(1));
    }
    // The first task to *finish* successfully wins, not the first added.
    #[tokio::test]
    async fn race_batch_ok_faster() {
        let mut batch = RaceBatch::new(3);
        batch.add(async move {
            tokio::time::sleep(Duration::from_secs(100)).await;
            Ok(1)
        });
        batch.add(async move { Ok(2) });
        batch.add(async move { Err("kaboom".into()) });
        assert_eq!(batch.get_ok().await, Some(2));
    }
    // All failures yield `None`.
    #[tokio::test]
    async fn race_batch_none() {
        let mut batch: RaceBatch<i32> = RaceBatch::new(3);
        batch.add(async move { Err("kaboom".into()) });
        batch.add(async move { Err("banana".into()) });
        assert_eq!(batch.get_ok().await, None);
    }
    // The semaphore caps the number of tasks running at once.
    #[tokio::test]
    async fn race_batch_semaphore() {
        const MAX_JOBS: usize = 30;
        let counter = Arc::new(AtomicUsize::new(0));
        let exceeded = Arc::new(AtomicBool::new(false));
        let mut batch: RaceBatch<i32> = RaceBatch::new(MAX_JOBS);
        for _ in 0..10000 {
            let c = counter.clone();
            let e = exceeded.clone();
            batch.add(async move {
                let prev = c.fetch_add(1, Ordering::Relaxed);
                if prev >= MAX_JOBS {
                    e.fetch_or(true, Ordering::Relaxed);
                }
                tokio::task::yield_now().await;
                c.fetch_sub(1, Ordering::Relaxed);
                Err("banana".into())
            });
        }
        assert_eq!(batch.get_ok().await, None);
        assert!(!exceeded.load(Ordering::Relaxed));
    }
    // Sanity check of the test above: with a larger limit the cap IS exceeded,
    // proving the counter/flag mechanism actually detects over-concurrency.
    #[tokio::test]
    async fn race_batch_semaphore_exceeded() {
        const MAX_JOBS: usize = 30;
        let counter = Arc::new(AtomicUsize::new(0));
        let exceeded = Arc::new(AtomicBool::new(false));
        // We add one more job to exceed the limit
        let mut batch: RaceBatch<i32> = RaceBatch::new(MAX_JOBS + 1);
        for _ in 0..10000 {
            let c = counter.clone();
            let e = exceeded.clone();
            batch.add(async move {
                let prev = c.fetch_add(1, Ordering::Relaxed);
                if prev >= MAX_JOBS {
                    e.fetch_or(true, Ordering::Relaxed);
                }
                tokio::task::yield_now().await;
                c.fetch_sub(1, Ordering::Relaxed);
                Err("banana".into())
            });
        }
        assert_eq!(batch.get_ok().await, None);
        assert!(exceeded.load(Ordering::Relaxed));
    }
}
| {
// Check if what we are fetching over Bitswap already exists in the
// database. If it does, return it, else fetch over the network.
if let Some(b) = self.db.get_cbor(&content).map_err(|e| e.to_string())? {
return Ok(b);
}
let (tx, rx) = flume::bounded(1);
self.network_send
.send_async(NetworkMessage::BitswapRequest {
cid: content,
response_channel: tx,
})
.await
.map_err(|_| "failed to send bitswap request, network receiver dropped")?;
let success = tokio::task::spawn_blocking(move || {
rx.recv_timeout(BITSWAP_TIMEOUT).unwrap_or_default()
})
.await
.is_ok();
match self.db.get_cbor(&content) {
Ok(Some(b)) => Ok(b),
Ok(None) => Err(format!(
"Not found in db, bitswap. success: {success} cid, {content:?}"
)),
Err(e) => Err(format!(
"Error retrieving from db. success: {success} cid, {content:?}, {e}"
)),
}
} | identifier_body |
network_context.rs | // Copyright 2019-2023 ChainSafe Systems
// SPDX-License-Identifier: Apache-2.0, MIT
use std::{
convert::TryFrom,
sync::{
atomic::{AtomicU64, Ordering},
Arc,
},
time::{Duration, SystemTime},
};
use crate::blocks::{FullTipset, Tipset, TipsetKeys};
use crate::libp2p::{
chain_exchange::{
ChainExchangeRequest, ChainExchangeResponse, CompactedMessages, TipsetBundle, HEADERS,
MESSAGES,
},
hello::{HelloRequest, HelloResponse},
rpc::RequestResponseError,
NetworkMessage, PeerId, PeerManager, BITSWAP_TIMEOUT,
};
use anyhow::Context;
use cid::Cid;
use fvm_ipld_blockstore::Blockstore;
use fvm_ipld_encoding::CborStore;
use serde::de::DeserializeOwned;
use std::future::Future;
use tokio::sync::Semaphore;
use tokio::task::JoinSet;
use tracing::{debug, trace, warn};
/// Timeout for the response to an RPC request.
// TODO this value can be tweaked, this is just set pretty low to avoid peers
// timing out requests from slowing the node down. If increased, a
// countermeasure for slow peers should be created.
const CHAIN_EXCHANGE_TIMEOUT: Duration = Duration::from_secs(5);
/// Maximum number of concurrent chain-exchange requests being sent to the
/// network at any one time.
const MAX_CONCURRENT_CHAIN_EXCHANGE_REQUESTS: usize = 2;
/// Context used in chain sync to handle network requests.
/// This contains the peer manager, P2P service interface, and [`Blockstore`]
/// required to make network requests.
pub(in crate::chain_sync) struct SyncNetworkContext<DB> {
    /// Channel to send network messages through P2P service
    network_send: flume::Sender<NetworkMessage>,
    /// Manages peers to send requests to and updates request stats for the
    /// respective peers.
    peer_manager: Arc<PeerManager>,
    /// Local blockstore, consulted before fetching content over bitswap.
    db: Arc<DB>,
}
// Manual `Clone` impl so that `DB` itself is not required to be `Clone`;
// every field is a channel or `Arc`, which clone cheaply by reference count.
impl<DB> Clone for SyncNetworkContext<DB> {
    fn clone(&self) -> Self {
        Self {
            network_send: self.network_send.clone(),
            peer_manager: self.peer_manager.clone(),
            db: self.db.clone(),
        }
    }
}
/// Race tasks to completion while limiting the number of tasks that may execute concurrently.
/// Once a task finishes without error, the rest of the tasks are canceled.
struct RaceBatch<T> {
    /// Tasks spawned so far; each resolves to `Ok` on success or an error message.
    tasks: JoinSet<Result<T, String>>,
    /// Bounds how many spawned tasks may make progress at the same time.
    semaphore: Arc<Semaphore>,
}
impl<T> RaceBatch<T>
where
    T: Send + 'static,
{
    /// Create an empty batch allowing at most `max_concurrent_jobs`
    /// futures to execute concurrently.
    pub fn new(max_concurrent_jobs: usize) -> Self {
        RaceBatch {
            tasks: JoinSet::new(),
            semaphore: Arc::new(Semaphore::new(max_concurrent_jobs)),
        }
    }
    /// Spawn `future` into the batch. Each task first acquires a semaphore
    /// permit, so no more than the configured number run at once.
    pub fn add(&mut self, future: impl Future<Output = Result<T, String>> + Send + 'static) {
        let sem = self.semaphore.clone();
        self.tasks.spawn(async move {
            let permit = sem
                .acquire_owned()
                .await
                .map_err(|_| "Semaphore unexpectedly closed")?;
            let result = future.await;
            // Release the permit promptly so a queued task can start.
            drop(permit);
            result
        });
    }
    /// Return first finishing `Ok` future else return `None` if all jobs failed.
    /// Returning (and thus dropping the `JoinSet`) cancels any still-running tasks.
    pub async fn get_ok(mut self) -> Option<T> {
        while let Some(result) = self.tasks.join_next().await {
            if let Ok(Ok(value)) = result {
                return Some(value);
            }
        }
        // So far every task has failed
        None
    }
}
impl<DB> SyncNetworkContext<DB>
where
DB: Blockstore,
{
pub fn | (
network_send: flume::Sender<NetworkMessage>,
peer_manager: Arc<PeerManager>,
db: Arc<DB>,
) -> Self {
Self {
network_send,
peer_manager,
db,
}
}
/// Returns a shared reference to the peer manager of the network context
/// (borrowed out of the internal `Arc`).
pub fn peer_manager(&self) -> &PeerManager {
    self.peer_manager.as_ref()
}
/// Send a `chain_exchange` request for only block headers (ignore
/// messages). If `peer_id` is `None`, requests will be sent to a set of
/// shuffled peers. Returns the tipsets, or an error string when every
/// attempted peer failed.
pub async fn chain_exchange_headers(
    &self,
    peer_id: Option<PeerId>,
    tsk: &TipsetKeys,
    count: u64,
) -> Result<Vec<Arc<Tipset>>, String> {
    self.handle_chain_exchange_request(peer_id, tsk, count, HEADERS)
        .await
}
/// Send a `chain_exchange` request for only messages (ignore block
/// headers). If `peer_id` is `None`, requests will be sent to a set of
/// shuffled peers. Returns the compacted messages, or an error string
/// when every attempted peer failed.
pub async fn chain_exchange_messages(
    &self,
    peer_id: Option<PeerId>,
    tsk: &TipsetKeys,
    count: u64,
) -> Result<Vec<CompactedMessages>, String> {
    self.handle_chain_exchange_request(peer_id, tsk, count, MESSAGES)
        .await
}
/// Send a `chain_exchange` request for a single full tipset (includes
/// messages) If `peer_id` is `None`, requests will be sent to a set of
/// shuffled peers.
pub async fn chain_exchange_fts(
    &self,
    peer_id: Option<PeerId>,
    tsk: &TipsetKeys,
) -> Result<FullTipset, String> {
    let mut fts = self
        .handle_chain_exchange_request(peer_id, tsk, 1, HEADERS | MESSAGES)
        .await?;
    // Exactly one tipset was requested; anything else is treated as an error.
    if fts.len() != 1 {
        return Err(format!(
            "Full tipset request returned {} tipsets",
            fts.len()
        ));
    }
    Ok(fts.remove(0))
}
/// Requests that some content with a particular `Cid` get fetched over
/// `Bitswap` if it doesn't exist in the `BlockStore`.
pub async fn bitswap_get<TMessage: DeserializeOwned>(
    &self,
    content: Cid,
) -> Result<TMessage, String> {
    // Check if what we are fetching over Bitswap already exists in the
    // database. If it does, return it, else fetch over the network.
    if let Some(b) = self.db.get_cbor(&content).map_err(|e| e.to_string())? {
        return Ok(b);
    }
    let (tx, rx) = flume::bounded(1);
    self.network_send
        .send_async(NetworkMessage::BitswapRequest {
            cid: content,
            response_channel: tx,
        })
        .await
        .map_err(|_| "failed to send bitswap request, network receiver dropped")?;
    // Wait (off the async runtime via spawn_blocking) for the bitswap
    // response, bounded by BITSWAP_TIMEOUT.
    let success = tokio::task::spawn_blocking(move || {
        rx.recv_timeout(BITSWAP_TIMEOUT).unwrap_or_default()
    })
    .await
    .is_ok();
    // Regardless of the reported outcome, re-read from the database: a
    // successful exchange is expected to have stored the block there.
    match self.db.get_cbor(&content) {
        Ok(Some(b)) => Ok(b),
        Ok(None) => Err(format!(
            "Not found in db, bitswap. success: {success} cid, {content:?}"
        )),
        Err(e) => Err(format!(
            "Error retrieving from db. success: {success} cid, {content:?}, {e}"
        )),
    }
}
/// Helper function to handle the peer retrieval if no peer supplied as well
/// as the logging and updating of the peer info in the `PeerManager`.
async fn handle_chain_exchange_request<T>(
&self,
peer_id: Option<PeerId>,
tsk: &TipsetKeys,
request_len: u64,
options: u64,
) -> Result<Vec<T>, String>
where
T: TryFrom<TipsetBundle, Error = String> + Send + Sync + 'static,
{
let request = ChainExchangeRequest {
start: tsk.cids().to_vec(),
request_len,
options,
};
let global_pre_time = SystemTime::now();
let network_failures = Arc::new(AtomicU64::new(0));
let lookup_failures = Arc::new(AtomicU64::new(0));
let chain_exchange_result = match peer_id {
// Specific peer is given to send request, send specifically to that peer.
Some(id) => Self::chain_exchange_request(
self.peer_manager.clone(),
self.network_send.clone(),
id,
request,
)
.await?
.into_result()?,
None => {
// No specific peer set, send requests to a shuffled set of top peers until
// a request succeeds.
let peers = self.peer_manager.top_peers_shuffled().await;
let mut batch = RaceBatch::new(MAX_CONCURRENT_CHAIN_EXCHANGE_REQUESTS);
for peer_id in peers.into_iter() {
let peer_manager = self.peer_manager.clone();
let network_send = self.network_send.clone();
let request = request.clone();
let network_failures = network_failures.clone();
let lookup_failures = lookup_failures.clone();
batch.add(async move {
match Self::chain_exchange_request(
peer_manager,
network_send,
peer_id,
request,
)
.await
{
Ok(chain_exchange_result) => {
match chain_exchange_result.into_result::<T>() {
Ok(r) => Ok(r),
Err(e) => {
lookup_failures.fetch_add(1, Ordering::Relaxed);
debug!("Failed chain_exchange response: {e}");
Err(e)
}
}
}
Err(e) => {
network_failures.fetch_add(1, Ordering::Relaxed);
debug!("Failed chain_exchange request to peer {peer_id:?}: {e}");
Err(e)
}
}
});
}
let make_failure_message = || {
let mut message = String::new();
message.push_str("ChainExchange request failed for all top peers. ");
message.push_str(&format!(
"{} network failures, ",
network_failures.load(Ordering::Relaxed)
));
message.push_str(&format!(
"{} lookup failures, ",
lookup_failures.load(Ordering::Relaxed)
));
message.push_str(&format!("request:\n{request:?}",));
message
};
let v = batch.get_ok().await.ok_or_else(make_failure_message)?;
debug!("Succeed: handle_chain_exchange_request");
v
}
};
// Log success for the global request with the latency from before sending.
match SystemTime::now().duration_since(global_pre_time) {
Ok(t) => self.peer_manager.log_global_success(t).await,
Err(e) => {
warn!("logged time less than before request: {}", e);
}
}
Ok(chain_exchange_result)
}
/// Send a `chain_exchange` request to the network and await response.
async fn chain_exchange_request(
peer_manager: Arc<PeerManager>,
network_send: flume::Sender<NetworkMessage>,
peer_id: PeerId,
request: ChainExchangeRequest,
) -> Result<ChainExchangeResponse, String> {
debug!("Sending ChainExchange Request to {peer_id}");
let req_pre_time = SystemTime::now();
let (tx, rx) = flume::bounded(1);
if network_send
.send_async(NetworkMessage::ChainExchangeRequest {
peer_id,
request,
response_channel: tx,
})
.await
.is_err()
{
return Err("Failed to send chain exchange request to network".to_string());
};
// Add timeout to receiving response from p2p service to avoid stalling.
// There is also a timeout inside the request-response calls, but this ensures
// this.
let res =
tokio::task::spawn_blocking(move || rx.recv_timeout(CHAIN_EXCHANGE_TIMEOUT)).await;
let res_duration = SystemTime::now()
.duration_since(req_pre_time)
.unwrap_or_default();
match res {
Ok(Ok(Ok(bs_res))) => {
// Successful response
peer_manager.log_success(peer_id, res_duration).await;
debug!("Succeeded: ChainExchange Request to {peer_id}");
Ok(bs_res)
}
Ok(Ok(Err(e))) => {
// Internal libp2p error, score failure for peer and potentially disconnect
match e {
RequestResponseError::ConnectionClosed
| RequestResponseError::DialFailure
| RequestResponseError::UnsupportedProtocols => {
peer_manager.mark_peer_bad(peer_id).await;
}
// Ignore dropping peer on timeout for now. Can't be confident yet that the
// specified timeout is adequate time.
RequestResponseError::Timeout => {
peer_manager.log_failure(peer_id, res_duration).await;
}
}
debug!("Failed: ChainExchange Request to {peer_id}");
Err(format!("Internal libp2p error: {e:?}"))
}
Ok(Err(_)) | Err(_) => {
// Sender channel internally dropped or timeout, both should log failure which
// will negatively score the peer, but not drop yet.
peer_manager.log_failure(peer_id, res_duration).await;
debug!("Timeout: ChainExchange Request to {peer_id}");
Err(format!("Chain exchange request to {peer_id} timed out"))
}
}
}
/// Send a hello request to the network (does not immediately await
/// response).
pub async fn hello_request(
&self,
peer_id: PeerId,
request: HelloRequest,
) -> anyhow::Result<(PeerId, SystemTime, Option<HelloResponse>)> {
trace!("Sending Hello Message to {}", peer_id);
// Create oneshot channel for receiving response from sent hello.
let (tx, rx) = flume::bounded(1);
// Send request into libp2p service
self.network_send
.send_async(NetworkMessage::HelloRequest {
peer_id,
request,
response_channel: tx,
})
.await
.context("Failed to send hello request: receiver dropped")?;
const HELLO_TIMEOUT: Duration = Duration::from_secs(5);
let sent = SystemTime::now();
let res = tokio::task::spawn_blocking(move || rx.recv_timeout(HELLO_TIMEOUT))
.await?
.ok();
Ok((peer_id, sent, res))
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::sync::atomic::{AtomicBool, AtomicUsize};
#[tokio::test]
async fn race_batch_ok() {
let mut batch = RaceBatch::new(3);
batch.add(async move { Ok(1) });
batch.add(async move { Err("kaboom".into()) });
assert_eq!(batch.get_ok().await, Some(1));
}
#[tokio::test]
async fn race_batch_ok_faster() {
let mut batch = RaceBatch::new(3);
batch.add(async move {
tokio::time::sleep(Duration::from_secs(100)).await;
Ok(1)
});
batch.add(async move { Ok(2) });
batch.add(async move { Err("kaboom".into()) });
assert_eq!(batch.get_ok().await, Some(2));
}
#[tokio::test]
async fn race_batch_none() {
let mut batch: RaceBatch<i32> = RaceBatch::new(3);
batch.add(async move { Err("kaboom".into()) });
batch.add(async move { Err("banana".into()) });
assert_eq!(batch.get_ok().await, None);
}
#[tokio::test]
async fn race_batch_semaphore() {
const MAX_JOBS: usize = 30;
let counter = Arc::new(AtomicUsize::new(0));
let exceeded = Arc::new(AtomicBool::new(false));
let mut batch: RaceBatch<i32> = RaceBatch::new(MAX_JOBS);
for _ in 0..10000 {
let c = counter.clone();
let e = exceeded.clone();
batch.add(async move {
let prev = c.fetch_add(1, Ordering::Relaxed);
if prev >= MAX_JOBS {
e.fetch_or(true, Ordering::Relaxed);
}
tokio::task::yield_now().await;
c.fetch_sub(1, Ordering::Relaxed);
Err("banana".into())
});
}
assert_eq!(batch.get_ok().await, None);
assert!(!exceeded.load(Ordering::Relaxed));
}
#[tokio::test]
async fn race_batch_semaphore_exceeded() {
const MAX_JOBS: usize = 30;
let counter = Arc::new(AtomicUsize::new(0));
let exceeded = Arc::new(AtomicBool::new(false));
// We add one more job to exceed the limit
let mut batch: RaceBatch<i32> = RaceBatch::new(MAX_JOBS + 1);
for _ in 0..10000 {
let c = counter.clone();
let e = exceeded.clone();
batch.add(async move {
let prev = c.fetch_add(1, Ordering::Relaxed);
if prev >= MAX_JOBS {
e.fetch_or(true, Ordering::Relaxed);
}
tokio::task::yield_now().await;
c.fetch_sub(1, Ordering::Relaxed);
Err("banana".into())
});
}
assert_eq!(batch.get_ok().await, None);
assert!(exceeded.load(Ordering::Relaxed));
}
}
| new | identifier_name |
network_context.rs | // Copyright 2019-2023 ChainSafe Systems
// SPDX-License-Identifier: Apache-2.0, MIT
use std::{
convert::TryFrom,
sync::{
atomic::{AtomicU64, Ordering},
Arc,
},
time::{Duration, SystemTime},
};
use crate::blocks::{FullTipset, Tipset, TipsetKeys};
use crate::libp2p::{
chain_exchange::{
ChainExchangeRequest, ChainExchangeResponse, CompactedMessages, TipsetBundle, HEADERS,
MESSAGES,
},
hello::{HelloRequest, HelloResponse},
rpc::RequestResponseError,
NetworkMessage, PeerId, PeerManager, BITSWAP_TIMEOUT,
};
use anyhow::Context;
use cid::Cid;
use fvm_ipld_blockstore::Blockstore;
use fvm_ipld_encoding::CborStore;
use serde::de::DeserializeOwned;
use std::future::Future;
use tokio::sync::Semaphore;
use tokio::task::JoinSet;
use tracing::{debug, trace, warn};
/// Timeout for response from an RPC request
// TODO this value can be tweaked, this is just set pretty low to avoid peers
// timing out requests from slowing the node down. If increase, should create a
// countermeasure for this.
const CHAIN_EXCHANGE_TIMEOUT: Duration = Duration::from_secs(5);
/// Maximum number of concurrent chain exchange request being sent to the
/// network.
const MAX_CONCURRENT_CHAIN_EXCHANGE_REQUESTS: usize = 2;
/// Context used in chain sync to handle network requests.
/// This contains the peer manager, P2P service interface, and [`Blockstore`]
/// required to make network requests.
pub(in crate::chain_sync) struct SyncNetworkContext<DB> {
/// Channel to send network messages through P2P service
network_send: flume::Sender<NetworkMessage>,
/// Manages peers to send requests to and updates request stats for the
/// respective peers.
peer_manager: Arc<PeerManager>,
db: Arc<DB>,
}
impl<DB> Clone for SyncNetworkContext<DB> {
fn clone(&self) -> Self {
Self {
network_send: self.network_send.clone(),
peer_manager: self.peer_manager.clone(),
db: self.db.clone(),
}
}
}
/// Race tasks to completion while limiting the number of tasks that may execute concurrently.
/// Once a task finishes without error, the rest of the tasks are canceled.
struct RaceBatch<T> {
tasks: JoinSet<Result<T, String>>,
semaphore: Arc<Semaphore>,
}
impl<T> RaceBatch<T>
where
T: Send + 'static,
{
pub fn new(max_concurrent_jobs: usize) -> Self {
RaceBatch {
tasks: JoinSet::new(),
semaphore: Arc::new(Semaphore::new(max_concurrent_jobs)),
}
}
pub fn add(&mut self, future: impl Future<Output = Result<T, String>> + Send + 'static) {
let sem = self.semaphore.clone();
self.tasks.spawn(async move {
let permit = sem
.acquire_owned()
.await
.map_err(|_| "Semaphore unexpectedly closed")?;
let result = future.await;
drop(permit);
result
});
}
/// Return first finishing `Ok` future else return `None` if all jobs failed
pub async fn get_ok(mut self) -> Option<T> {
while let Some(result) = self.tasks.join_next().await {
if let Ok(Ok(value)) = result {
return Some(value);
}
}
// So far every task have failed
None
}
}
impl<DB> SyncNetworkContext<DB>
where
DB: Blockstore,
{
pub fn new(
network_send: flume::Sender<NetworkMessage>,
peer_manager: Arc<PeerManager>,
db: Arc<DB>,
) -> Self {
Self {
network_send,
peer_manager,
db,
}
}
/// Returns a reference to the peer manager of the network context.
pub fn peer_manager(&self) -> &PeerManager {
self.peer_manager.as_ref()
}
/// Send a `chain_exchange` request for only block headers (ignore
/// messages). If `peer_id` is `None`, requests will be sent to a set of
/// shuffled peers.
pub async fn chain_exchange_headers(
&self,
peer_id: Option<PeerId>,
tsk: &TipsetKeys,
count: u64,
) -> Result<Vec<Arc<Tipset>>, String> {
self.handle_chain_exchange_request(peer_id, tsk, count, HEADERS)
.await
}
/// Send a `chain_exchange` request for only messages (ignore block
/// headers). If `peer_id` is `None`, requests will be sent to a set of
/// shuffled peers.
pub async fn chain_exchange_messages(
&self,
peer_id: Option<PeerId>,
tsk: &TipsetKeys,
count: u64,
) -> Result<Vec<CompactedMessages>, String> {
self.handle_chain_exchange_request(peer_id, tsk, count, MESSAGES)
.await
}
/// Send a `chain_exchange` request for a single full tipset (includes
/// messages) If `peer_id` is `None`, requests will be sent to a set of
/// shuffled peers.
pub async fn chain_exchange_fts(
&self,
peer_id: Option<PeerId>,
tsk: &TipsetKeys,
) -> Result<FullTipset, String> {
let mut fts = self
.handle_chain_exchange_request(peer_id, tsk, 1, HEADERS | MESSAGES)
.await?;
if fts.len() != 1 {
return Err(format!(
"Full tipset request returned {} tipsets",
fts.len()
));
}
Ok(fts.remove(0))
}
/// Requests that some content with a particular `Cid` get fetched over
/// `Bitswap` if it doesn't exist in the `BlockStore`.
pub async fn bitswap_get<TMessage: DeserializeOwned>(
&self,
content: Cid,
) -> Result<TMessage, String> {
// Check if what we are fetching over Bitswap already exists in the
// database. If it does, return it, else fetch over the network.
if let Some(b) = self.db.get_cbor(&content).map_err(|e| e.to_string())? {
return Ok(b);
}
let (tx, rx) = flume::bounded(1);
self.network_send
.send_async(NetworkMessage::BitswapRequest {
cid: content,
response_channel: tx,
})
.await
.map_err(|_| "failed to send bitswap request, network receiver dropped")?;
let success = tokio::task::spawn_blocking(move || {
rx.recv_timeout(BITSWAP_TIMEOUT).unwrap_or_default()
})
.await
.is_ok();
match self.db.get_cbor(&content) {
Ok(Some(b)) => Ok(b),
Ok(None) => Err(format!(
"Not found in db, bitswap. success: {success} cid, {content:?}"
)),
Err(e) => Err(format!(
"Error retrieving from db. success: {success} cid, {content:?}, {e}"
)),
}
}
/// Helper function to handle the peer retrieval if no peer supplied as well
/// as the logging and updating of the peer info in the `PeerManager`.
async fn handle_chain_exchange_request<T>(
&self,
peer_id: Option<PeerId>,
tsk: &TipsetKeys,
request_len: u64,
options: u64,
) -> Result<Vec<T>, String>
where
T: TryFrom<TipsetBundle, Error = String> + Send + Sync + 'static,
{
let request = ChainExchangeRequest {
start: tsk.cids().to_vec(),
request_len,
options,
};
let global_pre_time = SystemTime::now();
let network_failures = Arc::new(AtomicU64::new(0));
let lookup_failures = Arc::new(AtomicU64::new(0));
let chain_exchange_result = match peer_id {
// Specific peer is given to send request, send specifically to that peer.
Some(id) => Self::chain_exchange_request(
self.peer_manager.clone(),
self.network_send.clone(),
id,
request,
)
.await?
.into_result()?,
None => {
// No specific peer set, send requests to a shuffled set of top peers until
// a request succeeds.
let peers = self.peer_manager.top_peers_shuffled().await;
let mut batch = RaceBatch::new(MAX_CONCURRENT_CHAIN_EXCHANGE_REQUESTS);
for peer_id in peers.into_iter() {
let peer_manager = self.peer_manager.clone();
let network_send = self.network_send.clone();
let request = request.clone();
let network_failures = network_failures.clone();
let lookup_failures = lookup_failures.clone();
batch.add(async move {
match Self::chain_exchange_request(
peer_manager,
network_send,
peer_id,
request,
)
.await
{
Ok(chain_exchange_result) => {
match chain_exchange_result.into_result::<T>() {
Ok(r) => Ok(r),
Err(e) => |
}
}
Err(e) => {
network_failures.fetch_add(1, Ordering::Relaxed);
debug!("Failed chain_exchange request to peer {peer_id:?}: {e}");
Err(e)
}
}
});
}
let make_failure_message = || {
let mut message = String::new();
message.push_str("ChainExchange request failed for all top peers. ");
message.push_str(&format!(
"{} network failures, ",
network_failures.load(Ordering::Relaxed)
));
message.push_str(&format!(
"{} lookup failures, ",
lookup_failures.load(Ordering::Relaxed)
));
message.push_str(&format!("request:\n{request:?}",));
message
};
let v = batch.get_ok().await.ok_or_else(make_failure_message)?;
debug!("Succeed: handle_chain_exchange_request");
v
}
};
// Log success for the global request with the latency from before sending.
match SystemTime::now().duration_since(global_pre_time) {
Ok(t) => self.peer_manager.log_global_success(t).await,
Err(e) => {
warn!("logged time less than before request: {}", e);
}
}
Ok(chain_exchange_result)
}
/// Send a `chain_exchange` request to the network and await response.
async fn chain_exchange_request(
peer_manager: Arc<PeerManager>,
network_send: flume::Sender<NetworkMessage>,
peer_id: PeerId,
request: ChainExchangeRequest,
) -> Result<ChainExchangeResponse, String> {
debug!("Sending ChainExchange Request to {peer_id}");
let req_pre_time = SystemTime::now();
let (tx, rx) = flume::bounded(1);
if network_send
.send_async(NetworkMessage::ChainExchangeRequest {
peer_id,
request,
response_channel: tx,
})
.await
.is_err()
{
return Err("Failed to send chain exchange request to network".to_string());
};
// Add timeout to receiving response from p2p service to avoid stalling.
// There is also a timeout inside the request-response calls, but this ensures
// this.
let res =
tokio::task::spawn_blocking(move || rx.recv_timeout(CHAIN_EXCHANGE_TIMEOUT)).await;
let res_duration = SystemTime::now()
.duration_since(req_pre_time)
.unwrap_or_default();
match res {
Ok(Ok(Ok(bs_res))) => {
// Successful response
peer_manager.log_success(peer_id, res_duration).await;
debug!("Succeeded: ChainExchange Request to {peer_id}");
Ok(bs_res)
}
Ok(Ok(Err(e))) => {
// Internal libp2p error, score failure for peer and potentially disconnect
match e {
RequestResponseError::ConnectionClosed
| RequestResponseError::DialFailure
| RequestResponseError::UnsupportedProtocols => {
peer_manager.mark_peer_bad(peer_id).await;
}
// Ignore dropping peer on timeout for now. Can't be confident yet that the
// specified timeout is adequate time.
RequestResponseError::Timeout => {
peer_manager.log_failure(peer_id, res_duration).await;
}
}
debug!("Failed: ChainExchange Request to {peer_id}");
Err(format!("Internal libp2p error: {e:?}"))
}
Ok(Err(_)) | Err(_) => {
// Sender channel internally dropped or timeout, both should log failure which
// will negatively score the peer, but not drop yet.
peer_manager.log_failure(peer_id, res_duration).await;
debug!("Timeout: ChainExchange Request to {peer_id}");
Err(format!("Chain exchange request to {peer_id} timed out"))
}
}
}
/// Send a hello request to the network (does not immediately await
/// response).
pub async fn hello_request(
&self,
peer_id: PeerId,
request: HelloRequest,
) -> anyhow::Result<(PeerId, SystemTime, Option<HelloResponse>)> {
trace!("Sending Hello Message to {}", peer_id);
// Create oneshot channel for receiving response from sent hello.
let (tx, rx) = flume::bounded(1);
// Send request into libp2p service
self.network_send
.send_async(NetworkMessage::HelloRequest {
peer_id,
request,
response_channel: tx,
})
.await
.context("Failed to send hello request: receiver dropped")?;
const HELLO_TIMEOUT: Duration = Duration::from_secs(5);
let sent = SystemTime::now();
let res = tokio::task::spawn_blocking(move || rx.recv_timeout(HELLO_TIMEOUT))
.await?
.ok();
Ok((peer_id, sent, res))
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::sync::atomic::{AtomicBool, AtomicUsize};
#[tokio::test]
async fn race_batch_ok() {
let mut batch = RaceBatch::new(3);
batch.add(async move { Ok(1) });
batch.add(async move { Err("kaboom".into()) });
assert_eq!(batch.get_ok().await, Some(1));
}
#[tokio::test]
async fn race_batch_ok_faster() {
let mut batch = RaceBatch::new(3);
batch.add(async move {
tokio::time::sleep(Duration::from_secs(100)).await;
Ok(1)
});
batch.add(async move { Ok(2) });
batch.add(async move { Err("kaboom".into()) });
assert_eq!(batch.get_ok().await, Some(2));
}
#[tokio::test]
async fn race_batch_none() {
let mut batch: RaceBatch<i32> = RaceBatch::new(3);
batch.add(async move { Err("kaboom".into()) });
batch.add(async move { Err("banana".into()) });
assert_eq!(batch.get_ok().await, None);
}
#[tokio::test]
async fn race_batch_semaphore() {
const MAX_JOBS: usize = 30;
let counter = Arc::new(AtomicUsize::new(0));
let exceeded = Arc::new(AtomicBool::new(false));
let mut batch: RaceBatch<i32> = RaceBatch::new(MAX_JOBS);
for _ in 0..10000 {
let c = counter.clone();
let e = exceeded.clone();
batch.add(async move {
let prev = c.fetch_add(1, Ordering::Relaxed);
if prev >= MAX_JOBS {
e.fetch_or(true, Ordering::Relaxed);
}
tokio::task::yield_now().await;
c.fetch_sub(1, Ordering::Relaxed);
Err("banana".into())
});
}
assert_eq!(batch.get_ok().await, None);
assert!(!exceeded.load(Ordering::Relaxed));
}
#[tokio::test]
async fn race_batch_semaphore_exceeded() {
const MAX_JOBS: usize = 30;
let counter = Arc::new(AtomicUsize::new(0));
let exceeded = Arc::new(AtomicBool::new(false));
// We add one more job to exceed the limit
let mut batch: RaceBatch<i32> = RaceBatch::new(MAX_JOBS + 1);
for _ in 0..10000 {
let c = counter.clone();
let e = exceeded.clone();
batch.add(async move {
let prev = c.fetch_add(1, Ordering::Relaxed);
if prev >= MAX_JOBS {
e.fetch_or(true, Ordering::Relaxed);
}
tokio::task::yield_now().await;
c.fetch_sub(1, Ordering::Relaxed);
Err("banana".into())
});
}
assert_eq!(batch.get_ok().await, None);
assert!(exceeded.load(Ordering::Relaxed));
}
}
| {
lookup_failures.fetch_add(1, Ordering::Relaxed);
debug!("Failed chain_exchange response: {e}");
Err(e)
} | conditional_block |
network_context.rs | // Copyright 2019-2023 ChainSafe Systems
// SPDX-License-Identifier: Apache-2.0, MIT
use std::{
convert::TryFrom,
sync::{
atomic::{AtomicU64, Ordering},
Arc,
},
time::{Duration, SystemTime}, | use crate::blocks::{FullTipset, Tipset, TipsetKeys};
use crate::libp2p::{
chain_exchange::{
ChainExchangeRequest, ChainExchangeResponse, CompactedMessages, TipsetBundle, HEADERS,
MESSAGES,
},
hello::{HelloRequest, HelloResponse},
rpc::RequestResponseError,
NetworkMessage, PeerId, PeerManager, BITSWAP_TIMEOUT,
};
use anyhow::Context;
use cid::Cid;
use fvm_ipld_blockstore::Blockstore;
use fvm_ipld_encoding::CborStore;
use serde::de::DeserializeOwned;
use std::future::Future;
use tokio::sync::Semaphore;
use tokio::task::JoinSet;
use tracing::{debug, trace, warn};
/// Timeout for response from an RPC request
// TODO this value can be tweaked, this is just set pretty low to avoid peers
// timing out requests from slowing the node down. If increase, should create a
// countermeasure for this.
const CHAIN_EXCHANGE_TIMEOUT: Duration = Duration::from_secs(5);
/// Maximum number of concurrent chain exchange request being sent to the
/// network.
const MAX_CONCURRENT_CHAIN_EXCHANGE_REQUESTS: usize = 2;
/// Context used in chain sync to handle network requests.
/// This contains the peer manager, P2P service interface, and [`Blockstore`]
/// required to make network requests.
pub(in crate::chain_sync) struct SyncNetworkContext<DB> {
/// Channel to send network messages through P2P service
network_send: flume::Sender<NetworkMessage>,
/// Manages peers to send requests to and updates request stats for the
/// respective peers.
peer_manager: Arc<PeerManager>,
db: Arc<DB>,
}
impl<DB> Clone for SyncNetworkContext<DB> {
fn clone(&self) -> Self {
Self {
network_send: self.network_send.clone(),
peer_manager: self.peer_manager.clone(),
db: self.db.clone(),
}
}
}
/// Race tasks to completion while limiting the number of tasks that may execute concurrently.
/// Once a task finishes without error, the rest of the tasks are canceled.
struct RaceBatch<T> {
tasks: JoinSet<Result<T, String>>,
semaphore: Arc<Semaphore>,
}
impl<T> RaceBatch<T>
where
T: Send + 'static,
{
pub fn new(max_concurrent_jobs: usize) -> Self {
RaceBatch {
tasks: JoinSet::new(),
semaphore: Arc::new(Semaphore::new(max_concurrent_jobs)),
}
}
pub fn add(&mut self, future: impl Future<Output = Result<T, String>> + Send + 'static) {
let sem = self.semaphore.clone();
self.tasks.spawn(async move {
let permit = sem
.acquire_owned()
.await
.map_err(|_| "Semaphore unexpectedly closed")?;
let result = future.await;
drop(permit);
result
});
}
/// Return first finishing `Ok` future else return `None` if all jobs failed
pub async fn get_ok(mut self) -> Option<T> {
while let Some(result) = self.tasks.join_next().await {
if let Ok(Ok(value)) = result {
return Some(value);
}
}
// So far every task have failed
None
}
}
impl<DB> SyncNetworkContext<DB>
where
DB: Blockstore,
{
pub fn new(
network_send: flume::Sender<NetworkMessage>,
peer_manager: Arc<PeerManager>,
db: Arc<DB>,
) -> Self {
Self {
network_send,
peer_manager,
db,
}
}
/// Returns a reference to the peer manager of the network context.
pub fn peer_manager(&self) -> &PeerManager {
self.peer_manager.as_ref()
}
/// Send a `chain_exchange` request for only block headers (ignore
/// messages). If `peer_id` is `None`, requests will be sent to a set of
/// shuffled peers.
pub async fn chain_exchange_headers(
&self,
peer_id: Option<PeerId>,
tsk: &TipsetKeys,
count: u64,
) -> Result<Vec<Arc<Tipset>>, String> {
self.handle_chain_exchange_request(peer_id, tsk, count, HEADERS)
.await
}
/// Send a `chain_exchange` request for only messages (ignore block
/// headers). If `peer_id` is `None`, requests will be sent to a set of
/// shuffled peers.
pub async fn chain_exchange_messages(
&self,
peer_id: Option<PeerId>,
tsk: &TipsetKeys,
count: u64,
) -> Result<Vec<CompactedMessages>, String> {
self.handle_chain_exchange_request(peer_id, tsk, count, MESSAGES)
.await
}
/// Send a `chain_exchange` request for a single full tipset (includes
/// messages) If `peer_id` is `None`, requests will be sent to a set of
/// shuffled peers.
pub async fn chain_exchange_fts(
&self,
peer_id: Option<PeerId>,
tsk: &TipsetKeys,
) -> Result<FullTipset, String> {
let mut fts = self
.handle_chain_exchange_request(peer_id, tsk, 1, HEADERS | MESSAGES)
.await?;
if fts.len() != 1 {
return Err(format!(
"Full tipset request returned {} tipsets",
fts.len()
));
}
Ok(fts.remove(0))
}
/// Requests that some content with a particular `Cid` get fetched over
/// `Bitswap` if it doesn't exist in the `BlockStore`.
pub async fn bitswap_get<TMessage: DeserializeOwned>(
&self,
content: Cid,
) -> Result<TMessage, String> {
// Check if what we are fetching over Bitswap already exists in the
// database. If it does, return it, else fetch over the network.
if let Some(b) = self.db.get_cbor(&content).map_err(|e| e.to_string())? {
return Ok(b);
}
let (tx, rx) = flume::bounded(1);
self.network_send
.send_async(NetworkMessage::BitswapRequest {
cid: content,
response_channel: tx,
})
.await
.map_err(|_| "failed to send bitswap request, network receiver dropped")?;
let success = tokio::task::spawn_blocking(move || {
rx.recv_timeout(BITSWAP_TIMEOUT).unwrap_or_default()
})
.await
.is_ok();
match self.db.get_cbor(&content) {
Ok(Some(b)) => Ok(b),
Ok(None) => Err(format!(
"Not found in db, bitswap. success: {success} cid, {content:?}"
)),
Err(e) => Err(format!(
"Error retrieving from db. success: {success} cid, {content:?}, {e}"
)),
}
}
/// Helper function to handle the peer retrieval if no peer supplied as well
/// as the logging and updating of the peer info in the `PeerManager`.
async fn handle_chain_exchange_request<T>(
&self,
peer_id: Option<PeerId>,
tsk: &TipsetKeys,
request_len: u64,
options: u64,
) -> Result<Vec<T>, String>
where
T: TryFrom<TipsetBundle, Error = String> + Send + Sync + 'static,
{
let request = ChainExchangeRequest {
start: tsk.cids().to_vec(),
request_len,
options,
};
let global_pre_time = SystemTime::now();
let network_failures = Arc::new(AtomicU64::new(0));
let lookup_failures = Arc::new(AtomicU64::new(0));
let chain_exchange_result = match peer_id {
// Specific peer is given to send request, send specifically to that peer.
Some(id) => Self::chain_exchange_request(
self.peer_manager.clone(),
self.network_send.clone(),
id,
request,
)
.await?
.into_result()?,
None => {
// No specific peer set, send requests to a shuffled set of top peers until
// a request succeeds.
let peers = self.peer_manager.top_peers_shuffled().await;
let mut batch = RaceBatch::new(MAX_CONCURRENT_CHAIN_EXCHANGE_REQUESTS);
for peer_id in peers.into_iter() {
let peer_manager = self.peer_manager.clone();
let network_send = self.network_send.clone();
let request = request.clone();
let network_failures = network_failures.clone();
let lookup_failures = lookup_failures.clone();
batch.add(async move {
match Self::chain_exchange_request(
peer_manager,
network_send,
peer_id,
request,
)
.await
{
Ok(chain_exchange_result) => {
match chain_exchange_result.into_result::<T>() {
Ok(r) => Ok(r),
Err(e) => {
lookup_failures.fetch_add(1, Ordering::Relaxed);
debug!("Failed chain_exchange response: {e}");
Err(e)
}
}
}
Err(e) => {
network_failures.fetch_add(1, Ordering::Relaxed);
debug!("Failed chain_exchange request to peer {peer_id:?}: {e}");
Err(e)
}
}
});
}
let make_failure_message = || {
let mut message = String::new();
message.push_str("ChainExchange request failed for all top peers. ");
message.push_str(&format!(
"{} network failures, ",
network_failures.load(Ordering::Relaxed)
));
message.push_str(&format!(
"{} lookup failures, ",
lookup_failures.load(Ordering::Relaxed)
));
message.push_str(&format!("request:\n{request:?}",));
message
};
let v = batch.get_ok().await.ok_or_else(make_failure_message)?;
debug!("Succeed: handle_chain_exchange_request");
v
}
};
// Log success for the global request with the latency from before sending.
match SystemTime::now().duration_since(global_pre_time) {
Ok(t) => self.peer_manager.log_global_success(t).await,
Err(e) => {
warn!("logged time less than before request: {}", e);
}
}
Ok(chain_exchange_result)
}
/// Send a `chain_exchange` request to the network and await response.
async fn chain_exchange_request(
peer_manager: Arc<PeerManager>,
network_send: flume::Sender<NetworkMessage>,
peer_id: PeerId,
request: ChainExchangeRequest,
) -> Result<ChainExchangeResponse, String> {
debug!("Sending ChainExchange Request to {peer_id}");
let req_pre_time = SystemTime::now();
let (tx, rx) = flume::bounded(1);
if network_send
.send_async(NetworkMessage::ChainExchangeRequest {
peer_id,
request,
response_channel: tx,
})
.await
.is_err()
{
return Err("Failed to send chain exchange request to network".to_string());
};
// Add timeout to receiving response from p2p service to avoid stalling.
// There is also a timeout inside the request-response calls, but this ensures
// this.
let res =
tokio::task::spawn_blocking(move || rx.recv_timeout(CHAIN_EXCHANGE_TIMEOUT)).await;
let res_duration = SystemTime::now()
.duration_since(req_pre_time)
.unwrap_or_default();
match res {
Ok(Ok(Ok(bs_res))) => {
// Successful response
peer_manager.log_success(peer_id, res_duration).await;
debug!("Succeeded: ChainExchange Request to {peer_id}");
Ok(bs_res)
}
Ok(Ok(Err(e))) => {
// Internal libp2p error, score failure for peer and potentially disconnect
match e {
RequestResponseError::ConnectionClosed
| RequestResponseError::DialFailure
| RequestResponseError::UnsupportedProtocols => {
peer_manager.mark_peer_bad(peer_id).await;
}
// Ignore dropping peer on timeout for now. Can't be confident yet that the
// specified timeout is adequate time.
RequestResponseError::Timeout => {
peer_manager.log_failure(peer_id, res_duration).await;
}
}
debug!("Failed: ChainExchange Request to {peer_id}");
Err(format!("Internal libp2p error: {e:?}"))
}
Ok(Err(_)) | Err(_) => {
// Sender channel internally dropped or timeout, both should log failure which
// will negatively score the peer, but not drop yet.
peer_manager.log_failure(peer_id, res_duration).await;
debug!("Timeout: ChainExchange Request to {peer_id}");
Err(format!("Chain exchange request to {peer_id} timed out"))
}
}
}
/// Send a hello request to the network (does not immediately await
/// response).
pub async fn hello_request(
&self,
peer_id: PeerId,
request: HelloRequest,
) -> anyhow::Result<(PeerId, SystemTime, Option<HelloResponse>)> {
trace!("Sending Hello Message to {}", peer_id);
// Create oneshot channel for receiving response from sent hello.
let (tx, rx) = flume::bounded(1);
// Send request into libp2p service
self.network_send
.send_async(NetworkMessage::HelloRequest {
peer_id,
request,
response_channel: tx,
})
.await
.context("Failed to send hello request: receiver dropped")?;
const HELLO_TIMEOUT: Duration = Duration::from_secs(5);
let sent = SystemTime::now();
let res = tokio::task::spawn_blocking(move || rx.recv_timeout(HELLO_TIMEOUT))
.await?
.ok();
Ok((peer_id, sent, res))
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::sync::atomic::{AtomicBool, AtomicUsize};
#[tokio::test]
async fn race_batch_ok() {
let mut batch = RaceBatch::new(3);
batch.add(async move { Ok(1) });
batch.add(async move { Err("kaboom".into()) });
assert_eq!(batch.get_ok().await, Some(1));
}
#[tokio::test]
async fn race_batch_ok_faster() {
let mut batch = RaceBatch::new(3);
batch.add(async move {
tokio::time::sleep(Duration::from_secs(100)).await;
Ok(1)
});
batch.add(async move { Ok(2) });
batch.add(async move { Err("kaboom".into()) });
assert_eq!(batch.get_ok().await, Some(2));
}
#[tokio::test]
async fn race_batch_none() {
let mut batch: RaceBatch<i32> = RaceBatch::new(3);
batch.add(async move { Err("kaboom".into()) });
batch.add(async move { Err("banana".into()) });
assert_eq!(batch.get_ok().await, None);
}
#[tokio::test]
async fn race_batch_semaphore() {
const MAX_JOBS: usize = 30;
let counter = Arc::new(AtomicUsize::new(0));
let exceeded = Arc::new(AtomicBool::new(false));
let mut batch: RaceBatch<i32> = RaceBatch::new(MAX_JOBS);
for _ in 0..10000 {
let c = counter.clone();
let e = exceeded.clone();
batch.add(async move {
let prev = c.fetch_add(1, Ordering::Relaxed);
if prev >= MAX_JOBS {
e.fetch_or(true, Ordering::Relaxed);
}
tokio::task::yield_now().await;
c.fetch_sub(1, Ordering::Relaxed);
Err("banana".into())
});
}
assert_eq!(batch.get_ok().await, None);
assert!(!exceeded.load(Ordering::Relaxed));
}
#[tokio::test]
async fn race_batch_semaphore_exceeded() {
const MAX_JOBS: usize = 30;
let counter = Arc::new(AtomicUsize::new(0));
let exceeded = Arc::new(AtomicBool::new(false));
// We add one more job to exceed the limit
let mut batch: RaceBatch<i32> = RaceBatch::new(MAX_JOBS + 1);
for _ in 0..10000 {
let c = counter.clone();
let e = exceeded.clone();
batch.add(async move {
let prev = c.fetch_add(1, Ordering::Relaxed);
if prev >= MAX_JOBS {
e.fetch_or(true, Ordering::Relaxed);
}
tokio::task::yield_now().await;
c.fetch_sub(1, Ordering::Relaxed);
Err("banana".into())
});
}
assert_eq!(batch.get_ok().await, None);
assert!(exceeded.load(Ordering::Relaxed));
}
} | };
| random_line_split |
model.py | import time
import os
import boto3
from zipfile import ZipFile
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import numpy as np
from keras.models import Sequential
from keras.layers import Conv2D, Conv2DTranspose, Reshape
from keras.layers import Flatten, BatchNormalization, Dense, Activation
from keras.layers.advanced_activations import LeakyReLU
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Here is where we will load the dataset stored in dataset_path. In this script
# we will use the Caltech-UCSD Birds-200-2011 dataset which includes 11788
# images from 200 different birds. We will feed the images without applying
# the provided bounding boxes from the dataset. The data will only be resized
# and normalized. Keras ImageDataGenerator will be used for loading the dataset
def load_dataset(aws_access, aws_secret, dataset_path, batch_size, image_shape):
#s3 = boto3.client('s3',
# aws_access_key_id = aws_access,
# aws_secret_access_key = aws_secret)
#s3.download_file('crystals-gdg', 'raw.zip', 'raw.zip')
#zip_ref = ZipFile('raw.zip', 'r')
#zip_ref.extractall()
#zip_ref.close()
#os.remove('raw.zip')
dataset_generator = ImageDataGenerator()
dataset_generator = dataset_generator.flow_from_directory(
dataset_path, target_size=(image_shape[0], image_shape[1]),
batch_size=batch_size,
class_mode=None)
return dataset_generator
# Creates the discriminator model. This model tries to classify images as real
# or fake.
def construct_discriminator(image_shape):
discriminator = Sequential()
discriminator.add(Conv2D(filters=64, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform',
input_shape=(image_shape)))
discriminator.add(LeakyReLU(0.2))
discriminator.add(Conv2D(filters=128, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform'))
discriminator.add(BatchNormalization(momentum=0.5))
discriminator.add(LeakyReLU(0.2))
discriminator.add(Conv2D(filters=256, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform'))
discriminator.add(BatchNormalization(momentum=0.5))
discriminator.add(LeakyReLU(0.2))
discriminator.add(Conv2D(filters=512, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform'))
discriminator.add(BatchNormalization(momentum=0.5))
discriminator.add(LeakyReLU(0.2))
discriminator.add(Flatten())
discriminator.add(Dense(1))
discriminator.add(Activation('sigmoid'))
optimizer = Adam(lr=0.0002, beta_1=0.5)
discriminator.compile(loss='binary_crossentropy',
optimizer=optimizer,
metrics=None)
return discriminator
# Creates the generator model. This model has an input of random noise and
# generates an image that will try mislead the discriminator.
def construct_generator():
generator = Sequential()
generator.add(Dense(units=8 * 8 * 512,
kernel_initializer='glorot_uniform',
input_shape=(1, 1, 100)))
generator.add(Reshape(target_shape=(8, 8, 512)))
generator.add(BatchNormalization(momentum=0.5))
generator.add(Activation('relu'))
generator.add(Conv2DTranspose(filters=256, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform'))
generator.add(BatchNormalization(momentum=0.5))
generator.add(Activation('relu'))
generator.add(Conv2DTranspose(filters=128, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform'))
generator.add(BatchNormalization(momentum=0.5))
generator.add(Activation('relu'))
generator.add(Conv2DTranspose(filters=64, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform'))
generator.add(BatchNormalization(momentum=0.5))
generator.add(Activation('relu'))
generator.add(Conv2DTranspose(filters=3, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform'))
generator.add(Activation('tanh'))
optimizer = Adam(lr=0.00015, beta_1=0.5)
generator.compile(loss='binary_crossentropy',
optimizer=optimizer,
metrics=None)
return generator
# Displays a figure of the generated images and saves them in as .png image
def | (generated_images, epoch, batch_number):
plt.figure(figsize=(8, 8), num=2)
gs1 = gridspec.GridSpec(8, 8)
gs1.update(wspace=0, hspace=0)
for i in range(64):
ax1 = plt.subplot(gs1[i])
ax1.set_aspect('equal')
image = generated_images[i, :, :, :]
image += 1
image *= 127.5
fig = plt.imshow(image.astype(np.uint8))
plt.axis('off')
fig.axes.get_xaxis().set_visible(False)
fig.axes.get_yaxis().set_visible(False)
plt.tight_layout()
save_name = 'generated/generatedSamples_epoch' + str(
epoch + 1) + '_batch' + str(batch_number + 1) + '.png'
plt.savefig(save_name, bbox_inches='tight', pad_inches=0)
plt.pause(0.0000000001)
plt.show()
# Main train function
def train_dcgan(aws_access, aws_secret, batch_size, epochs, image_shape, dataset_path):
# Build the adversarial model that consists in the generator output
# connected to the discriminator
generator = construct_generator()
discriminator = construct_discriminator(image_shape)
gan = Sequential()
# Only false for the adversarial model
discriminator.trainable = False
gan.add(generator)
gan.add(discriminator)
optimizer = Adam(lr=0.00015, beta_1=0.5)
gan.compile(loss='binary_crossentropy', optimizer=optimizer,
metrics=None)
# Create a dataset Generator with help of keras
dataset_generator = load_dataset(aws_access, aws_secret, dataset_path, batch_size, image_shape)
# 11788 is the total number of images on the bird dataset
number_of_batches = int(11788 / batch_size)
# Variables that will be used to plot the losses from the discriminator and
# the adversarial models
adversarial_loss = np.empty(shape=1)
discriminator_loss = np.empty(shape=1)
batches = np.empty(shape=1)
# Allo plot updates inside for loop
plt.ion()
current_batch = 0
# Let's train the DCGAN for n epochs
for epoch in range(epochs):
print("Epoch " + str(epoch+1) + "/" + str(epochs) + " :")
for batch_number in range(number_of_batches):
start_time = time.time()
# Get the current batch and normalize the images between -1 and 1
real_images = dataset_generator.next()
real_images /= 127.5
real_images -= 1
# The last batch is smaller than the other ones, so we need to
# take that into account
current_batch_size = real_images.shape[0]
# Generate noise
noise = np.random.normal(0, 1,
size=(current_batch_size,) + (1, 1, 100))
# Generate images
generated_images = generator.predict(noise)
# Add some noise to the labels that will be
# fed to the discriminator
real_y = (np.ones(current_batch_size) -
np.random.random_sample(current_batch_size) * 0.2)
fake_y = np.random.random_sample(current_batch_size) * 0.2
# Let's train the discriminator
discriminator.trainable = True
d_loss = discriminator.train_on_batch(real_images, real_y)
d_loss += discriminator.train_on_batch(generated_images, fake_y)
discriminator_loss = np.append(discriminator_loss, d_loss)
# Now it's time to train the generator
discriminator.trainable = False
noise = np.random.normal(0, 1,
size=(current_batch_size * 2,) +
(1, 1, 100))
# We try to mislead the discriminator by giving the opposite labels
fake_y = (np.ones(current_batch_size * 2) -
np.random.random_sample(current_batch_size * 2) * 0.2)
g_loss = gan.train_on_batch(noise, fake_y)
adversarial_loss = np.append(adversarial_loss, g_loss)
batches = np.append(batches, current_batch)
# Each 50 batches show and save images
if((batch_number + 1) % 50 == 0 and
current_batch_size == batch_size):
save_generated_images(generated_images, epoch, batch_number)
time_elapsed = time.time() - start_time
# Display and plot the results
print(" Batch " + str(batch_number + 1) + "/" +
str(number_of_batches) +
" generator loss | discriminator loss : " +
str(g_loss) + " | " + str(d_loss) + ' - batch took ' +
str(time_elapsed) + ' s.')
current_batch += 1
# Save the model weights each 5 epochs
if (epoch + 1) % 5 == 0:
discriminator.trainable = True
generator.save('generated/generator_epoch' + str(epoch) + '.hdf5')
discriminator.save('discriminator_epoch' +
str(epoch) + '.hdf5')
# Each epoch update the loss graphs
plt.figure(1)
plt.plot(batches, adversarial_loss, color='green',
label='Generator Loss')
plt.plot(batches, discriminator_loss, color='blue',
label='Discriminator Loss')
plt.title("DCGAN Train")
plt.xlabel("Batch Iteration")
plt.ylabel("Loss")
if epoch == 0:
plt.legend()
plt.pause(0.0000000001)
plt.show()
plt.savefig('trainingLossPlot.png')
def main():
aws_secret = ''
aws_access = ''
dataset_path = '/home/jupyter/tutorials/generative-crystals/model/raw'
batch_size = 64
image_shape = (128, 128, 3)
epochs = 100
train_dcgan(aws_access, aws_secret, batch_size, epochs,
image_shape, dataset_path)
if __name__ == "__main__":
main()
| save_generated_images | identifier_name |
model.py | import time
import os
import boto3
from zipfile import ZipFile
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import numpy as np
from keras.models import Sequential
from keras.layers import Conv2D, Conv2DTranspose, Reshape
from keras.layers import Flatten, BatchNormalization, Dense, Activation
from keras.layers.advanced_activations import LeakyReLU
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Here is where we will load the dataset stored in dataset_path. In this script
# we will use the Caltech-UCSD Birds-200-2011 dataset which includes 11788
# images from 200 different birds. We will feed the images without applying
# the provided bounding boxes from the dataset. The data will only be resized
# and normalized. Keras ImageDataGenerator will be used for loading the dataset
def load_dataset(aws_access, aws_secret, dataset_path, batch_size, image_shape):
#s3 = boto3.client('s3',
# aws_access_key_id = aws_access,
# aws_secret_access_key = aws_secret)
#s3.download_file('crystals-gdg', 'raw.zip', 'raw.zip')
#zip_ref = ZipFile('raw.zip', 'r')
#zip_ref.extractall()
#zip_ref.close()
#os.remove('raw.zip')
dataset_generator = ImageDataGenerator()
dataset_generator = dataset_generator.flow_from_directory(
dataset_path, target_size=(image_shape[0], image_shape[1]),
batch_size=batch_size,
class_mode=None)
return dataset_generator
# Creates the discriminator model. This model tries to classify images as real
# or fake.
def construct_discriminator(image_shape):
discriminator = Sequential()
discriminator.add(Conv2D(filters=64, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform',
input_shape=(image_shape)))
discriminator.add(LeakyReLU(0.2))
discriminator.add(Conv2D(filters=128, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform'))
discriminator.add(BatchNormalization(momentum=0.5))
discriminator.add(LeakyReLU(0.2))
discriminator.add(Conv2D(filters=256, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform'))
discriminator.add(BatchNormalization(momentum=0.5))
discriminator.add(LeakyReLU(0.2))
discriminator.add(Conv2D(filters=512, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform'))
discriminator.add(BatchNormalization(momentum=0.5))
discriminator.add(LeakyReLU(0.2))
discriminator.add(Flatten())
discriminator.add(Dense(1))
discriminator.add(Activation('sigmoid'))
optimizer = Adam(lr=0.0002, beta_1=0.5)
discriminator.compile(loss='binary_crossentropy',
optimizer=optimizer,
metrics=None)
return discriminator
# Creates the generator model. This model has an input of random noise and
# generates an image that will try mislead the discriminator.
def construct_generator():
generator = Sequential()
generator.add(Dense(units=8 * 8 * 512,
kernel_initializer='glorot_uniform',
input_shape=(1, 1, 100)))
generator.add(Reshape(target_shape=(8, 8, 512)))
generator.add(BatchNormalization(momentum=0.5))
generator.add(Activation('relu'))
generator.add(Conv2DTranspose(filters=256, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform'))
generator.add(BatchNormalization(momentum=0.5))
generator.add(Activation('relu'))
generator.add(Conv2DTranspose(filters=128, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform'))
generator.add(BatchNormalization(momentum=0.5))
generator.add(Activation('relu'))
generator.add(Conv2DTranspose(filters=64, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform'))
generator.add(BatchNormalization(momentum=0.5))
generator.add(Activation('relu'))
generator.add(Conv2DTranspose(filters=3, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform'))
generator.add(Activation('tanh'))
optimizer = Adam(lr=0.00015, beta_1=0.5)
generator.compile(loss='binary_crossentropy',
optimizer=optimizer,
metrics=None)
return generator
# Displays a figure of the generated images and saves them in as .png image
def save_generated_images(generated_images, epoch, batch_number):
plt.figure(figsize=(8, 8), num=2)
gs1 = gridspec.GridSpec(8, 8)
gs1.update(wspace=0, hspace=0)
for i in range(64):
ax1 = plt.subplot(gs1[i])
ax1.set_aspect('equal')
image = generated_images[i, :, :, :]
image += 1
image *= 127.5
fig = plt.imshow(image.astype(np.uint8))
plt.axis('off')
fig.axes.get_xaxis().set_visible(False)
fig.axes.get_yaxis().set_visible(False)
plt.tight_layout()
save_name = 'generated/generatedSamples_epoch' + str(
epoch + 1) + '_batch' + str(batch_number + 1) + '.png'
plt.savefig(save_name, bbox_inches='tight', pad_inches=0)
plt.pause(0.0000000001)
plt.show()
# Main train function
def train_dcgan(aws_access, aws_secret, batch_size, epochs, image_shape, dataset_path):
# Build the adversarial model that consists in the generator output
# connected to the discriminator
|
def main():
aws_secret = ''
aws_access = ''
dataset_path = '/home/jupyter/tutorials/generative-crystals/model/raw'
batch_size = 64
image_shape = (128, 128, 3)
epochs = 100
train_dcgan(aws_access, aws_secret, batch_size, epochs,
image_shape, dataset_path)
if __name__ == "__main__":
main()
| generator = construct_generator()
discriminator = construct_discriminator(image_shape)
gan = Sequential()
# Only false for the adversarial model
discriminator.trainable = False
gan.add(generator)
gan.add(discriminator)
optimizer = Adam(lr=0.00015, beta_1=0.5)
gan.compile(loss='binary_crossentropy', optimizer=optimizer,
metrics=None)
# Create a dataset Generator with help of keras
dataset_generator = load_dataset(aws_access, aws_secret, dataset_path, batch_size, image_shape)
# 11788 is the total number of images on the bird dataset
number_of_batches = int(11788 / batch_size)
# Variables that will be used to plot the losses from the discriminator and
# the adversarial models
adversarial_loss = np.empty(shape=1)
discriminator_loss = np.empty(shape=1)
batches = np.empty(shape=1)
# Allo plot updates inside for loop
plt.ion()
current_batch = 0
# Let's train the DCGAN for n epochs
for epoch in range(epochs):
print("Epoch " + str(epoch+1) + "/" + str(epochs) + " :")
for batch_number in range(number_of_batches):
start_time = time.time()
# Get the current batch and normalize the images between -1 and 1
real_images = dataset_generator.next()
real_images /= 127.5
real_images -= 1
# The last batch is smaller than the other ones, so we need to
# take that into account
current_batch_size = real_images.shape[0]
# Generate noise
noise = np.random.normal(0, 1,
size=(current_batch_size,) + (1, 1, 100))
# Generate images
generated_images = generator.predict(noise)
# Add some noise to the labels that will be
# fed to the discriminator
real_y = (np.ones(current_batch_size) -
np.random.random_sample(current_batch_size) * 0.2)
fake_y = np.random.random_sample(current_batch_size) * 0.2
# Let's train the discriminator
discriminator.trainable = True
d_loss = discriminator.train_on_batch(real_images, real_y)
d_loss += discriminator.train_on_batch(generated_images, fake_y)
discriminator_loss = np.append(discriminator_loss, d_loss)
# Now it's time to train the generator
discriminator.trainable = False
noise = np.random.normal(0, 1,
size=(current_batch_size * 2,) +
(1, 1, 100))
# We try to mislead the discriminator by giving the opposite labels
fake_y = (np.ones(current_batch_size * 2) -
np.random.random_sample(current_batch_size * 2) * 0.2)
g_loss = gan.train_on_batch(noise, fake_y)
adversarial_loss = np.append(adversarial_loss, g_loss)
batches = np.append(batches, current_batch)
# Each 50 batches show and save images
if((batch_number + 1) % 50 == 0 and
current_batch_size == batch_size):
save_generated_images(generated_images, epoch, batch_number)
time_elapsed = time.time() - start_time
# Display and plot the results
print(" Batch " + str(batch_number + 1) + "/" +
str(number_of_batches) +
" generator loss | discriminator loss : " +
str(g_loss) + " | " + str(d_loss) + ' - batch took ' +
str(time_elapsed) + ' s.')
current_batch += 1
# Save the model weights each 5 epochs
if (epoch + 1) % 5 == 0:
discriminator.trainable = True
generator.save('generated/generator_epoch' + str(epoch) + '.hdf5')
discriminator.save('discriminator_epoch' +
str(epoch) + '.hdf5')
# Each epoch update the loss graphs
plt.figure(1)
plt.plot(batches, adversarial_loss, color='green',
label='Generator Loss')
plt.plot(batches, discriminator_loss, color='blue',
label='Discriminator Loss')
plt.title("DCGAN Train")
plt.xlabel("Batch Iteration")
plt.ylabel("Loss")
if epoch == 0:
plt.legend()
plt.pause(0.0000000001)
plt.show()
plt.savefig('trainingLossPlot.png') | identifier_body |
model.py | import time
import os
import boto3
from zipfile import ZipFile
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import numpy as np
from keras.models import Sequential
from keras.layers import Conv2D, Conv2DTranspose, Reshape
from keras.layers import Flatten, BatchNormalization, Dense, Activation
from keras.layers.advanced_activations import LeakyReLU
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Here is where we will load the dataset stored in dataset_path. In this script
# we will use the Caltech-UCSD Birds-200-2011 dataset which includes 11788
# images from 200 different birds. We will feed the images without applying
# the provided bounding boxes from the dataset. The data will only be resized
# and normalized. Keras ImageDataGenerator will be used for loading the dataset
def load_dataset(aws_access, aws_secret, dataset_path, batch_size, image_shape):
#s3 = boto3.client('s3',
# aws_access_key_id = aws_access,
# aws_secret_access_key = aws_secret)
#s3.download_file('crystals-gdg', 'raw.zip', 'raw.zip')
#zip_ref = ZipFile('raw.zip', 'r')
#zip_ref.extractall()
#zip_ref.close()
#os.remove('raw.zip')
dataset_generator = ImageDataGenerator()
dataset_generator = dataset_generator.flow_from_directory(
dataset_path, target_size=(image_shape[0], image_shape[1]),
batch_size=batch_size,
class_mode=None)
return dataset_generator
# Creates the discriminator model. This model tries to classify images as real
# or fake.
def construct_discriminator(image_shape):
discriminator = Sequential()
discriminator.add(Conv2D(filters=64, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform',
input_shape=(image_shape)))
discriminator.add(LeakyReLU(0.2))
discriminator.add(Conv2D(filters=128, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform'))
discriminator.add(BatchNormalization(momentum=0.5))
discriminator.add(LeakyReLU(0.2))
discriminator.add(Conv2D(filters=256, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform'))
discriminator.add(BatchNormalization(momentum=0.5))
discriminator.add(LeakyReLU(0.2))
discriminator.add(Conv2D(filters=512, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform'))
discriminator.add(BatchNormalization(momentum=0.5))
discriminator.add(LeakyReLU(0.2))
discriminator.add(Flatten())
discriminator.add(Dense(1))
discriminator.add(Activation('sigmoid'))
optimizer = Adam(lr=0.0002, beta_1=0.5)
discriminator.compile(loss='binary_crossentropy',
optimizer=optimizer,
metrics=None)
return discriminator
# Creates the generator model. This model has an input of random noise and
# generates an image that will try mislead the discriminator.
def construct_generator():
generator = Sequential()
generator.add(Dense(units=8 * 8 * 512,
kernel_initializer='glorot_uniform',
input_shape=(1, 1, 100)))
generator.add(Reshape(target_shape=(8, 8, 512)))
generator.add(BatchNormalization(momentum=0.5))
generator.add(Activation('relu'))
generator.add(Conv2DTranspose(filters=256, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform'))
generator.add(BatchNormalization(momentum=0.5))
generator.add(Activation('relu'))
generator.add(Conv2DTranspose(filters=128, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform'))
generator.add(BatchNormalization(momentum=0.5))
generator.add(Activation('relu'))
generator.add(Conv2DTranspose(filters=64, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform'))
generator.add(BatchNormalization(momentum=0.5))
generator.add(Activation('relu'))
generator.add(Conv2DTranspose(filters=3, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform'))
generator.add(Activation('tanh'))
optimizer = Adam(lr=0.00015, beta_1=0.5)
generator.compile(loss='binary_crossentropy',
optimizer=optimizer,
metrics=None)
return generator
# Displays a figure of the generated images and saves them in as .png image
def save_generated_images(generated_images, epoch, batch_number):
plt.figure(figsize=(8, 8), num=2)
gs1 = gridspec.GridSpec(8, 8)
gs1.update(wspace=0, hspace=0)
for i in range(64):
ax1 = plt.subplot(gs1[i])
ax1.set_aspect('equal')
image = generated_images[i, :, :, :]
image += 1
image *= 127.5
fig = plt.imshow(image.astype(np.uint8))
plt.axis('off')
fig.axes.get_xaxis().set_visible(False)
fig.axes.get_yaxis().set_visible(False)
plt.tight_layout()
save_name = 'generated/generatedSamples_epoch' + str(
epoch + 1) + '_batch' + str(batch_number + 1) + '.png'
plt.savefig(save_name, bbox_inches='tight', pad_inches=0)
plt.pause(0.0000000001)
plt.show()
# Main train function
def train_dcgan(aws_access, aws_secret, batch_size, epochs, image_shape, dataset_path):
# Build the adversarial model that consists in the generator output
# connected to the discriminator
generator = construct_generator()
discriminator = construct_discriminator(image_shape)
gan = Sequential()
# Only false for the adversarial model
discriminator.trainable = False
gan.add(generator)
gan.add(discriminator)
optimizer = Adam(lr=0.00015, beta_1=0.5)
gan.compile(loss='binary_crossentropy', optimizer=optimizer,
metrics=None)
# Create a dataset Generator with help of keras
dataset_generator = load_dataset(aws_access, aws_secret, dataset_path, batch_size, image_shape)
# 11788 is the total number of images on the bird dataset
number_of_batches = int(11788 / batch_size)
# Variables that will be used to plot the losses from the discriminator and
# the adversarial models
adversarial_loss = np.empty(shape=1)
discriminator_loss = np.empty(shape=1)
batches = np.empty(shape=1)
# Allo plot updates inside for loop
plt.ion()
current_batch = 0
# Let's train the DCGAN for n epochs
for epoch in range(epochs):
print("Epoch " + str(epoch+1) + "/" + str(epochs) + " :")
for batch_number in range(number_of_batches):
start_time = time.time()
# Get the current batch and normalize the images between -1 and 1
real_images = dataset_generator.next()
real_images /= 127.5
real_images -= 1
# The last batch is smaller than the other ones, so we need to
# take that into account
current_batch_size = real_images.shape[0]
# Generate noise
noise = np.random.normal(0, 1,
size=(current_batch_size,) + (1, 1, 100))
# Generate images
generated_images = generator.predict(noise)
# Add some noise to the labels that will be
# fed to the discriminator
real_y = (np.ones(current_batch_size) -
np.random.random_sample(current_batch_size) * 0.2)
fake_y = np.random.random_sample(current_batch_size) * 0.2
# Let's train the discriminator
discriminator.trainable = True
d_loss = discriminator.train_on_batch(real_images, real_y)
d_loss += discriminator.train_on_batch(generated_images, fake_y)
discriminator_loss = np.append(discriminator_loss, d_loss)
# Now it's time to train the generator
discriminator.trainable = False
noise = np.random.normal(0, 1,
size=(current_batch_size * 2,) +
(1, 1, 100))
# We try to mislead the discriminator by giving the opposite labels
fake_y = (np.ones(current_batch_size * 2) -
np.random.random_sample(current_batch_size * 2) * 0.2)
g_loss = gan.train_on_batch(noise, fake_y)
adversarial_loss = np.append(adversarial_loss, g_loss)
batches = np.append(batches, current_batch)
# Each 50 batches show and save images
if((batch_number + 1) % 50 == 0 and
current_batch_size == batch_size):
|
time_elapsed = time.time() - start_time
# Display and plot the results
print(" Batch " + str(batch_number + 1) + "/" +
str(number_of_batches) +
" generator loss | discriminator loss : " +
str(g_loss) + " | " + str(d_loss) + ' - batch took ' +
str(time_elapsed) + ' s.')
current_batch += 1
# Save the model weights each 5 epochs
if (epoch + 1) % 5 == 0:
discriminator.trainable = True
generator.save('generated/generator_epoch' + str(epoch) + '.hdf5')
discriminator.save('discriminator_epoch' +
str(epoch) + '.hdf5')
# Each epoch update the loss graphs
plt.figure(1)
plt.plot(batches, adversarial_loss, color='green',
label='Generator Loss')
plt.plot(batches, discriminator_loss, color='blue',
label='Discriminator Loss')
plt.title("DCGAN Train")
plt.xlabel("Batch Iteration")
plt.ylabel("Loss")
if epoch == 0:
plt.legend()
plt.pause(0.0000000001)
plt.show()
plt.savefig('trainingLossPlot.png')
def main():
aws_secret = ''
aws_access = ''
dataset_path = '/home/jupyter/tutorials/generative-crystals/model/raw'
batch_size = 64
image_shape = (128, 128, 3)
epochs = 100
train_dcgan(aws_access, aws_secret, batch_size, epochs,
image_shape, dataset_path)
if __name__ == "__main__":
main()
| save_generated_images(generated_images, epoch, batch_number) | conditional_block |
model.py | import time | import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import numpy as np
from keras.models import Sequential
from keras.layers import Conv2D, Conv2DTranspose, Reshape
from keras.layers import Flatten, BatchNormalization, Dense, Activation
from keras.layers.advanced_activations import LeakyReLU
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Here is where we will load the dataset stored in dataset_path. In this script
# we will use the Caltech-UCSD Birds-200-2011 dataset which includes 11788
# images from 200 different birds. We will feed the images without applying
# the provided bounding boxes from the dataset. The data will only be resized
# and normalized. Keras ImageDataGenerator will be used for loading the dataset
def load_dataset(aws_access, aws_secret, dataset_path, batch_size, image_shape):
#s3 = boto3.client('s3',
# aws_access_key_id = aws_access,
# aws_secret_access_key = aws_secret)
#s3.download_file('crystals-gdg', 'raw.zip', 'raw.zip')
#zip_ref = ZipFile('raw.zip', 'r')
#zip_ref.extractall()
#zip_ref.close()
#os.remove('raw.zip')
dataset_generator = ImageDataGenerator()
dataset_generator = dataset_generator.flow_from_directory(
dataset_path, target_size=(image_shape[0], image_shape[1]),
batch_size=batch_size,
class_mode=None)
return dataset_generator
# Creates the discriminator model. This model tries to classify images as real
# or fake.
def construct_discriminator(image_shape):
discriminator = Sequential()
discriminator.add(Conv2D(filters=64, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform',
input_shape=(image_shape)))
discriminator.add(LeakyReLU(0.2))
discriminator.add(Conv2D(filters=128, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform'))
discriminator.add(BatchNormalization(momentum=0.5))
discriminator.add(LeakyReLU(0.2))
discriminator.add(Conv2D(filters=256, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform'))
discriminator.add(BatchNormalization(momentum=0.5))
discriminator.add(LeakyReLU(0.2))
discriminator.add(Conv2D(filters=512, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform'))
discriminator.add(BatchNormalization(momentum=0.5))
discriminator.add(LeakyReLU(0.2))
discriminator.add(Flatten())
discriminator.add(Dense(1))
discriminator.add(Activation('sigmoid'))
optimizer = Adam(lr=0.0002, beta_1=0.5)
discriminator.compile(loss='binary_crossentropy',
optimizer=optimizer,
metrics=None)
return discriminator
# Creates the generator model. This model has an input of random noise and
# generates an image that will try mislead the discriminator.
def construct_generator():
generator = Sequential()
generator.add(Dense(units=8 * 8 * 512,
kernel_initializer='glorot_uniform',
input_shape=(1, 1, 100)))
generator.add(Reshape(target_shape=(8, 8, 512)))
generator.add(BatchNormalization(momentum=0.5))
generator.add(Activation('relu'))
generator.add(Conv2DTranspose(filters=256, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform'))
generator.add(BatchNormalization(momentum=0.5))
generator.add(Activation('relu'))
generator.add(Conv2DTranspose(filters=128, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform'))
generator.add(BatchNormalization(momentum=0.5))
generator.add(Activation('relu'))
generator.add(Conv2DTranspose(filters=64, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform'))
generator.add(BatchNormalization(momentum=0.5))
generator.add(Activation('relu'))
generator.add(Conv2DTranspose(filters=3, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform'))
generator.add(Activation('tanh'))
optimizer = Adam(lr=0.00015, beta_1=0.5)
generator.compile(loss='binary_crossentropy',
optimizer=optimizer,
metrics=None)
return generator
# Displays a figure of the generated images and saves them in as .png image
def save_generated_images(generated_images, epoch, batch_number):
plt.figure(figsize=(8, 8), num=2)
gs1 = gridspec.GridSpec(8, 8)
gs1.update(wspace=0, hspace=0)
for i in range(64):
ax1 = plt.subplot(gs1[i])
ax1.set_aspect('equal')
image = generated_images[i, :, :, :]
image += 1
image *= 127.5
fig = plt.imshow(image.astype(np.uint8))
plt.axis('off')
fig.axes.get_xaxis().set_visible(False)
fig.axes.get_yaxis().set_visible(False)
plt.tight_layout()
save_name = 'generated/generatedSamples_epoch' + str(
epoch + 1) + '_batch' + str(batch_number + 1) + '.png'
plt.savefig(save_name, bbox_inches='tight', pad_inches=0)
plt.pause(0.0000000001)
plt.show()
# Main train function
def train_dcgan(aws_access, aws_secret, batch_size, epochs, image_shape, dataset_path):
# Build the adversarial model that consists in the generator output
# connected to the discriminator
generator = construct_generator()
discriminator = construct_discriminator(image_shape)
gan = Sequential()
# Only false for the adversarial model
discriminator.trainable = False
gan.add(generator)
gan.add(discriminator)
optimizer = Adam(lr=0.00015, beta_1=0.5)
gan.compile(loss='binary_crossentropy', optimizer=optimizer,
metrics=None)
# Create a dataset Generator with help of keras
dataset_generator = load_dataset(aws_access, aws_secret, dataset_path, batch_size, image_shape)
# 11788 is the total number of images on the bird dataset
number_of_batches = int(11788 / batch_size)
# Variables that will be used to plot the losses from the discriminator and
# the adversarial models
adversarial_loss = np.empty(shape=1)
discriminator_loss = np.empty(shape=1)
batches = np.empty(shape=1)
# Allo plot updates inside for loop
plt.ion()
current_batch = 0
# Let's train the DCGAN for n epochs
for epoch in range(epochs):
print("Epoch " + str(epoch+1) + "/" + str(epochs) + " :")
for batch_number in range(number_of_batches):
start_time = time.time()
# Get the current batch and normalize the images between -1 and 1
real_images = dataset_generator.next()
real_images /= 127.5
real_images -= 1
# The last batch is smaller than the other ones, so we need to
# take that into account
current_batch_size = real_images.shape[0]
# Generate noise
noise = np.random.normal(0, 1,
size=(current_batch_size,) + (1, 1, 100))
# Generate images
generated_images = generator.predict(noise)
# Add some noise to the labels that will be
# fed to the discriminator
real_y = (np.ones(current_batch_size) -
np.random.random_sample(current_batch_size) * 0.2)
fake_y = np.random.random_sample(current_batch_size) * 0.2
# Let's train the discriminator
discriminator.trainable = True
d_loss = discriminator.train_on_batch(real_images, real_y)
d_loss += discriminator.train_on_batch(generated_images, fake_y)
discriminator_loss = np.append(discriminator_loss, d_loss)
# Now it's time to train the generator
discriminator.trainable = False
noise = np.random.normal(0, 1,
size=(current_batch_size * 2,) +
(1, 1, 100))
# We try to mislead the discriminator by giving the opposite labels
fake_y = (np.ones(current_batch_size * 2) -
np.random.random_sample(current_batch_size * 2) * 0.2)
g_loss = gan.train_on_batch(noise, fake_y)
adversarial_loss = np.append(adversarial_loss, g_loss)
batches = np.append(batches, current_batch)
# Each 50 batches show and save images
if((batch_number + 1) % 50 == 0 and
current_batch_size == batch_size):
save_generated_images(generated_images, epoch, batch_number)
time_elapsed = time.time() - start_time
# Display and plot the results
print(" Batch " + str(batch_number + 1) + "/" +
str(number_of_batches) +
" generator loss | discriminator loss : " +
str(g_loss) + " | " + str(d_loss) + ' - batch took ' +
str(time_elapsed) + ' s.')
current_batch += 1
# Save the model weights each 5 epochs
if (epoch + 1) % 5 == 0:
discriminator.trainable = True
generator.save('generated/generator_epoch' + str(epoch) + '.hdf5')
discriminator.save('discriminator_epoch' +
str(epoch) + '.hdf5')
# Each epoch update the loss graphs
plt.figure(1)
plt.plot(batches, adversarial_loss, color='green',
label='Generator Loss')
plt.plot(batches, discriminator_loss, color='blue',
label='Discriminator Loss')
plt.title("DCGAN Train")
plt.xlabel("Batch Iteration")
plt.ylabel("Loss")
if epoch == 0:
plt.legend()
plt.pause(0.0000000001)
plt.show()
plt.savefig('trainingLossPlot.png')
def main():
aws_secret = ''
aws_access = ''
dataset_path = '/home/jupyter/tutorials/generative-crystals/model/raw'
batch_size = 64
image_shape = (128, 128, 3)
epochs = 100
train_dcgan(aws_access, aws_secret, batch_size, epochs,
image_shape, dataset_path)
if __name__ == "__main__":
main() | import os
import boto3
from zipfile import ZipFile | random_line_split |
index.d.ts | declare module fng {
var formsAngular: angular.IModule;
/*
Type definitions for types that are used on both the client and the server
*/
/*
IInternalLookupreference makes it possible to look up from a list (of key / value pairs) in the current record. For example
var ShelfSchema = new Schema({
location: {type: String, required: true}
}); // Note that this schema needs an _id as it is an internal lookup
var ESchema = new Schema({
warehouse_name: {type: String, list: {}},
shelves: {type: [ShelfSchema]},
favouriteShelf: {type: Schema.Types.ObjectId, internalRef: {property: 'shelves', value:'location'};
});
*/
export interface IFngInternalLookupReference {
property: string;
value: string;
}
/*
ILookupListReference makes it possible to look up from a list (of key / value pairs)
in a document in another collection for example:
const LSchemaDef : IFngSchemaDefinition = {
descriptin: {type: String, required: true, list: {}},
warehouse: {type: Schema.Types.ObjectId, ref:'k_referencing_self_collection', form: {directive: 'fng-ui-select', fngUiSelect: {fngAjax: true}}},
shelf: {type: Schema.Types.ObjectId, lookupListRef: {collection:'k_referencing_self_collection', id:'$warehouse', property: 'shelves', value:'location'}},
};
*/
export interface IFngLookupListReference {
collection: string; // collection that contains the list
/*
Some means of calculating _id in collection. If it starts with $ then it is property in record
*/
id: string;
property: string;
value: string;
}
/*
showWhen allows conditional display of fields based on values elsewhere.
For example having prompted whether someone is a smoker you may want a field asking how many they smoke a day:
smoker: {type: Boolean},
howManyPerDay: {type: Number, form:{showWhen:{lhs:"$smoker", comp:"eq", rhs:true}}}
As you can see from the example there are three parts to the showIf object:
lhs (left hand side) a value to be compared. To use the current value of another field in the document preceed it with $.
comp supported comparators are 'eq' for equality, 'ne' for not equals, 'gt' (greater than), 'gte' (greater than or equal to),
'lt' (less than) and 'lte' (less than or equal to)
rhs (right hand side) the other value to be compared. Details as for lhs.
*/
export interface IFngShowWhen {
lhs: any;
comp: 'eq' | 'ne' | 'gt' | 'gte' | 'lt' | 'lte';
rhs: any;
}
/*
link allows the setting up of hyperlinks for lookup reference fields
*/
export interface IFngLinkSetup {
linkOnly?: boolean; // if true then the input element is not generated (this overrides label)
label?: boolean; // Make a link out of the label (causes text to be overridden) (this overrides text)
form?: string; // can be used to generate a link to a custom schema
text?: string; // the literal value used for the link. If this property is omitted then text is generated from the field values of the document referred to by the link.
}
export interface IFngSchemaTypeFormOpts {
/*
The input type to be generated - which must be compatible with the Mongoose type.
Common examples are email, url.
In addition to the standard HTML5 types there are some 'special' types:
textarea: a textarea control
radio: a radio button control
select: a select control
Note that if the field type is String and the name (or label) contains the string
'password' then type="password" will be used unless type="text".
If the Mongoose schema has an enum array you can specify a radio button group
(instead of a select) by using a type of radio
*/
type?: string;
hidden?: boolean; // inhibits this schema key from appearing on the generated form.
label?: string | null; // overrides the default input label. label:null suppresses the label altogether.
ref?: string; // reference to another collection
internalRef? : IFngInternalLookupReference;
lookupListRef?: IFngLookupListReference;
id?: string; // specifies the id of the input field (which defaults to f_name)
placeHolder?: string // adds placeholder text to the input (depending on data type).
help?: string; // adds help text under the input.
helpInline?: string; // adds help to the right of the input.
popup?: string; // adds popup help as specified.
order?: number; // allows user to specify the order / tab order of this field in the form. This overrides the position in the Mongoose schema.
size?: 'mini' | 'small' | 'medium' | 'large' | 'xlarge' | 'xxlarge' | 'block-level'; // sets control width. Default is 'medium''
readonly?: boolean; // adds the readonly attribute to the generated input (currently doesn't work with date - and perhaps other types).
rows?: number | 'auto'; // sets the number of rows in inputs (such as textarea) that support this. Setting rows to "auto" makes the textarea expand to fit the content, rather than create a scrollbar.
tab?: string; // Used to divide a large form up into a tabset with multiple tabs
showWhen?: IFngShowWhen | string; // allows conditional display of fields based on values elsewhere. string must be an abular expression.
/*
add: 'class="myClass"' allows custom styling of a specific input
Angular model options can be used - for example add: 'ng-model-options="{updateOn: \'default blur\', debounce: { \'default\': 500, \'blur\': 0 }}" '
custom validation directives, such as the timezone validation in this schema
*/
add?: string; // allows arbitrary attributes to be added to the input tag.
class?: string; // allows arbitrary classes to be added to the input tag.
inlineRadio?: boolean; // (only valid when type is radio) should be set to true to present all radio button options in a single line
link?: IFngLinkSetup; // handles displaying links for ref lookups
/*
With a select / radio type you can specify the options.
You can either do this by putting the option values in an array and passing it directly, or by putting them in an
array on the scope and passing the name of the array (which allows run-time modification
*/
options?: Array<string> | string;
/* Directive allows you to specify custom behaviour.
Gets passed attributes from form-input (with schema replaced with the current element - so add can be used to pass data into directives).
*/
directive?: string;
/* Inhibits the forms-angular client from looking up the possible values for a
IFngLookupReference or IFngInternalLookupReference field
(when a directive has a an alternative way of handling things)
*/
noLookup?: boolean;
/*
The next few options relate to the handling and display of arrays (including arrays of subdocuments)
*/
noAdd?: boolean; // inhibits an Add button being generated for arrays.
unshift?: boolean; // (for arrays of sub documents) puts an add button in the sub schema header which allows insertion of new sub documents at the beginning of the array.
noRemove?: boolean; // inhibits a Remove button being generated for array elements.
formstyle?: 'inline' | 'vertical' | 'horizontal' | 'horizontalCompact'; // (only valid on a sub schema) sets style of sub form.
sortable? : boolean; // Allows drag and drop sorting of arrays - requires angular-ui-sortable
/*
The next section relates to the display of sub documents
*/
customSubDoc?: string; // Allows you to specify custom HTML (which may include directives) for the sub doc
customHeader?: string; // Allows you to specify custom HTML (which may include directives) for the header of a group of sub docs
customFooter?: string; // Allows you to specify custom HTML (which may include directives) for the footer of a group of sub docs
}
// Schema passed from server - derived from Mongoose schema
export interface IFieldViewInfo extends IFngSchemaTypeFormOpts {
name: string;
schema?: Array<IFieldViewInfo>;
array?: boolean;
showIf? : any;
required?: boolean;
step? : number;
}
// Schema used internally on client - often derived from IFieldViewInfo passed from server
export interface IFormInstruction extends IFieldViewInfo {
id? : string; // id of generated DOM element
type?: 'string' | 'text' | 'textarea' | 'number' | 'select' | 'link' | 'date' | 'checkbox' | 'password';
defaultValue? : any; | label: string;
options?: any;
ids?: any;
hidden?: boolean;
tab?: string;
add? : string;
ref? : any;
link? : any;
linktext?: string;
linklabel?: boolean;
form?: string; // the form that is linked to
select2? : any; // deprecated
schema?: IFormInstruction[]; // If the field is an array of fields
}
export interface IContainer {
/*
Type of container, which determines markup. This is currently only available when the schema is generated by
the client for use independent of the BaseController
In the case of a string which does not match one of the predefined options
the generated container div is given the class of the name
*/
containerType: 'fieldset' | 'well' | 'tabset' | 'tab' | 'well-large' | 'well-small' | string;
title?: string;
/*
h1...h6 will use a header style
anything else will be used as a paragraph stype
*/
titleTagOrClass? : string;
content: IFormInstruction[];
}
export type IFormSchemaElement = IFormInstruction | IContainer;
export type IFormSchema = IFormSchemaElement[];
export type IControlledFormSchema = IFormInstruction[];
export interface IEnumInstruction {
repeat: string;
value: string;
label? : string;
}
export interface IFngCtrlState {
master: any;
allowLocationChange: boolean; // Do we allow location change or prompt for permission
}
export interface IRecordHandler {
convertToMongoModel(schema: IControlledFormSchema, anObject: any, prefixLength: number, scope: IFormScope): any;
createNew(dataToSave: any, options: any, scope: IFormScope, ctrlState: IFngCtrlState): void;
deleteRecord(model: any, id: any, scope: IFormScope, ctrlState: any): void;
updateDocument(dataToSave : any, options: any, scope: IFormScope, ctrlState: IFngCtrlState) : void;
readRecord($scope: IFormScope, ctrlState);
scrollTheList($scope: IFormScope);
getListData(record, fieldName, listSchema?, $scope?: IFormScope);
suffixCleanId(inst, suffix);
setData(object, fieldname, element, value);
setUpLookupOptions(lookupCollection, schemaElement, $scope: IFormScope, ctrlState, handleSchema);
setUpLookupListOptions: (ref: IFngLookupListReference, formInstructions: IFormInstruction, $scope: IFormScope, ctrlState: IFngCtrlState) => void;
handleInternalLookup($scope: IFormScope, formInstructions, ref): void;
preservePristine(element, fn): void;
convertIdToListValue(id, idsArray, valuesArray, fname);
decorateScope($scope:IFormScope, $uibModal, recordHandlerInstance : IRecordHandler, ctrlState);
fillFormFromBackendCustomSchema(schema, $scope:IFormScope, formGeneratorInstance, recordHandlerInstance, ctrlState);
fillFormWithBackendSchema($scope: IFormScope, formGeneratorInstance, recordHandlerInstance, ctrlState);
handleError($scope: IFormScope);
}
export interface IFormGenerator {
generateEditUrl(obj, $scope:IFormScope): string;
generateViewUrl(obj, $scope:IFormScope): string;
generateNewUrl($scope: IFormScope): string;
handleFieldType(formInstructions, mongooseType, mongooseOptions, $scope: IFormScope, ctrlState);
handleSchema(description: string, source, destForm, destList, prefix, doRecursion: boolean, $scope: IFormScope, ctrlState);
updateDataDependentDisplay(curValue, oldValue, force, $scope: IFormScope);
add(fieldName, $event, $scope: IFormScope);
unshift(fieldName, $event, $scope: IFormScope);
remove(fieldName, value, $event, $scope: IFormScope);
hasError(formName, name, index, $scope: IFormScope);
decorateScope($scope: IFormScope, formGeneratorInstance, recordHandlerInstance: IRecordHandler, sharedStuff);
}
export interface IFngSingleLookupHandler {
formInstructions: IFormInstruction;
lastPart: string;
possibleArray: string;
}
export interface IFngLookupHandler {
lookupOptions: string[];
lookupIds: string[];
handlers: IFngSingleLookupHandler[]
}
export interface IFngInternalLookupHandlerInfo extends IFngLookupHandler {
ref: IFngInternalLookupReference;
}
export interface IFngLookupListHandlerInfo extends IFngLookupHandler {
ref: IFngLookupListReference;
}
/*
The scope which contains form data
*/
export interface IFormScope extends angular.IScope {
sharedData: any;
modelNameDisplay : string;
modelName: string;
formName: string;
alertTitle: any;
errorVisible: boolean;
errorMessage: any;
errorHideTimer: number;
save: any;
newRecord: boolean;
initialiseNewRecord?: any;
id: any;
newClick: any;
deleteClick: any;
isDeleteDisabled: any;
isCancelDisabled: any;
isNewDisabled: any;
isSaveDisabled: any;
whyDisabled: string;
unconfirmedDelete: boolean;
getVal: any;
sortableOptions: any;
tabDeselect: any;
tabs?: Array<any>; // In the case of forms that contain a tab set
tab?: string; // title of the active tab - from the route
activeTabNo?: number;
topLevelFormName: string; // The name of the form
record: any;
originalData: any; // the unconverted data read from the server
phase: any;
disableFunctions: any;
dataEventFunctions: any;
listSchema: any;
recordList: any;
dataDependencies: any;
internalLookups: IFngInternalLookupHandlerInfo[];
listLookups: IFngLookupListHandlerInfo[];
conversions: any;
pageSize: any;
pagesLoaded: any;
cancel: () => any;
showError: (error: any, alertTitle? : string) => void;
prepareForSave: (cb: (error: string, dataToSave?: any) => void) => void;
setDefaults: (formSchema: IFormSchema, base?: string) => any;
formSchema: IControlledFormSchema;
baseSchema: () => Array<any>;
setFormDirty: any;
add: any;
hasError: any;
unshift: any;
remove: any;
openSelect2: any;
toJSON: any;
skipCols: any;
setPristine: any;
generateEditUrl: any;
generateViewUrl: any;
generateNewUrl: any;
scrollTheList: any;
getListData: any;
dismissError: () => void;
stickError: () => void;
clearTimeout: () => void;
handleHttpError: (response: any) => void;
dropConversionWatcher: () => void;
}
export interface IContextMenuDivider {
divider: boolean;
}
export interface IContextMenuOption {
// For it to make any sense, a menu option needs one of the next two properties
url?: string;
fn?: () => void;
text: string;
isDisabled?: () => boolean;
isHidden?: () => boolean;
// Does the option appear in the following contexts?
listing: boolean;
creating: boolean;
editing: boolean;
}
export interface IModelController extends IFormScope {
onBaseCtrlReady? : (baseScope: IFormScope) => void; // Optional callback after form is instantiated
onAllReady? : (baseScope: IFormScope) => void; // Optional callback after form is instantiated and populated
contextMenu? : Array<IContextMenuOption | IContextMenuDivider>
}
export interface IBaseFormOptions {
/**
* The style of the form layout. Supported values are horizontalcompact, horizontal, vertical, inline
*/
//TODO supported values should be in an enum
formstyle?: string;
/**
* Model on form scope (defaults to record).
* <li><strong>model</strong> the object in the scope to be bound to the model controller. Specifying
* the model inhibits the generation of the <strong>form</strong> tag unless the <strong>forceform</strong> attribute is set to true</li>
*/
model? : string;
/**
* The name to be given to the form - defaults to myForm
*/
name?: string;
/**
* Normally first field in a form gets autofocus set. Use this to prevent this
*/
noautofocus?: string;
}
export interface IFormAttrs extends IFormOptions, angular.IAttributes {
/**
* Schema used by the form
*/
schema : string;
forceform?: string; // Must be true or omitted. Forces generation of the <strong>form</strong> tag when model is specified
}
export interface IFormOptions extends IBaseFormOptions {
schema? : string;
subkey?: string;
subkeyno?: number;
subschema? : string;
subschemaroot? : string;
viewform? : boolean;
}
export interface IBuiltInRoute {
route: string;
state: string;
templateUrl: string;
options? : any;
}
export interface IRoutingConfig {
hashPrefix: string;
html5Mode: boolean;
routing: string; // What sort of routing do we want? ngroute or uirouter.
// TODO Should be enum
prefix: string; // How do we want to prefix out routes? If not empty string then first character must be slash (which is added if not)
// for example '/db' that gets prepended to all the generated routes. This can be used to
// prevent generated routes (which have a lot of parameters) from clashing with other routes in
// the web app that have nothing to do with CRUD forms
fixedRoutes?: Array<IBuiltInRoute>;
templateFolder?: string; // The folder where the templates for base-list, base-edit and base-analysis live. Internal templates used by default. For pre 0.7.0 behaviour use 'partials/'
add2fngRoutes?: any; // An object to add to the generated routes. One use case would be to add {authenticate: true}
// so that the client authenticates for certain routes
variantsForDemoWebsite? : any; // Just for demo website
variants?: any; // Just for demo website
}
export interface IFngRoute {
newRecord?: boolean;
analyse?: boolean;
modelName?: string;
reportSchemaName? : string;
id? : string;
formName? : string;
tab? : string;
variant? : string; // TODO should be enum of supported frameworks
}
}
declare var formsAngular: angular.IModule; | rows? : number; | random_line_split |
world.py | from __future__ import print_function
import numpy as np
import numdifftools as nd
from scipy.optimize import minimize, minimize_scalar
from scipy.linalg import eigh, inv, norm
from scipy.constants import e as qe # Charge of electron in Coulomb
from matplotlib import pyplot as plt
from .SH import funcSHexp
from .utils import quadru2hess, intersectBounds
amu = 1.66054e-27
class World:
'''
A General, Brand New World
Units:
(potential) energy: eV
length: __scale
frequency: Hz
Axis convention in consistance with <class Electrode>
z: axial
* It doesn't matter whether z is parallel or vertical to the surface or not
Attributes:
__scale :: the typical length in meter. Length unit in the code is self.__scale meter(s)
omega_rf:: the RF ANGULAR frequency
m :: the mass a single ion
bounds :: the boundaies of this world
dc_electrode_list :: a list of (name, electrode) s of dc electrodes
rf_electrode_list :: a list of (name, electrode) s of rf electrodes
electrode_dict :: dictionary that electrode_dict[name] = ("dc" or "rf", electrode)
_pseudopot_factor :: the factor in front of the pseudopotential
Methods:
'''
def __init__(self, ionA, omega_rf, scale=1):
"""
__init__(self, ionA, omega_rf, scale=1):
ionA: mass number of the ion
omega_rf: the RF ANGULAR frequency
scale : the typical length in meter. Length unit in the code is self.__scale meter(s)
"""
self.omega_rf = omega_rf
self.m = ionA * amu
self.__scale = scale
self._pseudopot_factor = qe/(4*self.m*(omega_rf**2))/(scale**2)
self.bounds = None # if no boundary, then None
self.electrode_dict = {}
self.rf_electrode_list = []
self.dc_electrode_list = []
def add_electrode(self, e, name, kind, volt):
"""
Add an electrode to the World. Name it with `name.
If kind == 'rf', then add this electrode to the rf electrode dict
as well as to the general electrode dict
"""
e.volt = volt
self.electrode_dict[name] = (kind, e)
if kind=='dc':
self.dc_electrode_list.append((name,e))
if kind=='rf':
self.rf_electrode_list.append((name,e))
def compute_dc_potential(self, r):
v = 0
for nam, e in self.dc_electrode_list:
v += e.compute_potential(r)
return v # the potential energy is automatically electron volts
def compute_dc_field(self, r):
E = np.zeros(3)
for nam, e in self.dc_electrode_list:
E += e.compute_electric_field(r)
return E
def compute_dc_hessian(self, r):
hess = np.zeros((3,3))
for nam, e in self.dc_electrode_list:
hess += e.compute_hessian(r)
return hess
def compute_rf_field(self, r):
"""
Just add up the electric field due to all the rf electrodes
not the gradient of pseudopotential
"""
E = np.zeros((3))
for nam, e in self.rf_electrode_list:
E += e.compute_electric_field(r)
return E
def check_bound(self):
self.bounds = intersectBounds([typ_elec[1].get_region_bounds() for typ_elec in self.electrode_dict.values()])
def compute_rf_null(self, z, xy0=(0,0), onyz=False, bounds=None):
"""
Search the RF null on a certain axial position
xy0: initial guess
onyz: whether restrict y=y0 or not
bounds: array([[xmin, xmax],[ymin, ymax], ...]) shaped (2,2) or (3,2)
"""
if bounds is None:
self.check_bound()
if self.bounds is None:
print("Cannot carry out RF null searching without bounds")
return
else:
bounds = self.bounds
if onyz: # x=0 required
fo = lambda y: sum(self.compute_rf_field(np.array([xy0[0],y,z]))**2)
ym = minimize_scalar(fo, bounds=tuple(bounds[1]), method="Bounded")
if ym.success:
ymy = ym.x
else:
print("@ z=%.3fmm Optimization Failed:"%z, ym.message, '. Returning initial value')
ymy = xy0[1]
yi = np.linspace(bounds[1,0],bounds[1,1],30)
plt.plot(yi, [fo(yy) for yy in yi], label="%.3f"%z)
plt.xlabel("y/mm")
plt.ylabel(r"$E_{RF}^2/\mathrm{(V^2mm^{-2})}$")
plt.title("RF null @x = 0")
# if ym.success:
# plt.plot([ymy],[ym.fun],'x')
# else:
# plt.legend(title="z/mm")
plt.show()
return np.array([xy0[0],ymy,z])
else:
foo= lambda xy: norm(self.compute_rf_field(np.array([xy[0],xy[1],z])))
if self.bounds is None:
xym = minimize(foo, xy0)
else:
xym = minimize(foo, xy0, bounds=bounds[:2])
if xym.success:
return np.array([xym.x[0],xym.x[1],z])
else:
print("Optimization Failure:", xym.message, '. Returning initial value')
return np.array([xy0[0],xy0[1],z])
def compute_pseudopot(self, r):
"""pseudopotential in eV"""
return self._pseudopot_factor*sum(self.compute_rf_field(r)**2)
def compute_pseudopot_hessian_atNULL(self, r):
"""This only valid when r is a RF null!"""
hess = np.zeros((3,3))
for nm, e in self.rf_electrode_list:
hess += e.compute_hessian(r)
return 2*self._pseudopot_factor*(np.dot(hess, hess.T))
def compute_pseudopot_frequencies(self, r):
|
def compute_dc_potential_frequencies(self, r):
'''
As always, this is valid only at the trapping position. Return frequency (not angular frequency)
'''
H = self.compute_dc_hessian(r)
hessdiag, eigvec = eigh(H)
hessdiag /= self.__scale**2
# # hessdiag = nd.Hessdiag( self.compute_total_dc_potential )(r)
# d2Udx2 = ev_to_joule*hessdiag[0]
# d2Udy2 = ev_to_joule*hessdiag[1]
# d2Udz2 = ev_to_joule*hessdiag[2]
# fx = np.sqrt(abs(d2Udx2)/m)/(2*np.pi)
# fy = np.sqrt(abs(d2Udy2)/m)/(2*np.pi)
# fz = np.sqrt(abs(d2Udz2)/m)/(2*np.pi)
return np.sqrt(qe*abs(hessdiag)/self.m)/(2*np.pi), eigvec, hessdiag>0
def compute_full_potential(self, r):
# Full potential in eV
return self.compute_pseudopot(r) + self.compute_dc_potential(r)
# def compute_full_potential_frequencies(self, r):
# '''
# As always, this is valid only at the trapping position. Return frequency (not angular frequency)
# '''
# joule_to_ev = 6.24150934e18 # conversion factor to take joules -> eV
# ev_to_joule = 1.60217657e-19
# m = 6.64215568e-26 # 40 amu in kg
# H = nd.Hessian(self.compute_full_potential)(r)
# freq = np.linalg.eig(H)
# hessdiag = freq[0]
# eigvec = freq[1]
# # hessdiag = nd.Hessdiag( self.compute_total_dc_potential )(r)
# d2Udx2 = ev_to_joule*hessdiag[0]
# d2Udy2 = ev_to_joule*hessdiag[1]
# d2Udz2 = ev_to_joule*hessdiag[2]
# fx = np.sqrt(abs(d2Udx2)/m)/(2*np.pi)
# fy = np.sqrt(abs(d2Udy2)/m)/(2*np.pi)
# fz = np.sqrt(abs(d2Udz2)/m)/(2*np.pi)
# return [fx, fy, fz], eigvec
def local_multipole_arr(self, position, loc_multipoles=['C','Ex','Ey','Ez','U1','U2','U3','U4','U5'], ctrl_electrodes='alldc', r0=1):
"""
local_multipole_arr(self, position, loc_multipoles=['C','Ex','Ey','Ez','U1','U2','U3','U4','U5'], ctrl_electrodes='alldc', r0=1)
Parameters:
position :: a (3,) array indicating the position of interest
loc_multipoles :: a list of multipoles that are of interest at this position
ctrl_electrodes :: a list of the INDICES of dc electrodes of interest
returns the matrix, shaped (len(loc_multipoles), len(ctrl_electrodes)), that maps DC voltages on `ctrl_electrodes to `loc_multipoles at `position
"""
self.loc_multipoles = loc_multipoles
if isinstance(ctrl_electrodes,str) and ctrl_electrodes=='alldc':
ctrl_electrodes = range(len(self.dc_electrode_list))
multipole_arr = np.empty((len(loc_multipoles), len(ctrl_electrodes)),'d')
for i, j in enumerate(ctrl_electrodes):
nam, elec = self.dc_electrode_list[j]
elec.expand_in_multipoles(position, loc_multipoles, r0)
multipole_arr[:,i] = [elec.multipole_dict[multipole] for multipole in loc_multipoles]
return multipole_arr
def multipole_control_vdc_arr(self, pos_ctrl_mults, ctrl_electrodes='alldc', cost_Q='id', r0=1):
"""
multipole_control_vdc_arr(self, pos_ctrl_mults, ctrl_electrodes='alldc', cost_Q='id', r0=1):
Parameters:
position :: a (3,) array indicating the position of interest
pos_ctrl_mults :: a list of (position, list of controlled local multipoles) pairs or a single pair
ctrl_electrodes :: a list of the INDICES of dc electrodes to be multipole-controlled
costQ :: the positive definite matrix Q in the cost function
return: The matrix, shaped (len(self.dc_electrodes), n_mult), controls DC voltages on ctrl_electrodes for pos_ctrl_mults.
n_mult = sum([len(pos_ctrl_mult[1]) for pos_ctrl_mult in pos_ctrl_mults])
Rows that correspond to electrodes that are not multipole-controlled are padded with 0
"""
alle = (isinstance(ctrl_electrodes,str) and ctrl_electrodes=='alldc')
if alle:
ctrl_electrodes = range(len(self.dc_electrode_list))
# Support inputing a single (position, list of controlled local multipoles) pair
if isinstance(pos_ctrl_mults, tuple) and len(pos_ctrl_mults)==2:
pos_ctrl_mults = [pos_ctrl_mults]
n_mult = sum([len(pos_ctrl_mult[1]) for pos_ctrl_mult in pos_ctrl_mults])
n_elec = len(ctrl_electrodes)
if n_mult > n_elec:
raise ValueError("Number of multipoles %d exceeds number of controlled electrodes %d"%(n_mult, n_elec))
multipole_arr = np.empty((n_mult, n_elec), 'd')
pt = 0
for pos, ctrl_mult in pos_ctrl_mults:
multipole_arr[pt:pt+len(ctrl_mult),:] = self.local_multipole_arr(pos, ctrl_mult, ctrl_electrodes, r0)
pt += len(ctrl_mult)
if isinstance(cost_Q,str) and cost_Q=='id':
cost_Q = np.identity(n_elec)
assert cost_Q.shape==(n_elec, n_elec)
cost_P = inv(cost_Q)
A = multipole_arr
assert A.ndim==2 and cost_P.ndim==2
PAt = np.dot(cost_P,A.T)
kernel = inv(np.dot(A,PAt)) # maps a multipole set-up to the half of the Lagrange multipliers
voltage_arr = np.dot(PAt,kernel) # maps a multipole set-up to the optimized voltage set-up
if not alle: #padding
for i in range(len(self.dc_electrode_list)):
if not i in ctrl_electrodes:
voltage_arr = np.insert(voltage_arr, i, np.zeros(n_mult), axis=0)
return voltage_arr
def set_volts(self, voltages, kind):
elist = self.dc_electrode_list if kind=='dc' else self.rf_electrode_list
assert len(voltages)==len(elist)
for i, nm_e in enumerate(elist):
nm_e[1].volt = voltages[i]
def fit_grad_hess(self, pos, h_grid, n_grid=5, order=3):
if n_grid%2==0:
n_grid += 1
gi = h_grid*(np.arange(n_grid) - n_grid//2)
f, res = funcSHexp(self.compute_full_potential, pos, pos[0]+gi, pos[1]+gi, pos[2]+gi, order)
print(res)
pot = f[0]
grad = np.array([-f[2],-f[3],f[1]])
quad = np.array([6*f[7], f[4], 12*f[8], -6*f[6], -6*f[5]])
# print(quad)
return pot, grad, quadru2hess(quad) | '''
This is only valid if xp, yp, zp is the trapping position. Return frequency (i.e. omega/(2*pi))
'''
hessdiag = nd.Hessdiag(self.compute_pseudopot,step=1e-6)(r)/(self.__scale**2)
'''
Now d2Udx2 has units of J/m^2. Then w = sqrt(d2Udx2/(mass)) has units of angular frequency
'''
return np.sqrt(qe*abs(hessdiag)/self.m)/(2*np.pi), hessdiag>0 | identifier_body |
world.py | from __future__ import print_function
import numpy as np
import numdifftools as nd
from scipy.optimize import minimize, minimize_scalar
from scipy.linalg import eigh, inv, norm
from scipy.constants import e as qe # Charge of electron in Coulomb
from matplotlib import pyplot as plt
|
class World:
'''
A General, Brand New World
Units:
(potential) energy: eV
length: __scale
frequency: Hz
Axis convention in consistance with <class Electrode>
z: axial
* It doesn't matter whether z is parallel or vertical to the surface or not
Attributes:
__scale :: the typical length in meter. Length unit in the code is self.__scale meter(s)
omega_rf:: the RF ANGULAR frequency
m :: the mass a single ion
bounds :: the boundaies of this world
dc_electrode_list :: a list of (name, electrode) s of dc electrodes
rf_electrode_list :: a list of (name, electrode) s of rf electrodes
electrode_dict :: dictionary that electrode_dict[name] = ("dc" or "rf", electrode)
_pseudopot_factor :: the factor in front of the pseudopotential
Methods:
'''
def __init__(self, ionA, omega_rf, scale=1):
"""
__init__(self, ionA, omega_rf, scale=1):
ionA: mass number of the ion
omega_rf: the RF ANGULAR frequency
scale : the typical length in meter. Length unit in the code is self.__scale meter(s)
"""
self.omega_rf = omega_rf
self.m = ionA * amu
self.__scale = scale
self._pseudopot_factor = qe/(4*self.m*(omega_rf**2))/(scale**2)
self.bounds = None # if no boundary, then None
self.electrode_dict = {}
self.rf_electrode_list = []
self.dc_electrode_list = []
def add_electrode(self, e, name, kind, volt):
"""
Add an electrode to the World. Name it with `name.
If kind == 'rf', then add this electrode to the rf electrode dict
as well as to the general electrode dict
"""
e.volt = volt
self.electrode_dict[name] = (kind, e)
if kind=='dc':
self.dc_electrode_list.append((name,e))
if kind=='rf':
self.rf_electrode_list.append((name,e))
def compute_dc_potential(self, r):
v = 0
for nam, e in self.dc_electrode_list:
v += e.compute_potential(r)
return v # the potential energy is automatically electron volts
def compute_dc_field(self, r):
E = np.zeros(3)
for nam, e in self.dc_electrode_list:
E += e.compute_electric_field(r)
return E
def compute_dc_hessian(self, r):
hess = np.zeros((3,3))
for nam, e in self.dc_electrode_list:
hess += e.compute_hessian(r)
return hess
def compute_rf_field(self, r):
"""
Just add up the electric field due to all the rf electrodes
not the gradient of pseudopotential
"""
E = np.zeros((3))
for nam, e in self.rf_electrode_list:
E += e.compute_electric_field(r)
return E
def check_bound(self):
self.bounds = intersectBounds([typ_elec[1].get_region_bounds() for typ_elec in self.electrode_dict.values()])
def compute_rf_null(self, z, xy0=(0,0), onyz=False, bounds=None):
"""
Search the RF null on a certain axial position
xy0: initial guess
onyz: whether restrict y=y0 or not
bounds: array([[xmin, xmax],[ymin, ymax], ...]) shaped (2,2) or (3,2)
"""
if bounds is None:
self.check_bound()
if self.bounds is None:
print("Cannot carry out RF null searching without bounds")
return
else:
bounds = self.bounds
if onyz: # x=0 required
fo = lambda y: sum(self.compute_rf_field(np.array([xy0[0],y,z]))**2)
ym = minimize_scalar(fo, bounds=tuple(bounds[1]), method="Bounded")
if ym.success:
ymy = ym.x
else:
print("@ z=%.3fmm Optimization Failed:"%z, ym.message, '. Returning initial value')
ymy = xy0[1]
yi = np.linspace(bounds[1,0],bounds[1,1],30)
plt.plot(yi, [fo(yy) for yy in yi], label="%.3f"%z)
plt.xlabel("y/mm")
plt.ylabel(r"$E_{RF}^2/\mathrm{(V^2mm^{-2})}$")
plt.title("RF null @x = 0")
# if ym.success:
# plt.plot([ymy],[ym.fun],'x')
# else:
# plt.legend(title="z/mm")
plt.show()
return np.array([xy0[0],ymy,z])
else:
foo= lambda xy: norm(self.compute_rf_field(np.array([xy[0],xy[1],z])))
if self.bounds is None:
xym = minimize(foo, xy0)
else:
xym = minimize(foo, xy0, bounds=bounds[:2])
if xym.success:
return np.array([xym.x[0],xym.x[1],z])
else:
print("Optimization Failure:", xym.message, '. Returning initial value')
return np.array([xy0[0],xy0[1],z])
def compute_pseudopot(self, r):
"""pseudopotential in eV"""
return self._pseudopot_factor*sum(self.compute_rf_field(r)**2)
def compute_pseudopot_hessian_atNULL(self, r):
"""This only valid when r is a RF null!"""
hess = np.zeros((3,3))
for nm, e in self.rf_electrode_list:
hess += e.compute_hessian(r)
return 2*self._pseudopot_factor*(np.dot(hess, hess.T))
def compute_pseudopot_frequencies(self, r):
'''
This is only valid if xp, yp, zp is the trapping position. Return frequency (i.e. omega/(2*pi))
'''
hessdiag = nd.Hessdiag(self.compute_pseudopot,step=1e-6)(r)/(self.__scale**2)
'''
Now d2Udx2 has units of J/m^2. Then w = sqrt(d2Udx2/(mass)) has units of angular frequency
'''
return np.sqrt(qe*abs(hessdiag)/self.m)/(2*np.pi), hessdiag>0
def compute_dc_potential_frequencies(self, r):
'''
As always, this is valid only at the trapping position. Return frequency (not angular frequency)
'''
H = self.compute_dc_hessian(r)
hessdiag, eigvec = eigh(H)
hessdiag /= self.__scale**2
# # hessdiag = nd.Hessdiag( self.compute_total_dc_potential )(r)
# d2Udx2 = ev_to_joule*hessdiag[0]
# d2Udy2 = ev_to_joule*hessdiag[1]
# d2Udz2 = ev_to_joule*hessdiag[2]
# fx = np.sqrt(abs(d2Udx2)/m)/(2*np.pi)
# fy = np.sqrt(abs(d2Udy2)/m)/(2*np.pi)
# fz = np.sqrt(abs(d2Udz2)/m)/(2*np.pi)
return np.sqrt(qe*abs(hessdiag)/self.m)/(2*np.pi), eigvec, hessdiag>0
def compute_full_potential(self, r):
# Full potential in eV
return self.compute_pseudopot(r) + self.compute_dc_potential(r)
# def compute_full_potential_frequencies(self, r):
# '''
# As always, this is valid only at the trapping position. Return frequency (not angular frequency)
# '''
# joule_to_ev = 6.24150934e18 # conversion factor to take joules -> eV
# ev_to_joule = 1.60217657e-19
# m = 6.64215568e-26 # 40 amu in kg
# H = nd.Hessian(self.compute_full_potential)(r)
# freq = np.linalg.eig(H)
# hessdiag = freq[0]
# eigvec = freq[1]
# # hessdiag = nd.Hessdiag( self.compute_total_dc_potential )(r)
# d2Udx2 = ev_to_joule*hessdiag[0]
# d2Udy2 = ev_to_joule*hessdiag[1]
# d2Udz2 = ev_to_joule*hessdiag[2]
# fx = np.sqrt(abs(d2Udx2)/m)/(2*np.pi)
# fy = np.sqrt(abs(d2Udy2)/m)/(2*np.pi)
# fz = np.sqrt(abs(d2Udz2)/m)/(2*np.pi)
# return [fx, fy, fz], eigvec
def local_multipole_arr(self, position, loc_multipoles=['C','Ex','Ey','Ez','U1','U2','U3','U4','U5'], ctrl_electrodes='alldc', r0=1):
"""
local_multipole_arr(self, position, loc_multipoles=['C','Ex','Ey','Ez','U1','U2','U3','U4','U5'], ctrl_electrodes='alldc', r0=1)
Parameters:
position :: a (3,) array indicating the position of interest
loc_multipoles :: a list of multipoles that are of interest at this position
ctrl_electrodes :: a list of the INDICES of dc electrodes of interest
returns the matrix, shaped (len(loc_multipoles), len(ctrl_electrodes)), that maps DC voltages on `ctrl_electrodes to `loc_multipoles at `position
"""
self.loc_multipoles = loc_multipoles
if isinstance(ctrl_electrodes,str) and ctrl_electrodes=='alldc':
ctrl_electrodes = range(len(self.dc_electrode_list))
multipole_arr = np.empty((len(loc_multipoles), len(ctrl_electrodes)),'d')
for i, j in enumerate(ctrl_electrodes):
nam, elec = self.dc_electrode_list[j]
elec.expand_in_multipoles(position, loc_multipoles, r0)
multipole_arr[:,i] = [elec.multipole_dict[multipole] for multipole in loc_multipoles]
return multipole_arr
def multipole_control_vdc_arr(self, pos_ctrl_mults, ctrl_electrodes='alldc', cost_Q='id', r0=1):
"""
multipole_control_vdc_arr(self, pos_ctrl_mults, ctrl_electrodes='alldc', cost_Q='id', r0=1):
Parameters:
position :: a (3,) array indicating the position of interest
pos_ctrl_mults :: a list of (position, list of controlled local multipoles) pairs or a single pair
ctrl_electrodes :: a list of the INDICES of dc electrodes to be multipole-controlled
costQ :: the positive definite matrix Q in the cost function
return: The matrix, shaped (len(self.dc_electrodes), n_mult), controls DC voltages on ctrl_electrodes for pos_ctrl_mults.
n_mult = sum([len(pos_ctrl_mult[1]) for pos_ctrl_mult in pos_ctrl_mults])
Rows that correspond to electrodes that are not multipole-controlled are padded with 0
"""
alle = (isinstance(ctrl_electrodes,str) and ctrl_electrodes=='alldc')
if alle:
ctrl_electrodes = range(len(self.dc_electrode_list))
# Support inputing a single (position, list of controlled local multipoles) pair
if isinstance(pos_ctrl_mults, tuple) and len(pos_ctrl_mults)==2:
pos_ctrl_mults = [pos_ctrl_mults]
n_mult = sum([len(pos_ctrl_mult[1]) for pos_ctrl_mult in pos_ctrl_mults])
n_elec = len(ctrl_electrodes)
if n_mult > n_elec:
raise ValueError("Number of multipoles %d exceeds number of controlled electrodes %d"%(n_mult, n_elec))
multipole_arr = np.empty((n_mult, n_elec), 'd')
pt = 0
for pos, ctrl_mult in pos_ctrl_mults:
multipole_arr[pt:pt+len(ctrl_mult),:] = self.local_multipole_arr(pos, ctrl_mult, ctrl_electrodes, r0)
pt += len(ctrl_mult)
if isinstance(cost_Q,str) and cost_Q=='id':
cost_Q = np.identity(n_elec)
assert cost_Q.shape==(n_elec, n_elec)
cost_P = inv(cost_Q)
A = multipole_arr
assert A.ndim==2 and cost_P.ndim==2
PAt = np.dot(cost_P,A.T)
kernel = inv(np.dot(A,PAt)) # maps a multipole set-up to the half of the Lagrange multipliers
voltage_arr = np.dot(PAt,kernel) # maps a multipole set-up to the optimized voltage set-up
if not alle: #padding
for i in range(len(self.dc_electrode_list)):
if not i in ctrl_electrodes:
voltage_arr = np.insert(voltage_arr, i, np.zeros(n_mult), axis=0)
return voltage_arr
def set_volts(self, voltages, kind):
elist = self.dc_electrode_list if kind=='dc' else self.rf_electrode_list
assert len(voltages)==len(elist)
for i, nm_e in enumerate(elist):
nm_e[1].volt = voltages[i]
def fit_grad_hess(self, pos, h_grid, n_grid=5, order=3):
if n_grid%2==0:
n_grid += 1
gi = h_grid*(np.arange(n_grid) - n_grid//2)
f, res = funcSHexp(self.compute_full_potential, pos, pos[0]+gi, pos[1]+gi, pos[2]+gi, order)
print(res)
pot = f[0]
grad = np.array([-f[2],-f[3],f[1]])
quad = np.array([6*f[7], f[4], 12*f[8], -6*f[6], -6*f[5]])
# print(quad)
return pot, grad, quadru2hess(quad) |
from .SH import funcSHexp
from .utils import quadru2hess, intersectBounds
amu = 1.66054e-27
| random_line_split |
world.py | from __future__ import print_function
import numpy as np
import numdifftools as nd
from scipy.optimize import minimize, minimize_scalar
from scipy.linalg import eigh, inv, norm
from scipy.constants import e as qe # Charge of electron in Coulomb
from matplotlib import pyplot as plt
from .SH import funcSHexp
from .utils import quadru2hess, intersectBounds
amu = 1.66054e-27
class World:
'''
A General, Brand New World
Units:
(potential) energy: eV
length: __scale
frequency: Hz
Axis convention in consistance with <class Electrode>
z: axial
* It doesn't matter whether z is parallel or vertical to the surface or not
Attributes:
__scale :: the typical length in meter. Length unit in the code is self.__scale meter(s)
omega_rf:: the RF ANGULAR frequency
m :: the mass a single ion
bounds :: the boundaies of this world
dc_electrode_list :: a list of (name, electrode) s of dc electrodes
rf_electrode_list :: a list of (name, electrode) s of rf electrodes
electrode_dict :: dictionary that electrode_dict[name] = ("dc" or "rf", electrode)
_pseudopot_factor :: the factor in front of the pseudopotential
Methods:
'''
def __init__(self, ionA, omega_rf, scale=1):
"""
__init__(self, ionA, omega_rf, scale=1):
ionA: mass number of the ion
omega_rf: the RF ANGULAR frequency
scale : the typical length in meter. Length unit in the code is self.__scale meter(s)
"""
self.omega_rf = omega_rf
self.m = ionA * amu
self.__scale = scale
self._pseudopot_factor = qe/(4*self.m*(omega_rf**2))/(scale**2)
self.bounds = None # if no boundary, then None
self.electrode_dict = {}
self.rf_electrode_list = []
self.dc_electrode_list = []
def | (self, e, name, kind, volt):
"""
Add an electrode to the World. Name it with `name.
If kind == 'rf', then add this electrode to the rf electrode dict
as well as to the general electrode dict
"""
e.volt = volt
self.electrode_dict[name] = (kind, e)
if kind=='dc':
self.dc_electrode_list.append((name,e))
if kind=='rf':
self.rf_electrode_list.append((name,e))
def compute_dc_potential(self, r):
v = 0
for nam, e in self.dc_electrode_list:
v += e.compute_potential(r)
return v # the potential energy is automatically electron volts
def compute_dc_field(self, r):
E = np.zeros(3)
for nam, e in self.dc_electrode_list:
E += e.compute_electric_field(r)
return E
def compute_dc_hessian(self, r):
hess = np.zeros((3,3))
for nam, e in self.dc_electrode_list:
hess += e.compute_hessian(r)
return hess
def compute_rf_field(self, r):
"""
Just add up the electric field due to all the rf electrodes
not the gradient of pseudopotential
"""
E = np.zeros((3))
for nam, e in self.rf_electrode_list:
E += e.compute_electric_field(r)
return E
def check_bound(self):
self.bounds = intersectBounds([typ_elec[1].get_region_bounds() for typ_elec in self.electrode_dict.values()])
def compute_rf_null(self, z, xy0=(0,0), onyz=False, bounds=None):
"""
Search the RF null on a certain axial position
xy0: initial guess
onyz: whether restrict y=y0 or not
bounds: array([[xmin, xmax],[ymin, ymax], ...]) shaped (2,2) or (3,2)
"""
if bounds is None:
self.check_bound()
if self.bounds is None:
print("Cannot carry out RF null searching without bounds")
return
else:
bounds = self.bounds
if onyz: # x=0 required
fo = lambda y: sum(self.compute_rf_field(np.array([xy0[0],y,z]))**2)
ym = minimize_scalar(fo, bounds=tuple(bounds[1]), method="Bounded")
if ym.success:
ymy = ym.x
else:
print("@ z=%.3fmm Optimization Failed:"%z, ym.message, '. Returning initial value')
ymy = xy0[1]
yi = np.linspace(bounds[1,0],bounds[1,1],30)
plt.plot(yi, [fo(yy) for yy in yi], label="%.3f"%z)
plt.xlabel("y/mm")
plt.ylabel(r"$E_{RF}^2/\mathrm{(V^2mm^{-2})}$")
plt.title("RF null @x = 0")
# if ym.success:
# plt.plot([ymy],[ym.fun],'x')
# else:
# plt.legend(title="z/mm")
plt.show()
return np.array([xy0[0],ymy,z])
else:
foo= lambda xy: norm(self.compute_rf_field(np.array([xy[0],xy[1],z])))
if self.bounds is None:
xym = minimize(foo, xy0)
else:
xym = minimize(foo, xy0, bounds=bounds[:2])
if xym.success:
return np.array([xym.x[0],xym.x[1],z])
else:
print("Optimization Failure:", xym.message, '. Returning initial value')
return np.array([xy0[0],xy0[1],z])
def compute_pseudopot(self, r):
"""pseudopotential in eV"""
return self._pseudopot_factor*sum(self.compute_rf_field(r)**2)
def compute_pseudopot_hessian_atNULL(self, r):
"""This only valid when r is a RF null!"""
hess = np.zeros((3,3))
for nm, e in self.rf_electrode_list:
hess += e.compute_hessian(r)
return 2*self._pseudopot_factor*(np.dot(hess, hess.T))
def compute_pseudopot_frequencies(self, r):
'''
This is only valid if xp, yp, zp is the trapping position. Return frequency (i.e. omega/(2*pi))
'''
hessdiag = nd.Hessdiag(self.compute_pseudopot,step=1e-6)(r)/(self.__scale**2)
'''
Now d2Udx2 has units of J/m^2. Then w = sqrt(d2Udx2/(mass)) has units of angular frequency
'''
return np.sqrt(qe*abs(hessdiag)/self.m)/(2*np.pi), hessdiag>0
def compute_dc_potential_frequencies(self, r):
'''
As always, this is valid only at the trapping position. Return frequency (not angular frequency)
'''
H = self.compute_dc_hessian(r)
hessdiag, eigvec = eigh(H)
hessdiag /= self.__scale**2
# # hessdiag = nd.Hessdiag( self.compute_total_dc_potential )(r)
# d2Udx2 = ev_to_joule*hessdiag[0]
# d2Udy2 = ev_to_joule*hessdiag[1]
# d2Udz2 = ev_to_joule*hessdiag[2]
# fx = np.sqrt(abs(d2Udx2)/m)/(2*np.pi)
# fy = np.sqrt(abs(d2Udy2)/m)/(2*np.pi)
# fz = np.sqrt(abs(d2Udz2)/m)/(2*np.pi)
return np.sqrt(qe*abs(hessdiag)/self.m)/(2*np.pi), eigvec, hessdiag>0
def compute_full_potential(self, r):
# Full potential in eV
return self.compute_pseudopot(r) + self.compute_dc_potential(r)
# def compute_full_potential_frequencies(self, r):
# '''
# As always, this is valid only at the trapping position. Return frequency (not angular frequency)
# '''
# joule_to_ev = 6.24150934e18 # conversion factor to take joules -> eV
# ev_to_joule = 1.60217657e-19
# m = 6.64215568e-26 # 40 amu in kg
# H = nd.Hessian(self.compute_full_potential)(r)
# freq = np.linalg.eig(H)
# hessdiag = freq[0]
# eigvec = freq[1]
# # hessdiag = nd.Hessdiag( self.compute_total_dc_potential )(r)
# d2Udx2 = ev_to_joule*hessdiag[0]
# d2Udy2 = ev_to_joule*hessdiag[1]
# d2Udz2 = ev_to_joule*hessdiag[2]
# fx = np.sqrt(abs(d2Udx2)/m)/(2*np.pi)
# fy = np.sqrt(abs(d2Udy2)/m)/(2*np.pi)
# fz = np.sqrt(abs(d2Udz2)/m)/(2*np.pi)
# return [fx, fy, fz], eigvec
def local_multipole_arr(self, position, loc_multipoles=['C','Ex','Ey','Ez','U1','U2','U3','U4','U5'], ctrl_electrodes='alldc', r0=1):
"""
local_multipole_arr(self, position, loc_multipoles=['C','Ex','Ey','Ez','U1','U2','U3','U4','U5'], ctrl_electrodes='alldc', r0=1)
Parameters:
position :: a (3,) array indicating the position of interest
loc_multipoles :: a list of multipoles that are of interest at this position
ctrl_electrodes :: a list of the INDICES of dc electrodes of interest
returns the matrix, shaped (len(loc_multipoles), len(ctrl_electrodes)), that maps DC voltages on `ctrl_electrodes to `loc_multipoles at `position
"""
self.loc_multipoles = loc_multipoles
if isinstance(ctrl_electrodes,str) and ctrl_electrodes=='alldc':
ctrl_electrodes = range(len(self.dc_electrode_list))
multipole_arr = np.empty((len(loc_multipoles), len(ctrl_electrodes)),'d')
for i, j in enumerate(ctrl_electrodes):
nam, elec = self.dc_electrode_list[j]
elec.expand_in_multipoles(position, loc_multipoles, r0)
multipole_arr[:,i] = [elec.multipole_dict[multipole] for multipole in loc_multipoles]
return multipole_arr
def multipole_control_vdc_arr(self, pos_ctrl_mults, ctrl_electrodes='alldc', cost_Q='id', r0=1):
"""
multipole_control_vdc_arr(self, pos_ctrl_mults, ctrl_electrodes='alldc', cost_Q='id', r0=1):
Parameters:
position :: a (3,) array indicating the position of interest
pos_ctrl_mults :: a list of (position, list of controlled local multipoles) pairs or a single pair
ctrl_electrodes :: a list of the INDICES of dc electrodes to be multipole-controlled
costQ :: the positive definite matrix Q in the cost function
return: The matrix, shaped (len(self.dc_electrodes), n_mult), controls DC voltages on ctrl_electrodes for pos_ctrl_mults.
n_mult = sum([len(pos_ctrl_mult[1]) for pos_ctrl_mult in pos_ctrl_mults])
Rows that correspond to electrodes that are not multipole-controlled are padded with 0
"""
alle = (isinstance(ctrl_electrodes,str) and ctrl_electrodes=='alldc')
if alle:
ctrl_electrodes = range(len(self.dc_electrode_list))
# Support inputing a single (position, list of controlled local multipoles) pair
if isinstance(pos_ctrl_mults, tuple) and len(pos_ctrl_mults)==2:
pos_ctrl_mults = [pos_ctrl_mults]
n_mult = sum([len(pos_ctrl_mult[1]) for pos_ctrl_mult in pos_ctrl_mults])
n_elec = len(ctrl_electrodes)
if n_mult > n_elec:
raise ValueError("Number of multipoles %d exceeds number of controlled electrodes %d"%(n_mult, n_elec))
multipole_arr = np.empty((n_mult, n_elec), 'd')
pt = 0
for pos, ctrl_mult in pos_ctrl_mults:
multipole_arr[pt:pt+len(ctrl_mult),:] = self.local_multipole_arr(pos, ctrl_mult, ctrl_electrodes, r0)
pt += len(ctrl_mult)
if isinstance(cost_Q,str) and cost_Q=='id':
cost_Q = np.identity(n_elec)
assert cost_Q.shape==(n_elec, n_elec)
cost_P = inv(cost_Q)
A = multipole_arr
assert A.ndim==2 and cost_P.ndim==2
PAt = np.dot(cost_P,A.T)
kernel = inv(np.dot(A,PAt)) # maps a multipole set-up to the half of the Lagrange multipliers
voltage_arr = np.dot(PAt,kernel) # maps a multipole set-up to the optimized voltage set-up
if not alle: #padding
for i in range(len(self.dc_electrode_list)):
if not i in ctrl_electrodes:
voltage_arr = np.insert(voltage_arr, i, np.zeros(n_mult), axis=0)
return voltage_arr
def set_volts(self, voltages, kind):
elist = self.dc_electrode_list if kind=='dc' else self.rf_electrode_list
assert len(voltages)==len(elist)
for i, nm_e in enumerate(elist):
nm_e[1].volt = voltages[i]
def fit_grad_hess(self, pos, h_grid, n_grid=5, order=3):
if n_grid%2==0:
n_grid += 1
gi = h_grid*(np.arange(n_grid) - n_grid//2)
f, res = funcSHexp(self.compute_full_potential, pos, pos[0]+gi, pos[1]+gi, pos[2]+gi, order)
print(res)
pot = f[0]
grad = np.array([-f[2],-f[3],f[1]])
quad = np.array([6*f[7], f[4], 12*f[8], -6*f[6], -6*f[5]])
# print(quad)
return pot, grad, quadru2hess(quad) | add_electrode | identifier_name |
world.py | from __future__ import print_function
import numpy as np
import numdifftools as nd
from scipy.optimize import minimize, minimize_scalar
from scipy.linalg import eigh, inv, norm
from scipy.constants import e as qe # Charge of electron in Coulomb
from matplotlib import pyplot as plt
from .SH import funcSHexp
from .utils import quadru2hess, intersectBounds
amu = 1.66054e-27
class World:
'''
A General, Brand New World
Units:
(potential) energy: eV
length: __scale
frequency: Hz
Axis convention in consistance with <class Electrode>
z: axial
* It doesn't matter whether z is parallel or vertical to the surface or not
Attributes:
__scale :: the typical length in meter. Length unit in the code is self.__scale meter(s)
omega_rf:: the RF ANGULAR frequency
m :: the mass a single ion
bounds :: the boundaies of this world
dc_electrode_list :: a list of (name, electrode) s of dc electrodes
rf_electrode_list :: a list of (name, electrode) s of rf electrodes
electrode_dict :: dictionary that electrode_dict[name] = ("dc" or "rf", electrode)
_pseudopot_factor :: the factor in front of the pseudopotential
Methods:
'''
def __init__(self, ionA, omega_rf, scale=1):
"""
__init__(self, ionA, omega_rf, scale=1):
ionA: mass number of the ion
omega_rf: the RF ANGULAR frequency
scale : the typical length in meter. Length unit in the code is self.__scale meter(s)
"""
self.omega_rf = omega_rf
self.m = ionA * amu
self.__scale = scale
self._pseudopot_factor = qe/(4*self.m*(omega_rf**2))/(scale**2)
self.bounds = None # if no boundary, then None
self.electrode_dict = {}
self.rf_electrode_list = []
self.dc_electrode_list = []
def add_electrode(self, e, name, kind, volt):
"""
Add an electrode to the World. Name it with `name.
If kind == 'rf', then add this electrode to the rf electrode dict
as well as to the general electrode dict
"""
e.volt = volt
self.electrode_dict[name] = (kind, e)
if kind=='dc':
self.dc_electrode_list.append((name,e))
if kind=='rf':
self.rf_electrode_list.append((name,e))
def compute_dc_potential(self, r):
v = 0
for nam, e in self.dc_electrode_list:
v += e.compute_potential(r)
return v # the potential energy is automatically electron volts
def compute_dc_field(self, r):
E = np.zeros(3)
for nam, e in self.dc_electrode_list:
E += e.compute_electric_field(r)
return E
def compute_dc_hessian(self, r):
hess = np.zeros((3,3))
for nam, e in self.dc_electrode_list:
hess += e.compute_hessian(r)
return hess
def compute_rf_field(self, r):
"""
Just add up the electric field due to all the rf electrodes
not the gradient of pseudopotential
"""
E = np.zeros((3))
for nam, e in self.rf_electrode_list:
E += e.compute_electric_field(r)
return E
def check_bound(self):
self.bounds = intersectBounds([typ_elec[1].get_region_bounds() for typ_elec in self.electrode_dict.values()])
def compute_rf_null(self, z, xy0=(0,0), onyz=False, bounds=None):
"""
Search the RF null on a certain axial position
xy0: initial guess
onyz: whether restrict y=y0 or not
bounds: array([[xmin, xmax],[ymin, ymax], ...]) shaped (2,2) or (3,2)
"""
if bounds is None:
self.check_bound()
if self.bounds is None:
print("Cannot carry out RF null searching without bounds")
return
else:
bounds = self.bounds
if onyz: # x=0 required
fo = lambda y: sum(self.compute_rf_field(np.array([xy0[0],y,z]))**2)
ym = minimize_scalar(fo, bounds=tuple(bounds[1]), method="Bounded")
if ym.success:
ymy = ym.x
else:
print("@ z=%.3fmm Optimization Failed:"%z, ym.message, '. Returning initial value')
ymy = xy0[1]
yi = np.linspace(bounds[1,0],bounds[1,1],30)
plt.plot(yi, [fo(yy) for yy in yi], label="%.3f"%z)
plt.xlabel("y/mm")
plt.ylabel(r"$E_{RF}^2/\mathrm{(V^2mm^{-2})}$")
plt.title("RF null @x = 0")
# if ym.success:
# plt.plot([ymy],[ym.fun],'x')
# else:
# plt.legend(title="z/mm")
plt.show()
return np.array([xy0[0],ymy,z])
else:
foo= lambda xy: norm(self.compute_rf_field(np.array([xy[0],xy[1],z])))
if self.bounds is None:
xym = minimize(foo, xy0)
else:
xym = minimize(foo, xy0, bounds=bounds[:2])
if xym.success:
return np.array([xym.x[0],xym.x[1],z])
else:
print("Optimization Failure:", xym.message, '. Returning initial value')
return np.array([xy0[0],xy0[1],z])
def compute_pseudopot(self, r):
"""pseudopotential in eV"""
return self._pseudopot_factor*sum(self.compute_rf_field(r)**2)
def compute_pseudopot_hessian_atNULL(self, r):
"""This only valid when r is a RF null!"""
hess = np.zeros((3,3))
for nm, e in self.rf_electrode_list:
hess += e.compute_hessian(r)
return 2*self._pseudopot_factor*(np.dot(hess, hess.T))
def compute_pseudopot_frequencies(self, r):
'''
This is only valid if xp, yp, zp is the trapping position. Return frequency (i.e. omega/(2*pi))
'''
hessdiag = nd.Hessdiag(self.compute_pseudopot,step=1e-6)(r)/(self.__scale**2)
'''
Now d2Udx2 has units of J/m^2. Then w = sqrt(d2Udx2/(mass)) has units of angular frequency
'''
return np.sqrt(qe*abs(hessdiag)/self.m)/(2*np.pi), hessdiag>0
def compute_dc_potential_frequencies(self, r):
'''
As always, this is valid only at the trapping position. Return frequency (not angular frequency)
'''
H = self.compute_dc_hessian(r)
hessdiag, eigvec = eigh(H)
hessdiag /= self.__scale**2
# # hessdiag = nd.Hessdiag( self.compute_total_dc_potential )(r)
# d2Udx2 = ev_to_joule*hessdiag[0]
# d2Udy2 = ev_to_joule*hessdiag[1]
# d2Udz2 = ev_to_joule*hessdiag[2]
# fx = np.sqrt(abs(d2Udx2)/m)/(2*np.pi)
# fy = np.sqrt(abs(d2Udy2)/m)/(2*np.pi)
# fz = np.sqrt(abs(d2Udz2)/m)/(2*np.pi)
return np.sqrt(qe*abs(hessdiag)/self.m)/(2*np.pi), eigvec, hessdiag>0
def compute_full_potential(self, r):
# Full potential in eV
return self.compute_pseudopot(r) + self.compute_dc_potential(r)
# def compute_full_potential_frequencies(self, r):
# '''
# As always, this is valid only at the trapping position. Return frequency (not angular frequency)
# '''
# joule_to_ev = 6.24150934e18 # conversion factor to take joules -> eV
# ev_to_joule = 1.60217657e-19
# m = 6.64215568e-26 # 40 amu in kg
# H = nd.Hessian(self.compute_full_potential)(r)
# freq = np.linalg.eig(H)
# hessdiag = freq[0]
# eigvec = freq[1]
# # hessdiag = nd.Hessdiag( self.compute_total_dc_potential )(r)
# d2Udx2 = ev_to_joule*hessdiag[0]
# d2Udy2 = ev_to_joule*hessdiag[1]
# d2Udz2 = ev_to_joule*hessdiag[2]
# fx = np.sqrt(abs(d2Udx2)/m)/(2*np.pi)
# fy = np.sqrt(abs(d2Udy2)/m)/(2*np.pi)
# fz = np.sqrt(abs(d2Udz2)/m)/(2*np.pi)
# return [fx, fy, fz], eigvec
def local_multipole_arr(self, position, loc_multipoles=['C','Ex','Ey','Ez','U1','U2','U3','U4','U5'], ctrl_electrodes='alldc', r0=1):
"""
local_multipole_arr(self, position, loc_multipoles=['C','Ex','Ey','Ez','U1','U2','U3','U4','U5'], ctrl_electrodes='alldc', r0=1)
Parameters:
position :: a (3,) array indicating the position of interest
loc_multipoles :: a list of multipoles that are of interest at this position
ctrl_electrodes :: a list of the INDICES of dc electrodes of interest
returns the matrix, shaped (len(loc_multipoles), len(ctrl_electrodes)), that maps DC voltages on `ctrl_electrodes to `loc_multipoles at `position
"""
self.loc_multipoles = loc_multipoles
if isinstance(ctrl_electrodes,str) and ctrl_electrodes=='alldc':
ctrl_electrodes = range(len(self.dc_electrode_list))
multipole_arr = np.empty((len(loc_multipoles), len(ctrl_electrodes)),'d')
for i, j in enumerate(ctrl_electrodes):
nam, elec = self.dc_electrode_list[j]
elec.expand_in_multipoles(position, loc_multipoles, r0)
multipole_arr[:,i] = [elec.multipole_dict[multipole] for multipole in loc_multipoles]
return multipole_arr
def multipole_control_vdc_arr(self, pos_ctrl_mults, ctrl_electrodes='alldc', cost_Q='id', r0=1):
"""
multipole_control_vdc_arr(self, pos_ctrl_mults, ctrl_electrodes='alldc', cost_Q='id', r0=1):
Parameters:
position :: a (3,) array indicating the position of interest
pos_ctrl_mults :: a list of (position, list of controlled local multipoles) pairs or a single pair
ctrl_electrodes :: a list of the INDICES of dc electrodes to be multipole-controlled
costQ :: the positive definite matrix Q in the cost function
return: The matrix, shaped (len(self.dc_electrodes), n_mult), controls DC voltages on ctrl_electrodes for pos_ctrl_mults.
n_mult = sum([len(pos_ctrl_mult[1]) for pos_ctrl_mult in pos_ctrl_mults])
Rows that correspond to electrodes that are not multipole-controlled are padded with 0
"""
alle = (isinstance(ctrl_electrodes,str) and ctrl_electrodes=='alldc')
if alle:
ctrl_electrodes = range(len(self.dc_electrode_list))
# Support inputing a single (position, list of controlled local multipoles) pair
if isinstance(pos_ctrl_mults, tuple) and len(pos_ctrl_mults)==2:
|
n_mult = sum([len(pos_ctrl_mult[1]) for pos_ctrl_mult in pos_ctrl_mults])
n_elec = len(ctrl_electrodes)
if n_mult > n_elec:
raise ValueError("Number of multipoles %d exceeds number of controlled electrodes %d"%(n_mult, n_elec))
multipole_arr = np.empty((n_mult, n_elec), 'd')
pt = 0
for pos, ctrl_mult in pos_ctrl_mults:
multipole_arr[pt:pt+len(ctrl_mult),:] = self.local_multipole_arr(pos, ctrl_mult, ctrl_electrodes, r0)
pt += len(ctrl_mult)
if isinstance(cost_Q,str) and cost_Q=='id':
cost_Q = np.identity(n_elec)
assert cost_Q.shape==(n_elec, n_elec)
cost_P = inv(cost_Q)
A = multipole_arr
assert A.ndim==2 and cost_P.ndim==2
PAt = np.dot(cost_P,A.T)
kernel = inv(np.dot(A,PAt)) # maps a multipole set-up to the half of the Lagrange multipliers
voltage_arr = np.dot(PAt,kernel) # maps a multipole set-up to the optimized voltage set-up
if not alle: #padding
for i in range(len(self.dc_electrode_list)):
if not i in ctrl_electrodes:
voltage_arr = np.insert(voltage_arr, i, np.zeros(n_mult), axis=0)
return voltage_arr
def set_volts(self, voltages, kind):
elist = self.dc_electrode_list if kind=='dc' else self.rf_electrode_list
assert len(voltages)==len(elist)
for i, nm_e in enumerate(elist):
nm_e[1].volt = voltages[i]
def fit_grad_hess(self, pos, h_grid, n_grid=5, order=3):
if n_grid%2==0:
n_grid += 1
gi = h_grid*(np.arange(n_grid) - n_grid//2)
f, res = funcSHexp(self.compute_full_potential, pos, pos[0]+gi, pos[1]+gi, pos[2]+gi, order)
print(res)
pot = f[0]
grad = np.array([-f[2],-f[3],f[1]])
quad = np.array([6*f[7], f[4], 12*f[8], -6*f[6], -6*f[5]])
# print(quad)
return pot, grad, quadru2hess(quad) | pos_ctrl_mults = [pos_ctrl_mults] | conditional_block |
train.py | import argparse
import random
import numpy as np
import torch
import torch.distributed as dist
from torch.optim import Adam, AdamW, SGD
from torch.optim.adagrad import Adagrad
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data import DataLoader, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from gyrospd import config
from gyrospd import losses
from gyrospd.utils import set_seed, get_logging, get_inverted_triples
from gyrospd.runner import Runner
from gyrospd.models import *
from gyrospd.manifolds.metrics import MetricType
def config_parser(parser):
# Data options
parser.add_argument("--data", required=True, type=str, help="Name of data set folder")
parser.add_argument("--run_id", required=True, type=str, help="Name of model/run to export")
# Model
parser.add_argument("--model", default="tgattnspd", type=str, help="Model type: tgspd, tgattnspd")
parser.add_argument("--metric", default="riem", type=str, help=f"Metrics: {[t.value for t in list(MetricType)]}")
parser.add_argument("--dims", default=4, type=int, help="Dimensions for the model.")
parser.add_argument("--train_bias", dest='train_bias', action='store_true', default=False,
help="Whether to train scaling or not.")
parser.add_argument("--use_hrh", default=1, type=int, help="Whether to use HRH or RHR in lhs op.")
parser.add_argument("--inverse_tail", default=0, type=int, help="Whether to use t or t^-1 as tail")
parser.add_argument("--loss", choices=["BCELoss", "HingeLoss"], default="BCELoss", help="Loss function")
parser.add_argument("--hinge_margin", default=1, type=float, help="Margin for hinge loss function")
parser.add_argument("--regularizer", choices=["N3", "F2"], default="N3", help="Regularizer")
parser.add_argument("--regularizer_weight", default=0, type=float, help="Regularization weight")
parser.add_argument("--neg_sample_size", default=1, type=int, help="Negative sample size, -1 to not use")
parser.add_argument("--double_neg", action="store_true", default=False,
help="Whether to negative sample both head and tail entities")
# optim and config
parser.add_argument("--optim", default="adam", type=str, help="Optimization method.")
parser.add_argument("--amsgrad", action="store_true", default=False, help="Use AMS grad in Adam or AdamW")
parser.add_argument("--learning_rate", default=1e-3, type=float, help="Starting learning rate.")
parser.add_argument("--reduce_factor", default=2, type=float, help="Factor to reduce lr on plateau.")
parser.add_argument("--weight_decay", default=0.00, type=float, help="L2 Regularization.")
parser.add_argument("--val_every", default=5, type=int, help="Runs validation every n epochs.")
parser.add_argument("--patience", default=50, type=int, help="Epochs of patience for scheduler and early stop.")
parser.add_argument("--max_grad_norm", default=50.0, type=float, help="Max gradient norm.")
parser.add_argument("--batch_size", default=1000, type=int, help="Batch size.")
parser.add_argument("--eval_batch_size", default=100, type=int, help="Eval batch size. Has impact only on memory")
parser.add_argument("--epochs", default=100, type=int, help="Number of training epochs.")
parser.add_argument("--burnin", default=10, type=int, help="Number of initial epochs to train with reduce lr.")
parser.add_argument("--grad_accum_steps", default=1, type=int,
help="Number of update steps to accumulate before backward.")
parser.add_argument("--subsample", default=-1, type=float, help="Subsamples a % of valid triples")
# Others
parser.add_argument("--dtype", default="single", type=str, choices=["single", "double"], help="Machine precision")
parser.add_argument("--local_rank", type=int, help="Local process rank assigned by torch.distributed.launch")
parser.add_argument("--n_procs", default=4, type=int, help="Number of process to create")
parser.add_argument("--load_ckpt", default="", type=str, help="Load model from this file")
parser.add_argument("--results_file", default="out/results.csv", type=str, help="Exports final results to this file")
parser.add_argument("--save_epochs", default=10001, type=int, help="Exports every n epochs")
parser.add_argument("--seed", default=42, type=int, help="Seed")
parser.add_argument("--debug", dest='debug', action='store_true', default=False, help="Debug mode")
def load_ckpt(args, log):
saved_data = torch.load(args.load_ckpt) if args.load_ckpt else {}
args.init_epoch = 0
args.current_iter = 1
if saved_data:
local_rank = args.local_rank
if local_rank == 0:
log.info(f"Loaded CKPT: {args.load_ckpt}, Args in ckpt: {saved_data['args']}")
args = saved_data["args"]
args.local_rank = local_rank
args.init_epoch = saved_data["epochs"]
args.current_iter += 1
return args, saved_data
def get_model(args, saved_data=None):
if args.model == "tgspd":
model = TgSPDModel(args)
elif args.model == "tgrotspd":
model = TgSPDRotationModel(args)
elif args.model == "tgrefspd":
model = TgSPDReflectionModel(args)
elif args.model == "tgattnspd":
model = TgSPDAttnModel(args)
else:
|
model.to(config.DEVICE)
model = DistributedDataParallel(model, device_ids=None)
if saved_data:
model.load_state_dict(saved_data["model_state"])
return model
def get_optimizer(model, args, saved_data=None):
if args.optim == "sgd":
optim = SGD(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay)
elif args.optim == "adam":
optim = Adam(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay, amsgrad=args.amsgrad)
elif args.optim == "adamw":
optim = AdamW(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay, amsgrad=args.amsgrad)
elif args.optim == "adagrad":
optim = Adagrad(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay)
else:
raise ValueError(f"Unkown --optim option: {args.optim}")
if saved_data:
optim.load_state_dict(saved_data["optimizer_state"])
return optim
def get_scheduler(optimizer, args, saved_data=None):
patience = round(args.patience / args.val_every)
factor = 1 / float(args.reduce_factor)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=patience, factor=factor, mode="max")
if saved_data:
scheduler.load_state_dict(saved_data["scheduler_state"])
return scheduler
def build_data_loader(split, batch_size, shuffle, args):
"""
:param split: torch.LongTensor b x 3 with triples (h, r, t)
:param batch_size: int
:param shuffle: bool
:param args:
:return: torch DataLoader set up with distributed sampler
"""
tensor_dataset = TensorDataset(split)
sampler = DistributedSampler(tensor_dataset, num_replicas=args.n_procs, rank=args.local_rank, shuffle=shuffle)
data_loader = DataLoader(dataset=tensor_dataset, batch_size=batch_size, shuffle=False, num_workers=0,
pin_memory=True, sampler=sampler)
return data_loader
def load_training_data(args, log):
data_path = config.PREP_PATH / f"{args.data}/{config.PREPROCESSED_FILE}"
log.info(f"Loading data from {data_path}")
data = torch.load(str(data_path))
rel2id = data["rel2id"]
num_relations = len(rel2id)
train, valid, test = data["train"], data["valid"], data["test"]
if args.debug:
train = train[:10]
valid = valid[:4]
test = test[:4]
args.batch_size = 40
augmented = get_inverted_triples(train, num_relations)
train = np.vstack((train, augmented))
valid_proportion = args.subsample if args.subsample > 0 else 1
valid = valid[:int(len(valid) * valid_proportion)]
inverted_valid = get_inverted_triples(valid, num_relations)
inverted_test = get_inverted_triples(test, num_relations)
train, valid, inverted_valid, test, inverted_test = [torch.LongTensor(split) for split in
[train, valid, inverted_valid, test, inverted_test]]
train_batch_size = args.batch_size // args.n_procs
eval_batch_size = args.eval_batch_size // args.n_procs
log.info(f"Batch size {train_batch_size} for {args.local_rank}/{args.n_procs} processes")
train_loader = build_data_loader(train, train_batch_size, shuffle=True, args=args)
lhs_valid_loader = build_data_loader(valid, eval_batch_size, shuffle=False, args=args)
rhs_valid_loader = build_data_loader(inverted_valid, eval_batch_size, shuffle=False, args=args)
lhs_test_loader = build_data_loader(test, eval_batch_size, shuffle=False, args=args)
rhs_test_loader = build_data_loader(inverted_test, eval_batch_size, shuffle=False, args=args)
valid_loaders = {"lhs": lhs_valid_loader, "rhs": rhs_valid_loader}
test_loaders = {"lhs": lhs_test_loader, "rhs": rhs_test_loader}
# add the inverted relations into the rel2id dict
invrel2id = {f"INV_{rel_name}": rel_id + num_relations for rel_name, rel_id in rel2id.items()}
rel2id = {**rel2id, **invrel2id}
return train_loader, valid_loaders, test_loaders, data["filters"], data["ent2id"], rel2id
def main():
parser = argparse.ArgumentParser("train.py")
config_parser(parser)
args = parser.parse_args()
log = get_logging()
torch.set_default_dtype(torch.float64 if args.dtype == "double" else torch.float32)
args, saved_data = load_ckpt(args, log)
torch.autograd.set_detect_anomaly(args.debug)
# sets random seed
seed = args.seed if args.seed > 0 else random.randint(1, 1000000)
set_seed(seed)
if args.local_rank == 0:
log.info(args)
dist.init_process_group(backend=config.BACKEND, init_method='env://') # world_size=args.n_procs, rank=args.local_rank)
# correct parameters due to distributed training. In case of loading ckpt, this value will be
# ignored when we load the optimizer state dict
args.learning_rate *= args.n_procs
train_loader, valid_loaders, test_loaders, filters, ent2id, rel2id = load_training_data(args, log)
args.num_entities = len(ent2id)
args.num_relations = len(rel2id) # already has inverted relations
model = get_model(args, saved_data)
optimizer = get_optimizer(model, args, saved_data)
scheduler = get_scheduler(optimizer, args, saved_data)
loss = getattr(losses, args.loss)(args)
if args.local_rank == 0:
log.info(f"GPU's available: {torch.cuda.device_count()}")
n_params = sum([p.nelement() for p in model.parameters() if p.requires_grad])
log.info(f"Entities: {args.num_entities}, relations: {args.num_relations}, dims: {args.dims}, "
f"number of parameters: {n_params}")
log.info(model)
log.info(f"Triples train: {len(train_loader.dataset)}, valid lhs: {len(valid_loaders['lhs'].dataset)}, "
f"test lhs: {len(test_loaders['lhs'].dataset)}")
if args.model in ("spd", "tgspd") and args.metric == "fone" and args.dims % 2 == 1:
log.info("WARNING: SPD with Fone Metric and uneven number of dimensions can be unstable!!!")
runner = Runner(model, optimizer, scheduler=scheduler, loss=loss, ent2id=ent2id, rel2id=rel2id, args=args,
train_loader=train_loader, valid_loaders=valid_loaders, test_loaders=test_loaders, filters=filters)
runner.run()
log.info("Done!")
if __name__ == "__main__":
main()
| raise ValueError(f"Unrecognized model argument: {args.model}") | conditional_block |
train.py | import argparse
import random
import numpy as np
import torch
import torch.distributed as dist
from torch.optim import Adam, AdamW, SGD
from torch.optim.adagrad import Adagrad
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data import DataLoader, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from gyrospd import config
from gyrospd import losses
from gyrospd.utils import set_seed, get_logging, get_inverted_triples
from gyrospd.runner import Runner
from gyrospd.models import *
from gyrospd.manifolds.metrics import MetricType
def config_parser(parser):
# Data options
parser.add_argument("--data", required=True, type=str, help="Name of data set folder")
parser.add_argument("--run_id", required=True, type=str, help="Name of model/run to export")
# Model
parser.add_argument("--model", default="tgattnspd", type=str, help="Model type: tgspd, tgattnspd")
parser.add_argument("--metric", default="riem", type=str, help=f"Metrics: {[t.value for t in list(MetricType)]}")
parser.add_argument("--dims", default=4, type=int, help="Dimensions for the model.")
parser.add_argument("--train_bias", dest='train_bias', action='store_true', default=False,
help="Whether to train scaling or not.")
parser.add_argument("--use_hrh", default=1, type=int, help="Whether to use HRH or RHR in lhs op.")
parser.add_argument("--inverse_tail", default=0, type=int, help="Whether to use t or t^-1 as tail")
parser.add_argument("--loss", choices=["BCELoss", "HingeLoss"], default="BCELoss", help="Loss function")
parser.add_argument("--hinge_margin", default=1, type=float, help="Margin for hinge loss function")
parser.add_argument("--regularizer", choices=["N3", "F2"], default="N3", help="Regularizer")
parser.add_argument("--regularizer_weight", default=0, type=float, help="Regularization weight")
parser.add_argument("--neg_sample_size", default=1, type=int, help="Negative sample size, -1 to not use")
parser.add_argument("--double_neg", action="store_true", default=False,
help="Whether to negative sample both head and tail entities")
# optim and config
parser.add_argument("--optim", default="adam", type=str, help="Optimization method.")
parser.add_argument("--amsgrad", action="store_true", default=False, help="Use AMS grad in Adam or AdamW")
parser.add_argument("--learning_rate", default=1e-3, type=float, help="Starting learning rate.")
parser.add_argument("--reduce_factor", default=2, type=float, help="Factor to reduce lr on plateau.") | parser.add_argument("--max_grad_norm", default=50.0, type=float, help="Max gradient norm.")
parser.add_argument("--batch_size", default=1000, type=int, help="Batch size.")
parser.add_argument("--eval_batch_size", default=100, type=int, help="Eval batch size. Has impact only on memory")
parser.add_argument("--epochs", default=100, type=int, help="Number of training epochs.")
parser.add_argument("--burnin", default=10, type=int, help="Number of initial epochs to train with reduce lr.")
parser.add_argument("--grad_accum_steps", default=1, type=int,
help="Number of update steps to accumulate before backward.")
parser.add_argument("--subsample", default=-1, type=float, help="Subsamples a % of valid triples")
# Others
parser.add_argument("--dtype", default="single", type=str, choices=["single", "double"], help="Machine precision")
parser.add_argument("--local_rank", type=int, help="Local process rank assigned by torch.distributed.launch")
parser.add_argument("--n_procs", default=4, type=int, help="Number of process to create")
parser.add_argument("--load_ckpt", default="", type=str, help="Load model from this file")
parser.add_argument("--results_file", default="out/results.csv", type=str, help="Exports final results to this file")
parser.add_argument("--save_epochs", default=10001, type=int, help="Exports every n epochs")
parser.add_argument("--seed", default=42, type=int, help="Seed")
parser.add_argument("--debug", dest='debug', action='store_true', default=False, help="Debug mode")
def load_ckpt(args, log):
saved_data = torch.load(args.load_ckpt) if args.load_ckpt else {}
args.init_epoch = 0
args.current_iter = 1
if saved_data:
local_rank = args.local_rank
if local_rank == 0:
log.info(f"Loaded CKPT: {args.load_ckpt}, Args in ckpt: {saved_data['args']}")
args = saved_data["args"]
args.local_rank = local_rank
args.init_epoch = saved_data["epochs"]
args.current_iter += 1
return args, saved_data
def get_model(args, saved_data=None):
if args.model == "tgspd":
model = TgSPDModel(args)
elif args.model == "tgrotspd":
model = TgSPDRotationModel(args)
elif args.model == "tgrefspd":
model = TgSPDReflectionModel(args)
elif args.model == "tgattnspd":
model = TgSPDAttnModel(args)
else:
raise ValueError(f"Unrecognized model argument: {args.model}")
model.to(config.DEVICE)
model = DistributedDataParallel(model, device_ids=None)
if saved_data:
model.load_state_dict(saved_data["model_state"])
return model
def get_optimizer(model, args, saved_data=None):
if args.optim == "sgd":
optim = SGD(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay)
elif args.optim == "adam":
optim = Adam(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay, amsgrad=args.amsgrad)
elif args.optim == "adamw":
optim = AdamW(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay, amsgrad=args.amsgrad)
elif args.optim == "adagrad":
optim = Adagrad(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay)
else:
raise ValueError(f"Unkown --optim option: {args.optim}")
if saved_data:
optim.load_state_dict(saved_data["optimizer_state"])
return optim
def get_scheduler(optimizer, args, saved_data=None):
patience = round(args.patience / args.val_every)
factor = 1 / float(args.reduce_factor)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=patience, factor=factor, mode="max")
if saved_data:
scheduler.load_state_dict(saved_data["scheduler_state"])
return scheduler
def build_data_loader(split, batch_size, shuffle, args):
"""
:param split: torch.LongTensor b x 3 with triples (h, r, t)
:param batch_size: int
:param shuffle: bool
:param args:
:return: torch DataLoader set up with distributed sampler
"""
tensor_dataset = TensorDataset(split)
sampler = DistributedSampler(tensor_dataset, num_replicas=args.n_procs, rank=args.local_rank, shuffle=shuffle)
data_loader = DataLoader(dataset=tensor_dataset, batch_size=batch_size, shuffle=False, num_workers=0,
pin_memory=True, sampler=sampler)
return data_loader
def load_training_data(args, log):
data_path = config.PREP_PATH / f"{args.data}/{config.PREPROCESSED_FILE}"
log.info(f"Loading data from {data_path}")
data = torch.load(str(data_path))
rel2id = data["rel2id"]
num_relations = len(rel2id)
train, valid, test = data["train"], data["valid"], data["test"]
if args.debug:
train = train[:10]
valid = valid[:4]
test = test[:4]
args.batch_size = 40
augmented = get_inverted_triples(train, num_relations)
train = np.vstack((train, augmented))
valid_proportion = args.subsample if args.subsample > 0 else 1
valid = valid[:int(len(valid) * valid_proportion)]
inverted_valid = get_inverted_triples(valid, num_relations)
inverted_test = get_inverted_triples(test, num_relations)
train, valid, inverted_valid, test, inverted_test = [torch.LongTensor(split) for split in
[train, valid, inverted_valid, test, inverted_test]]
train_batch_size = args.batch_size // args.n_procs
eval_batch_size = args.eval_batch_size // args.n_procs
log.info(f"Batch size {train_batch_size} for {args.local_rank}/{args.n_procs} processes")
train_loader = build_data_loader(train, train_batch_size, shuffle=True, args=args)
lhs_valid_loader = build_data_loader(valid, eval_batch_size, shuffle=False, args=args)
rhs_valid_loader = build_data_loader(inverted_valid, eval_batch_size, shuffle=False, args=args)
lhs_test_loader = build_data_loader(test, eval_batch_size, shuffle=False, args=args)
rhs_test_loader = build_data_loader(inverted_test, eval_batch_size, shuffle=False, args=args)
valid_loaders = {"lhs": lhs_valid_loader, "rhs": rhs_valid_loader}
test_loaders = {"lhs": lhs_test_loader, "rhs": rhs_test_loader}
# add the inverted relations into the rel2id dict
invrel2id = {f"INV_{rel_name}": rel_id + num_relations for rel_name, rel_id in rel2id.items()}
rel2id = {**rel2id, **invrel2id}
return train_loader, valid_loaders, test_loaders, data["filters"], data["ent2id"], rel2id
def main():
parser = argparse.ArgumentParser("train.py")
config_parser(parser)
args = parser.parse_args()
log = get_logging()
torch.set_default_dtype(torch.float64 if args.dtype == "double" else torch.float32)
args, saved_data = load_ckpt(args, log)
torch.autograd.set_detect_anomaly(args.debug)
# sets random seed
seed = args.seed if args.seed > 0 else random.randint(1, 1000000)
set_seed(seed)
if args.local_rank == 0:
log.info(args)
dist.init_process_group(backend=config.BACKEND, init_method='env://') # world_size=args.n_procs, rank=args.local_rank)
# correct parameters due to distributed training. In case of loading ckpt, this value will be
# ignored when we load the optimizer state dict
args.learning_rate *= args.n_procs
train_loader, valid_loaders, test_loaders, filters, ent2id, rel2id = load_training_data(args, log)
args.num_entities = len(ent2id)
args.num_relations = len(rel2id) # already has inverted relations
model = get_model(args, saved_data)
optimizer = get_optimizer(model, args, saved_data)
scheduler = get_scheduler(optimizer, args, saved_data)
loss = getattr(losses, args.loss)(args)
if args.local_rank == 0:
log.info(f"GPU's available: {torch.cuda.device_count()}")
n_params = sum([p.nelement() for p in model.parameters() if p.requires_grad])
log.info(f"Entities: {args.num_entities}, relations: {args.num_relations}, dims: {args.dims}, "
f"number of parameters: {n_params}")
log.info(model)
log.info(f"Triples train: {len(train_loader.dataset)}, valid lhs: {len(valid_loaders['lhs'].dataset)}, "
f"test lhs: {len(test_loaders['lhs'].dataset)}")
if args.model in ("spd", "tgspd") and args.metric == "fone" and args.dims % 2 == 1:
log.info("WARNING: SPD with Fone Metric and uneven number of dimensions can be unstable!!!")
runner = Runner(model, optimizer, scheduler=scheduler, loss=loss, ent2id=ent2id, rel2id=rel2id, args=args,
train_loader=train_loader, valid_loaders=valid_loaders, test_loaders=test_loaders, filters=filters)
runner.run()
log.info("Done!")
if __name__ == "__main__":
main() | parser.add_argument("--weight_decay", default=0.00, type=float, help="L2 Regularization.")
parser.add_argument("--val_every", default=5, type=int, help="Runs validation every n epochs.")
parser.add_argument("--patience", default=50, type=int, help="Epochs of patience for scheduler and early stop.") | random_line_split |
train.py | import argparse
import random
import numpy as np
import torch
import torch.distributed as dist
from torch.optim import Adam, AdamW, SGD
from torch.optim.adagrad import Adagrad
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data import DataLoader, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from gyrospd import config
from gyrospd import losses
from gyrospd.utils import set_seed, get_logging, get_inverted_triples
from gyrospd.runner import Runner
from gyrospd.models import *
from gyrospd.manifolds.metrics import MetricType
def config_parser(parser):
# Data options
parser.add_argument("--data", required=True, type=str, help="Name of data set folder")
parser.add_argument("--run_id", required=True, type=str, help="Name of model/run to export")
# Model
parser.add_argument("--model", default="tgattnspd", type=str, help="Model type: tgspd, tgattnspd")
parser.add_argument("--metric", default="riem", type=str, help=f"Metrics: {[t.value for t in list(MetricType)]}")
parser.add_argument("--dims", default=4, type=int, help="Dimensions for the model.")
parser.add_argument("--train_bias", dest='train_bias', action='store_true', default=False,
help="Whether to train scaling or not.")
parser.add_argument("--use_hrh", default=1, type=int, help="Whether to use HRH or RHR in lhs op.")
parser.add_argument("--inverse_tail", default=0, type=int, help="Whether to use t or t^-1 as tail")
parser.add_argument("--loss", choices=["BCELoss", "HingeLoss"], default="BCELoss", help="Loss function")
parser.add_argument("--hinge_margin", default=1, type=float, help="Margin for hinge loss function")
parser.add_argument("--regularizer", choices=["N3", "F2"], default="N3", help="Regularizer")
parser.add_argument("--regularizer_weight", default=0, type=float, help="Regularization weight")
parser.add_argument("--neg_sample_size", default=1, type=int, help="Negative sample size, -1 to not use")
parser.add_argument("--double_neg", action="store_true", default=False,
help="Whether to negative sample both head and tail entities")
# optim and config
parser.add_argument("--optim", default="adam", type=str, help="Optimization method.")
parser.add_argument("--amsgrad", action="store_true", default=False, help="Use AMS grad in Adam or AdamW")
parser.add_argument("--learning_rate", default=1e-3, type=float, help="Starting learning rate.")
parser.add_argument("--reduce_factor", default=2, type=float, help="Factor to reduce lr on plateau.")
parser.add_argument("--weight_decay", default=0.00, type=float, help="L2 Regularization.")
parser.add_argument("--val_every", default=5, type=int, help="Runs validation every n epochs.")
parser.add_argument("--patience", default=50, type=int, help="Epochs of patience for scheduler and early stop.")
parser.add_argument("--max_grad_norm", default=50.0, type=float, help="Max gradient norm.")
parser.add_argument("--batch_size", default=1000, type=int, help="Batch size.")
parser.add_argument("--eval_batch_size", default=100, type=int, help="Eval batch size. Has impact only on memory")
parser.add_argument("--epochs", default=100, type=int, help="Number of training epochs.")
parser.add_argument("--burnin", default=10, type=int, help="Number of initial epochs to train with reduce lr.")
parser.add_argument("--grad_accum_steps", default=1, type=int,
help="Number of update steps to accumulate before backward.")
parser.add_argument("--subsample", default=-1, type=float, help="Subsamples a % of valid triples")
# Others
parser.add_argument("--dtype", default="single", type=str, choices=["single", "double"], help="Machine precision")
parser.add_argument("--local_rank", type=int, help="Local process rank assigned by torch.distributed.launch")
parser.add_argument("--n_procs", default=4, type=int, help="Number of process to create")
parser.add_argument("--load_ckpt", default="", type=str, help="Load model from this file")
parser.add_argument("--results_file", default="out/results.csv", type=str, help="Exports final results to this file")
parser.add_argument("--save_epochs", default=10001, type=int, help="Exports every n epochs")
parser.add_argument("--seed", default=42, type=int, help="Seed")
parser.add_argument("--debug", dest='debug', action='store_true', default=False, help="Debug mode")
def load_ckpt(args, log):
saved_data = torch.load(args.load_ckpt) if args.load_ckpt else {}
args.init_epoch = 0
args.current_iter = 1
if saved_data:
local_rank = args.local_rank
if local_rank == 0:
log.info(f"Loaded CKPT: {args.load_ckpt}, Args in ckpt: {saved_data['args']}")
args = saved_data["args"]
args.local_rank = local_rank
args.init_epoch = saved_data["epochs"]
args.current_iter += 1
return args, saved_data
def get_model(args, saved_data=None):
if args.model == "tgspd":
model = TgSPDModel(args)
elif args.model == "tgrotspd":
model = TgSPDRotationModel(args)
elif args.model == "tgrefspd":
model = TgSPDReflectionModel(args)
elif args.model == "tgattnspd":
model = TgSPDAttnModel(args)
else:
raise ValueError(f"Unrecognized model argument: {args.model}")
model.to(config.DEVICE)
model = DistributedDataParallel(model, device_ids=None)
if saved_data:
model.load_state_dict(saved_data["model_state"])
return model
def get_optimizer(model, args, saved_data=None):
if args.optim == "sgd":
optim = SGD(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay)
elif args.optim == "adam":
optim = Adam(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay, amsgrad=args.amsgrad)
elif args.optim == "adamw":
optim = AdamW(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay, amsgrad=args.amsgrad)
elif args.optim == "adagrad":
optim = Adagrad(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay)
else:
raise ValueError(f"Unkown --optim option: {args.optim}")
if saved_data:
optim.load_state_dict(saved_data["optimizer_state"])
return optim
def get_scheduler(optimizer, args, saved_data=None):
|
def build_data_loader(split, batch_size, shuffle, args):
"""
:param split: torch.LongTensor b x 3 with triples (h, r, t)
:param batch_size: int
:param shuffle: bool
:param args:
:return: torch DataLoader set up with distributed sampler
"""
tensor_dataset = TensorDataset(split)
sampler = DistributedSampler(tensor_dataset, num_replicas=args.n_procs, rank=args.local_rank, shuffle=shuffle)
data_loader = DataLoader(dataset=tensor_dataset, batch_size=batch_size, shuffle=False, num_workers=0,
pin_memory=True, sampler=sampler)
return data_loader
def load_training_data(args, log):
data_path = config.PREP_PATH / f"{args.data}/{config.PREPROCESSED_FILE}"
log.info(f"Loading data from {data_path}")
data = torch.load(str(data_path))
rel2id = data["rel2id"]
num_relations = len(rel2id)
train, valid, test = data["train"], data["valid"], data["test"]
if args.debug:
train = train[:10]
valid = valid[:4]
test = test[:4]
args.batch_size = 40
augmented = get_inverted_triples(train, num_relations)
train = np.vstack((train, augmented))
valid_proportion = args.subsample if args.subsample > 0 else 1
valid = valid[:int(len(valid) * valid_proportion)]
inverted_valid = get_inverted_triples(valid, num_relations)
inverted_test = get_inverted_triples(test, num_relations)
train, valid, inverted_valid, test, inverted_test = [torch.LongTensor(split) for split in
[train, valid, inverted_valid, test, inverted_test]]
train_batch_size = args.batch_size // args.n_procs
eval_batch_size = args.eval_batch_size // args.n_procs
log.info(f"Batch size {train_batch_size} for {args.local_rank}/{args.n_procs} processes")
train_loader = build_data_loader(train, train_batch_size, shuffle=True, args=args)
lhs_valid_loader = build_data_loader(valid, eval_batch_size, shuffle=False, args=args)
rhs_valid_loader = build_data_loader(inverted_valid, eval_batch_size, shuffle=False, args=args)
lhs_test_loader = build_data_loader(test, eval_batch_size, shuffle=False, args=args)
rhs_test_loader = build_data_loader(inverted_test, eval_batch_size, shuffle=False, args=args)
valid_loaders = {"lhs": lhs_valid_loader, "rhs": rhs_valid_loader}
test_loaders = {"lhs": lhs_test_loader, "rhs": rhs_test_loader}
# add the inverted relations into the rel2id dict
invrel2id = {f"INV_{rel_name}": rel_id + num_relations for rel_name, rel_id in rel2id.items()}
rel2id = {**rel2id, **invrel2id}
return train_loader, valid_loaders, test_loaders, data["filters"], data["ent2id"], rel2id
def main():
parser = argparse.ArgumentParser("train.py")
config_parser(parser)
args = parser.parse_args()
log = get_logging()
torch.set_default_dtype(torch.float64 if args.dtype == "double" else torch.float32)
args, saved_data = load_ckpt(args, log)
torch.autograd.set_detect_anomaly(args.debug)
# sets random seed
seed = args.seed if args.seed > 0 else random.randint(1, 1000000)
set_seed(seed)
if args.local_rank == 0:
log.info(args)
dist.init_process_group(backend=config.BACKEND, init_method='env://') # world_size=args.n_procs, rank=args.local_rank)
# correct parameters due to distributed training. In case of loading ckpt, this value will be
# ignored when we load the optimizer state dict
args.learning_rate *= args.n_procs
train_loader, valid_loaders, test_loaders, filters, ent2id, rel2id = load_training_data(args, log)
args.num_entities = len(ent2id)
args.num_relations = len(rel2id) # already has inverted relations
model = get_model(args, saved_data)
optimizer = get_optimizer(model, args, saved_data)
scheduler = get_scheduler(optimizer, args, saved_data)
loss = getattr(losses, args.loss)(args)
if args.local_rank == 0:
log.info(f"GPU's available: {torch.cuda.device_count()}")
n_params = sum([p.nelement() for p in model.parameters() if p.requires_grad])
log.info(f"Entities: {args.num_entities}, relations: {args.num_relations}, dims: {args.dims}, "
f"number of parameters: {n_params}")
log.info(model)
log.info(f"Triples train: {len(train_loader.dataset)}, valid lhs: {len(valid_loaders['lhs'].dataset)}, "
f"test lhs: {len(test_loaders['lhs'].dataset)}")
if args.model in ("spd", "tgspd") and args.metric == "fone" and args.dims % 2 == 1:
log.info("WARNING: SPD with Fone Metric and uneven number of dimensions can be unstable!!!")
runner = Runner(model, optimizer, scheduler=scheduler, loss=loss, ent2id=ent2id, rel2id=rel2id, args=args,
train_loader=train_loader, valid_loaders=valid_loaders, test_loaders=test_loaders, filters=filters)
runner.run()
log.info("Done!")
if __name__ == "__main__":
main()
| patience = round(args.patience / args.val_every)
factor = 1 / float(args.reduce_factor)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=patience, factor=factor, mode="max")
if saved_data:
scheduler.load_state_dict(saved_data["scheduler_state"])
return scheduler | identifier_body |
train.py | import argparse
import random
import numpy as np
import torch
import torch.distributed as dist
from torch.optim import Adam, AdamW, SGD
from torch.optim.adagrad import Adagrad
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data import DataLoader, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from gyrospd import config
from gyrospd import losses
from gyrospd.utils import set_seed, get_logging, get_inverted_triples
from gyrospd.runner import Runner
from gyrospd.models import *
from gyrospd.manifolds.metrics import MetricType
def | (parser):
# Data options
parser.add_argument("--data", required=True, type=str, help="Name of data set folder")
parser.add_argument("--run_id", required=True, type=str, help="Name of model/run to export")
# Model
parser.add_argument("--model", default="tgattnspd", type=str, help="Model type: tgspd, tgattnspd")
parser.add_argument("--metric", default="riem", type=str, help=f"Metrics: {[t.value for t in list(MetricType)]}")
parser.add_argument("--dims", default=4, type=int, help="Dimensions for the model.")
parser.add_argument("--train_bias", dest='train_bias', action='store_true', default=False,
help="Whether to train scaling or not.")
parser.add_argument("--use_hrh", default=1, type=int, help="Whether to use HRH or RHR in lhs op.")
parser.add_argument("--inverse_tail", default=0, type=int, help="Whether to use t or t^-1 as tail")
parser.add_argument("--loss", choices=["BCELoss", "HingeLoss"], default="BCELoss", help="Loss function")
parser.add_argument("--hinge_margin", default=1, type=float, help="Margin for hinge loss function")
parser.add_argument("--regularizer", choices=["N3", "F2"], default="N3", help="Regularizer")
parser.add_argument("--regularizer_weight", default=0, type=float, help="Regularization weight")
parser.add_argument("--neg_sample_size", default=1, type=int, help="Negative sample size, -1 to not use")
parser.add_argument("--double_neg", action="store_true", default=False,
help="Whether to negative sample both head and tail entities")
# optim and config
parser.add_argument("--optim", default="adam", type=str, help="Optimization method.")
parser.add_argument("--amsgrad", action="store_true", default=False, help="Use AMS grad in Adam or AdamW")
parser.add_argument("--learning_rate", default=1e-3, type=float, help="Starting learning rate.")
parser.add_argument("--reduce_factor", default=2, type=float, help="Factor to reduce lr on plateau.")
parser.add_argument("--weight_decay", default=0.00, type=float, help="L2 Regularization.")
parser.add_argument("--val_every", default=5, type=int, help="Runs validation every n epochs.")
parser.add_argument("--patience", default=50, type=int, help="Epochs of patience for scheduler and early stop.")
parser.add_argument("--max_grad_norm", default=50.0, type=float, help="Max gradient norm.")
parser.add_argument("--batch_size", default=1000, type=int, help="Batch size.")
parser.add_argument("--eval_batch_size", default=100, type=int, help="Eval batch size. Has impact only on memory")
parser.add_argument("--epochs", default=100, type=int, help="Number of training epochs.")
parser.add_argument("--burnin", default=10, type=int, help="Number of initial epochs to train with reduce lr.")
parser.add_argument("--grad_accum_steps", default=1, type=int,
help="Number of update steps to accumulate before backward.")
parser.add_argument("--subsample", default=-1, type=float, help="Subsamples a % of valid triples")
# Others
parser.add_argument("--dtype", default="single", type=str, choices=["single", "double"], help="Machine precision")
parser.add_argument("--local_rank", type=int, help="Local process rank assigned by torch.distributed.launch")
parser.add_argument("--n_procs", default=4, type=int, help="Number of process to create")
parser.add_argument("--load_ckpt", default="", type=str, help="Load model from this file")
parser.add_argument("--results_file", default="out/results.csv", type=str, help="Exports final results to this file")
parser.add_argument("--save_epochs", default=10001, type=int, help="Exports every n epochs")
parser.add_argument("--seed", default=42, type=int, help="Seed")
parser.add_argument("--debug", dest='debug', action='store_true', default=False, help="Debug mode")
def load_ckpt(args, log):
saved_data = torch.load(args.load_ckpt) if args.load_ckpt else {}
args.init_epoch = 0
args.current_iter = 1
if saved_data:
local_rank = args.local_rank
if local_rank == 0:
log.info(f"Loaded CKPT: {args.load_ckpt}, Args in ckpt: {saved_data['args']}")
args = saved_data["args"]
args.local_rank = local_rank
args.init_epoch = saved_data["epochs"]
args.current_iter += 1
return args, saved_data
def get_model(args, saved_data=None):
if args.model == "tgspd":
model = TgSPDModel(args)
elif args.model == "tgrotspd":
model = TgSPDRotationModel(args)
elif args.model == "tgrefspd":
model = TgSPDReflectionModel(args)
elif args.model == "tgattnspd":
model = TgSPDAttnModel(args)
else:
raise ValueError(f"Unrecognized model argument: {args.model}")
model.to(config.DEVICE)
model = DistributedDataParallel(model, device_ids=None)
if saved_data:
model.load_state_dict(saved_data["model_state"])
return model
def get_optimizer(model, args, saved_data=None):
if args.optim == "sgd":
optim = SGD(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay)
elif args.optim == "adam":
optim = Adam(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay, amsgrad=args.amsgrad)
elif args.optim == "adamw":
optim = AdamW(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay, amsgrad=args.amsgrad)
elif args.optim == "adagrad":
optim = Adagrad(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay)
else:
raise ValueError(f"Unkown --optim option: {args.optim}")
if saved_data:
optim.load_state_dict(saved_data["optimizer_state"])
return optim
def get_scheduler(optimizer, args, saved_data=None):
patience = round(args.patience / args.val_every)
factor = 1 / float(args.reduce_factor)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=patience, factor=factor, mode="max")
if saved_data:
scheduler.load_state_dict(saved_data["scheduler_state"])
return scheduler
def build_data_loader(split, batch_size, shuffle, args):
"""
:param split: torch.LongTensor b x 3 with triples (h, r, t)
:param batch_size: int
:param shuffle: bool
:param args:
:return: torch DataLoader set up with distributed sampler
"""
tensor_dataset = TensorDataset(split)
sampler = DistributedSampler(tensor_dataset, num_replicas=args.n_procs, rank=args.local_rank, shuffle=shuffle)
data_loader = DataLoader(dataset=tensor_dataset, batch_size=batch_size, shuffle=False, num_workers=0,
pin_memory=True, sampler=sampler)
return data_loader
def load_training_data(args, log):
data_path = config.PREP_PATH / f"{args.data}/{config.PREPROCESSED_FILE}"
log.info(f"Loading data from {data_path}")
data = torch.load(str(data_path))
rel2id = data["rel2id"]
num_relations = len(rel2id)
train, valid, test = data["train"], data["valid"], data["test"]
if args.debug:
train = train[:10]
valid = valid[:4]
test = test[:4]
args.batch_size = 40
augmented = get_inverted_triples(train, num_relations)
train = np.vstack((train, augmented))
valid_proportion = args.subsample if args.subsample > 0 else 1
valid = valid[:int(len(valid) * valid_proportion)]
inverted_valid = get_inverted_triples(valid, num_relations)
inverted_test = get_inverted_triples(test, num_relations)
train, valid, inverted_valid, test, inverted_test = [torch.LongTensor(split) for split in
[train, valid, inverted_valid, test, inverted_test]]
train_batch_size = args.batch_size // args.n_procs
eval_batch_size = args.eval_batch_size // args.n_procs
log.info(f"Batch size {train_batch_size} for {args.local_rank}/{args.n_procs} processes")
train_loader = build_data_loader(train, train_batch_size, shuffle=True, args=args)
lhs_valid_loader = build_data_loader(valid, eval_batch_size, shuffle=False, args=args)
rhs_valid_loader = build_data_loader(inverted_valid, eval_batch_size, shuffle=False, args=args)
lhs_test_loader = build_data_loader(test, eval_batch_size, shuffle=False, args=args)
rhs_test_loader = build_data_loader(inverted_test, eval_batch_size, shuffle=False, args=args)
valid_loaders = {"lhs": lhs_valid_loader, "rhs": rhs_valid_loader}
test_loaders = {"lhs": lhs_test_loader, "rhs": rhs_test_loader}
# add the inverted relations into the rel2id dict
invrel2id = {f"INV_{rel_name}": rel_id + num_relations for rel_name, rel_id in rel2id.items()}
rel2id = {**rel2id, **invrel2id}
return train_loader, valid_loaders, test_loaders, data["filters"], data["ent2id"], rel2id
def main():
parser = argparse.ArgumentParser("train.py")
config_parser(parser)
args = parser.parse_args()
log = get_logging()
torch.set_default_dtype(torch.float64 if args.dtype == "double" else torch.float32)
args, saved_data = load_ckpt(args, log)
torch.autograd.set_detect_anomaly(args.debug)
# sets random seed
seed = args.seed if args.seed > 0 else random.randint(1, 1000000)
set_seed(seed)
if args.local_rank == 0:
log.info(args)
dist.init_process_group(backend=config.BACKEND, init_method='env://') # world_size=args.n_procs, rank=args.local_rank)
# correct parameters due to distributed training. In case of loading ckpt, this value will be
# ignored when we load the optimizer state dict
args.learning_rate *= args.n_procs
train_loader, valid_loaders, test_loaders, filters, ent2id, rel2id = load_training_data(args, log)
args.num_entities = len(ent2id)
args.num_relations = len(rel2id) # already has inverted relations
model = get_model(args, saved_data)
optimizer = get_optimizer(model, args, saved_data)
scheduler = get_scheduler(optimizer, args, saved_data)
loss = getattr(losses, args.loss)(args)
if args.local_rank == 0:
log.info(f"GPU's available: {torch.cuda.device_count()}")
n_params = sum([p.nelement() for p in model.parameters() if p.requires_grad])
log.info(f"Entities: {args.num_entities}, relations: {args.num_relations}, dims: {args.dims}, "
f"number of parameters: {n_params}")
log.info(model)
log.info(f"Triples train: {len(train_loader.dataset)}, valid lhs: {len(valid_loaders['lhs'].dataset)}, "
f"test lhs: {len(test_loaders['lhs'].dataset)}")
if args.model in ("spd", "tgspd") and args.metric == "fone" and args.dims % 2 == 1:
log.info("WARNING: SPD with Fone Metric and uneven number of dimensions can be unstable!!!")
runner = Runner(model, optimizer, scheduler=scheduler, loss=loss, ent2id=ent2id, rel2id=rel2id, args=args,
train_loader=train_loader, valid_loaders=valid_loaders, test_loaders=test_loaders, filters=filters)
runner.run()
log.info("Done!")
if __name__ == "__main__":
main()
| config_parser | identifier_name |
feffpath.py | #!/usr/bin/python
"""
Feff85L EXAFS Scatternig Path for Python
"""
from __future__ import print_function
import os
import sys
import ctypes
from ctypes import POINTER, pointer, c_int, c_long, c_char_p, c_double
import numpy as np
def load_feff8lpath():
dllform = 'lib{:s}.so'
pathsep = ':'
loadlib = ctypes.cdll
if sys.platform.lower().startswith('darwin'):
dllform = 'lib{:s}.dylib'
if os.name == 'nt':
dllform = '{:s}.dll'
pathsep = ';'
loadlib = ctypes.windll
dllname = dllform.format('feff8lpath')
spath = ['.']
spath.extend(os.environ.get('LD_LIBRARY_PATH','').split(pathsep))
spath.extend(os.environ.get('PATH', '').split(pathsep))
spath.extend(['../../local_install/lib/', '../../src/GENFMT/lib'])
for dname in spath:
fullname = os.path.join(dname, dllname)
if os.path.exists(fullname):
return loadlib.LoadLibrary(fullname)
return None
FLIB = load_feff8lpath()
FEFF_maxpts = 150 # nex
FEFF_maxpot = 11 # nphx
FEFF_maxleg = 9 # legtot
BOHR = 0.5291772490
# bytes/str conversion
str2bytes = bytes2str = str
if sys.version_info[0] == 3:
def bytes2str(val):
if isinstance(val, str):
return val
if isinstance(val, bytes):
return str(val, 'utf-8')
return str(val)
def str2bytes(val):
if isinstance(val, bytes):
return val
return bytes(val, 'utf-8')
def with_phase_file(fcn):
"""decorator to ensure that the wrapped function either
has a non-None 'phase_file' argument or that that
self.phase_file is not None
"""
errmsg = "function '%s' needs a non-None phase_file"
def wrapper(*args, **keywords):
"needs phase_file"
phase_file = keywords.get('phase_file', None)
if phase_file is None:
phase_file = getattr(args[0], 'phase_file', None)
if phase_file is None:
raise AttributeError(errmsg % fcn.__name__)
else:
setattr(args[0], 'phase_file', phase_file)
# raise Warning(errmsg % fcn.__name__)
return fcn(*args, **keywords)
wrapper.__doc__ = fcn.__doc__
wrapper.__name__ = fcn.__name__
wrapper.__filename__ = fcn.__code__.co_filename
wrapper.__dict__.update(fcn.__dict__)
return wrapper
class ScatteringPath(object):
"""A Scatering Path for calculating a XAFS signal with Feff
A calculation requires a Potentials and Phase Shift calculation
in PAD format from Feff85, and a list of scattering paths
Usage:
------
# create path
path = ScatteringPath(phase_file='phase.pad')
# list 'ipot' and labels for absorber, scattererers
path.list_scatterers()
# set coords for absorbing atom
path.set_absorber(x=0., y=0., z=0.)
# add scattering atom
path.add_scatterer(x=1.5, y=1.5, z=1.5, ipot=1)
# calculate basic (unaltered) XAFS contributions
path.calcuate_xafs()
"""
def __init__(self, phase_file=None):
self.phase_file = phase_file
self.clear()
def clear(self):
"""reset all path data"""
self.index = 1
self.degen = 1.
self.nnnn_out = False
self.json_out = False
self.verbose = False
self.ipol = 0
self.ellip = 0.
self.nepts = 0
self.genfmt_order = 2
self.genfmt_vers = ""
self.exch_label = ""
self.rs = 0.
self.vint = 0.
self.xmu = 0.
self.edge = 0.
self.kf = 0.
self.rnorman = 0.
self.gamach = 0.
self.nepts = FEFF_maxpts
dargs = dict(dtype=np.float64, order='F')
largs = dict(dtype=np.int32, order='F')
self.evec = np.zeros(3, **dargs)
self.xivec = np.zeros(3, **dargs)
self.ipot = np.zeros(1+FEFF_maxleg, **largs)
self.beta = np.zeros(1+FEFF_maxleg, **dargs)
self.eta = np.zeros(2+FEFF_maxleg, **dargs)
self.ri = np.zeros(FEFF_maxleg, **dargs)
self.rat = np.zeros((3, 2+FEFF_maxleg), **dargs)
self.iz = np.zeros(1+FEFF_maxpot, **largs)
self.kfeff = np.zeros(FEFF_maxpts, **dargs)
self.real_phc = np.zeros(FEFF_maxpts, **dargs)
self.mag_feff = np.zeros(FEFF_maxpts, **dargs)
self.pha_feff = np.zeros(FEFF_maxpts, **dargs)
self.red_fact = np.zeros(FEFF_maxpts, **dargs)
self.lam = np.zeros(FEFF_maxpts, **dargs)
self.rep = np.zeros(FEFF_maxpts, **dargs)
self.nleg = 1
@with_phase_file
def list_scatterers(self, phase_file=None):
"""list Feff Potentials atoms ('ipots') fo phase file"""
atoms = []
with open(self.phase_file,'r') as fh:
line1_words = fh.readline().strip().split()
text = fh.readlines()
nphases = int(line1_words[4])
for line in text[4:]:
if line.startswith('$'): continue
words = line.split()
atoms.append((int(words[1]), words[2]))
if len(atoms) > nphases:
break
out = ["# Potential Z Symbol"]
for ipot, atom in enumerate(atoms):
out.append(" %2i %3i %s" % (ipot, atom[0], atom[1]))
return "\n".join(out)
@with_phase_file
def set_absorber(self, x=0., y=0., z=0., phase_file=None):
"""set coordinates for absorbing atom ('ipot'=0)"""
self.rat[0, 0] = x
self.rat[1, 0] = y
self.rat[2, 0] = z
@with_phase_file
def add_scatterer(self, x=0., y=0., z=0., ipot=1, phase_file=None):
self.rat[0, self.nleg] = x
self.rat[1, self.nleg] = y
self.rat[2, self.nleg] = z
self.ipot[self.nleg] = ipot
self.nleg += 1
# set final atom coords to same as absorber
self.rat[0, self.nleg] = self.rat[0, 0]
self.rat[1, self.nleg] = self.rat[1, 0]
self.rat[2, self.nleg] = self.rat[2, 0]
self.ipot[self.nleg] = self.ipot[0]
@with_phase_file
def calculate_xafs(self, phase_file=None):
|
if __name__ == '__main__':
path = ScatteringPath(phase_file='phase.pad')
path.set_absorber( x=0.01, y=0.1, z=0.01)
path.add_scatterer(x=1.8058, y=0.005, z=1.8063, ipot=1)
path.degen = 12
path.calculate_xafs()
print('# Calculate EXAFS with PhaseFile: {:s}'.format(path.phase_file))
print('# Path Geometry: \n# IPOT IZ X Y Z')
for i in range(path.nleg):
ipot = path.ipot[i]
iz = path.iz[ipot]
rat = path.rat[:,i]
print("# %2i %2i %8.4f %8.4f %8.4f" % (ipot,iz, rat[0], rat[1], rat[2]))
print("# Polarization: {:d}, ellipticity={:4f}".format(path.ipol, path.ellip))
print("# Polarization E Vector = {:s}".format(", ".join(["%.4f" % a for a in path.evec])))
print("# Polarization X Vector = {:s}".format(", ".join(["%.4f" % a for a in path.xivec])))
print("# Path Settings")
for attr in ('rs', 'vint', 'xmu', 'edge', 'kf', 'rnorman', 'gamach'):
print("# {:8s} = {:+4f} ".format(attr, getattr(path, attr)))
for attr in ('exch_label', 'genfmt_version'):
print("# {:8s} = {:s} ".format(attr, getattr(path, attr)))
print("Path settings: degen=%10.5f, xmu=%10.5f, kf=%10.5f" % (path.degen, path.xmu, path.kf))
npts = 1 + max(np.where(path.kfeff > 0)[0])
print("# k rep real_phc phase_feff mag_feff red_factor lambda ")
fmt = " %6.3f %11.7f %11.7f %11.7f %11.7f %11.7f %11.7f"
for i in range(int(npts/3.0)):
print(fmt % (path.kfeff[i], path.rep[i], path.real_phc[i], path.pha_feff[i],
path.mag_feff[i], path.red_fact[i], path.lam[i]))
# print(fmt.format(path.kfeff[i], path.rep[i], path.real_phc[i],
# path.mag_feff[i], path.red_fact[i], path.lam[i]))
| class args: pass
# strings / char*. Note fixed length to match Fortran
args.phase_file = (self.phase_file + ' '*256)[:256]
args.exch_label = str2bytes(' '*8)
args.genfmt_version = str2bytes(' '*30)
# integers, including booleans
for attr in ('index', 'nleg', 'genfmt_order', 'ipol', 'nnnn_out',
'json_out', 'verbose', 'nepts'):
setattr(args, attr, pointer(c_long(int(getattr(self, attr)))))
# doubles
for attr in ('degen', 'rs', 'vint', 'xmu', 'edge', 'kf', 'rnorman',
'gamach', 'ellip'):
setattr(args, attr, pointer(c_double(getattr(self, attr))))
# integer arrays
for attr in ('ipot', 'iz'):
arr = getattr(self, attr)
cdata = arr.ctypes.data_as(POINTER(arr.size*c_int))
setattr(args, attr, cdata)
# double arrays
self.rat = self.rat/BOHR
for attr in ('evec', 'xivec', 'rat', 'ri', 'beta', 'eta',
'kfeff', 'real_phc', 'mag_feff', 'pha_feff',
'red_fact', 'lam', 'rep'):
arr = getattr(self, attr)
cdata = arr.ctypes.data_as(POINTER(arr.size*c_double))
setattr(args, attr, cdata)
x = FLIB.calc_onepath(args.phase_file, args.index, args.nleg,
args.degen, args.genfmt_order,
args.exch_label, args.rs, args.vint,
args.xmu, args.edge, args.kf, args.rnorman,
args.gamach, args.genfmt_version, args.ipot,
args.rat, args.iz, args.ipol, args.evec,
args.ellip, args.xivec, args.nnnn_out,
args.json_out, args.verbose, args.ri,
args.beta, args.eta, args.nepts, args.kfeff,
args.real_phc, args.mag_feff, args.pha_feff,
args.red_fact, args.lam, args.rep)
self.exch_label = bytes2str(args.exch_label).strip()
self.genfmt_version = bytes2str(args.genfmt_version).strip()
for attr in ('index', 'nleg', 'genfmt_order', 'degen', 'rs',
'vint', 'xmu', 'edge', 'kf', 'rnorman', 'gamach',
'ipol', 'ellip', 'nnnn_out', 'json_out', 'verbose',
'nepts'):
setattr(self, attr, getattr(args, attr).contents.value)
for attr in ('ipot', 'evec', 'xivec', 'beta', 'eta', 'ri', 'rat',
'iz', 'kfeff', 'real_phc', 'mag_feff', 'pha_feff',
'red_fact', 'lam', 'rep'):
setattr(self, attr, np.array(getattr(args, attr).contents[:]))
# some data needs recasting, reformatting
self.nnnn_out = bool(self.nnnn_out)
self.json_out = bool(self.json_out)
self.verbose = bool(self.verbose)
self.rat = self.rat.reshape((2+FEFF_maxleg, 3)).transpose()*BOHR | identifier_body |
feffpath.py | #!/usr/bin/python
"""
Feff85L EXAFS Scatternig Path for Python
"""
from __future__ import print_function
import os
import sys
import ctypes
from ctypes import POINTER, pointer, c_int, c_long, c_char_p, c_double
import numpy as np
def load_feff8lpath():
dllform = 'lib{:s}.so'
pathsep = ':'
loadlib = ctypes.cdll
if sys.platform.lower().startswith('darwin'):
dllform = 'lib{:s}.dylib'
if os.name == 'nt':
dllform = '{:s}.dll'
pathsep = ';'
loadlib = ctypes.windll
dllname = dllform.format('feff8lpath')
spath = ['.']
spath.extend(os.environ.get('LD_LIBRARY_PATH','').split(pathsep))
spath.extend(os.environ.get('PATH', '').split(pathsep))
spath.extend(['../../local_install/lib/', '../../src/GENFMT/lib'])
for dname in spath:
fullname = os.path.join(dname, dllname)
if os.path.exists(fullname):
return loadlib.LoadLibrary(fullname)
return None
FLIB = load_feff8lpath()
FEFF_maxpts = 150 # nex
FEFF_maxpot = 11 # nphx
FEFF_maxleg = 9 # legtot
BOHR = 0.5291772490
# bytes/str conversion
str2bytes = bytes2str = str
if sys.version_info[0] == 3:
def bytes2str(val):
if isinstance(val, str):
return val
if isinstance(val, bytes):
return str(val, 'utf-8')
return str(val)
def str2bytes(val):
if isinstance(val, bytes):
return val
return bytes(val, 'utf-8')
def with_phase_file(fcn):
"""decorator to ensure that the wrapped function either
has a non-None 'phase_file' argument or that that
self.phase_file is not None
"""
errmsg = "function '%s' needs a non-None phase_file"
def wrapper(*args, **keywords):
"needs phase_file"
phase_file = keywords.get('phase_file', None)
if phase_file is None:
phase_file = getattr(args[0], 'phase_file', None)
if phase_file is None:
raise AttributeError(errmsg % fcn.__name__)
else:
setattr(args[0], 'phase_file', phase_file)
# raise Warning(errmsg % fcn.__name__)
return fcn(*args, **keywords)
wrapper.__doc__ = fcn.__doc__
wrapper.__name__ = fcn.__name__
wrapper.__filename__ = fcn.__code__.co_filename
wrapper.__dict__.update(fcn.__dict__)
return wrapper
class ScatteringPath(object):
"""A Scatering Path for calculating a XAFS signal with Feff
A calculation requires a Potentials and Phase Shift calculation
in PAD format from Feff85, and a list of scattering paths
Usage:
------
# create path
path = ScatteringPath(phase_file='phase.pad')
# list 'ipot' and labels for absorber, scattererers
path.list_scatterers()
# set coords for absorbing atom
path.set_absorber(x=0., y=0., z=0.)
# add scattering atom
path.add_scatterer(x=1.5, y=1.5, z=1.5, ipot=1)
|
"""
def __init__(self, phase_file=None):
self.phase_file = phase_file
self.clear()
def clear(self):
"""reset all path data"""
self.index = 1
self.degen = 1.
self.nnnn_out = False
self.json_out = False
self.verbose = False
self.ipol = 0
self.ellip = 0.
self.nepts = 0
self.genfmt_order = 2
self.genfmt_vers = ""
self.exch_label = ""
self.rs = 0.
self.vint = 0.
self.xmu = 0.
self.edge = 0.
self.kf = 0.
self.rnorman = 0.
self.gamach = 0.
self.nepts = FEFF_maxpts
dargs = dict(dtype=np.float64, order='F')
largs = dict(dtype=np.int32, order='F')
self.evec = np.zeros(3, **dargs)
self.xivec = np.zeros(3, **dargs)
self.ipot = np.zeros(1+FEFF_maxleg, **largs)
self.beta = np.zeros(1+FEFF_maxleg, **dargs)
self.eta = np.zeros(2+FEFF_maxleg, **dargs)
self.ri = np.zeros(FEFF_maxleg, **dargs)
self.rat = np.zeros((3, 2+FEFF_maxleg), **dargs)
self.iz = np.zeros(1+FEFF_maxpot, **largs)
self.kfeff = np.zeros(FEFF_maxpts, **dargs)
self.real_phc = np.zeros(FEFF_maxpts, **dargs)
self.mag_feff = np.zeros(FEFF_maxpts, **dargs)
self.pha_feff = np.zeros(FEFF_maxpts, **dargs)
self.red_fact = np.zeros(FEFF_maxpts, **dargs)
self.lam = np.zeros(FEFF_maxpts, **dargs)
self.rep = np.zeros(FEFF_maxpts, **dargs)
self.nleg = 1
@with_phase_file
def list_scatterers(self, phase_file=None):
"""list Feff Potentials atoms ('ipots') fo phase file"""
atoms = []
with open(self.phase_file,'r') as fh:
line1_words = fh.readline().strip().split()
text = fh.readlines()
nphases = int(line1_words[4])
for line in text[4:]:
if line.startswith('$'): continue
words = line.split()
atoms.append((int(words[1]), words[2]))
if len(atoms) > nphases:
break
out = ["# Potential Z Symbol"]
for ipot, atom in enumerate(atoms):
out.append(" %2i %3i %s" % (ipot, atom[0], atom[1]))
return "\n".join(out)
@with_phase_file
def set_absorber(self, x=0., y=0., z=0., phase_file=None):
"""set coordinates for absorbing atom ('ipot'=0)"""
self.rat[0, 0] = x
self.rat[1, 0] = y
self.rat[2, 0] = z
@with_phase_file
def add_scatterer(self, x=0., y=0., z=0., ipot=1, phase_file=None):
self.rat[0, self.nleg] = x
self.rat[1, self.nleg] = y
self.rat[2, self.nleg] = z
self.ipot[self.nleg] = ipot
self.nleg += 1
# set final atom coords to same as absorber
self.rat[0, self.nleg] = self.rat[0, 0]
self.rat[1, self.nleg] = self.rat[1, 0]
self.rat[2, self.nleg] = self.rat[2, 0]
self.ipot[self.nleg] = self.ipot[0]
@with_phase_file
def calculate_xafs(self, phase_file=None):
class args: pass
# strings / char*. Note fixed length to match Fortran
args.phase_file = (self.phase_file + ' '*256)[:256]
args.exch_label = str2bytes(' '*8)
args.genfmt_version = str2bytes(' '*30)
# integers, including booleans
for attr in ('index', 'nleg', 'genfmt_order', 'ipol', 'nnnn_out',
'json_out', 'verbose', 'nepts'):
setattr(args, attr, pointer(c_long(int(getattr(self, attr)))))
# doubles
for attr in ('degen', 'rs', 'vint', 'xmu', 'edge', 'kf', 'rnorman',
'gamach', 'ellip'):
setattr(args, attr, pointer(c_double(getattr(self, attr))))
# integer arrays
for attr in ('ipot', 'iz'):
arr = getattr(self, attr)
cdata = arr.ctypes.data_as(POINTER(arr.size*c_int))
setattr(args, attr, cdata)
# double arrays
self.rat = self.rat/BOHR
for attr in ('evec', 'xivec', 'rat', 'ri', 'beta', 'eta',
'kfeff', 'real_phc', 'mag_feff', 'pha_feff',
'red_fact', 'lam', 'rep'):
arr = getattr(self, attr)
cdata = arr.ctypes.data_as(POINTER(arr.size*c_double))
setattr(args, attr, cdata)
x = FLIB.calc_onepath(args.phase_file, args.index, args.nleg,
args.degen, args.genfmt_order,
args.exch_label, args.rs, args.vint,
args.xmu, args.edge, args.kf, args.rnorman,
args.gamach, args.genfmt_version, args.ipot,
args.rat, args.iz, args.ipol, args.evec,
args.ellip, args.xivec, args.nnnn_out,
args.json_out, args.verbose, args.ri,
args.beta, args.eta, args.nepts, args.kfeff,
args.real_phc, args.mag_feff, args.pha_feff,
args.red_fact, args.lam, args.rep)
self.exch_label = bytes2str(args.exch_label).strip()
self.genfmt_version = bytes2str(args.genfmt_version).strip()
for attr in ('index', 'nleg', 'genfmt_order', 'degen', 'rs',
'vint', 'xmu', 'edge', 'kf', 'rnorman', 'gamach',
'ipol', 'ellip', 'nnnn_out', 'json_out', 'verbose',
'nepts'):
setattr(self, attr, getattr(args, attr).contents.value)
for attr in ('ipot', 'evec', 'xivec', 'beta', 'eta', 'ri', 'rat',
'iz', 'kfeff', 'real_phc', 'mag_feff', 'pha_feff',
'red_fact', 'lam', 'rep'):
setattr(self, attr, np.array(getattr(args, attr).contents[:]))
# some data needs recasting, reformatting
self.nnnn_out = bool(self.nnnn_out)
self.json_out = bool(self.json_out)
self.verbose = bool(self.verbose)
self.rat = self.rat.reshape((2+FEFF_maxleg, 3)).transpose()*BOHR
if __name__ == '__main__':
path = ScatteringPath(phase_file='phase.pad')
path.set_absorber( x=0.01, y=0.1, z=0.01)
path.add_scatterer(x=1.8058, y=0.005, z=1.8063, ipot=1)
path.degen = 12
path.calculate_xafs()
print('# Calculate EXAFS with PhaseFile: {:s}'.format(path.phase_file))
print('# Path Geometry: \n# IPOT IZ X Y Z')
for i in range(path.nleg):
ipot = path.ipot[i]
iz = path.iz[ipot]
rat = path.rat[:,i]
print("# %2i %2i %8.4f %8.4f %8.4f" % (ipot,iz, rat[0], rat[1], rat[2]))
print("# Polarization: {:d}, ellipticity={:4f}".format(path.ipol, path.ellip))
print("# Polarization E Vector = {:s}".format(", ".join(["%.4f" % a for a in path.evec])))
print("# Polarization X Vector = {:s}".format(", ".join(["%.4f" % a for a in path.xivec])))
print("# Path Settings")
for attr in ('rs', 'vint', 'xmu', 'edge', 'kf', 'rnorman', 'gamach'):
print("# {:8s} = {:+4f} ".format(attr, getattr(path, attr)))
for attr in ('exch_label', 'genfmt_version'):
print("# {:8s} = {:s} ".format(attr, getattr(path, attr)))
print("Path settings: degen=%10.5f, xmu=%10.5f, kf=%10.5f" % (path.degen, path.xmu, path.kf))
npts = 1 + max(np.where(path.kfeff > 0)[0])
print("# k rep real_phc phase_feff mag_feff red_factor lambda ")
fmt = " %6.3f %11.7f %11.7f %11.7f %11.7f %11.7f %11.7f"
for i in range(int(npts/3.0)):
print(fmt % (path.kfeff[i], path.rep[i], path.real_phc[i], path.pha_feff[i],
path.mag_feff[i], path.red_fact[i], path.lam[i]))
# print(fmt.format(path.kfeff[i], path.rep[i], path.real_phc[i],
# path.mag_feff[i], path.red_fact[i], path.lam[i])) | # calculate basic (unaltered) XAFS contributions
path.calcuate_xafs() | random_line_split |
feffpath.py | #!/usr/bin/python
"""
Feff85L EXAFS Scatternig Path for Python
"""
from __future__ import print_function
import os
import sys
import ctypes
from ctypes import POINTER, pointer, c_int, c_long, c_char_p, c_double
import numpy as np
def load_feff8lpath():
dllform = 'lib{:s}.so'
pathsep = ':'
loadlib = ctypes.cdll
if sys.platform.lower().startswith('darwin'):
dllform = 'lib{:s}.dylib'
if os.name == 'nt':
dllform = '{:s}.dll'
pathsep = ';'
loadlib = ctypes.windll
dllname = dllform.format('feff8lpath')
spath = ['.']
spath.extend(os.environ.get('LD_LIBRARY_PATH','').split(pathsep))
spath.extend(os.environ.get('PATH', '').split(pathsep))
spath.extend(['../../local_install/lib/', '../../src/GENFMT/lib'])
for dname in spath:
fullname = os.path.join(dname, dllname)
if os.path.exists(fullname):
return loadlib.LoadLibrary(fullname)
return None
FLIB = load_feff8lpath()
FEFF_maxpts = 150 # nex
FEFF_maxpot = 11 # nphx
FEFF_maxleg = 9 # legtot
BOHR = 0.5291772490
# bytes/str conversion
str2bytes = bytes2str = str
if sys.version_info[0] == 3:
def bytes2str(val):
if isinstance(val, str):
return val
if isinstance(val, bytes):
return str(val, 'utf-8')
return str(val)
def str2bytes(val):
if isinstance(val, bytes):
return val
return bytes(val, 'utf-8')
def with_phase_file(fcn):
"""decorator to ensure that the wrapped function either
has a non-None 'phase_file' argument or that that
self.phase_file is not None
"""
errmsg = "function '%s' needs a non-None phase_file"
def wrapper(*args, **keywords):
"needs phase_file"
phase_file = keywords.get('phase_file', None)
if phase_file is None:
phase_file = getattr(args[0], 'phase_file', None)
if phase_file is None:
raise AttributeError(errmsg % fcn.__name__)
else:
setattr(args[0], 'phase_file', phase_file)
# raise Warning(errmsg % fcn.__name__)
return fcn(*args, **keywords)
wrapper.__doc__ = fcn.__doc__
wrapper.__name__ = fcn.__name__
wrapper.__filename__ = fcn.__code__.co_filename
wrapper.__dict__.update(fcn.__dict__)
return wrapper
class ScatteringPath(object):
"""A Scatering Path for calculating a XAFS signal with Feff
A calculation requires a Potentials and Phase Shift calculation
in PAD format from Feff85, and a list of scattering paths
Usage:
------
# create path
path = ScatteringPath(phase_file='phase.pad')
# list 'ipot' and labels for absorber, scattererers
path.list_scatterers()
# set coords for absorbing atom
path.set_absorber(x=0., y=0., z=0.)
# add scattering atom
path.add_scatterer(x=1.5, y=1.5, z=1.5, ipot=1)
# calculate basic (unaltered) XAFS contributions
path.calcuate_xafs()
"""
def __init__(self, phase_file=None):
self.phase_file = phase_file
self.clear()
def clear(self):
"""reset all path data"""
self.index = 1
self.degen = 1.
self.nnnn_out = False
self.json_out = False
self.verbose = False
self.ipol = 0
self.ellip = 0.
self.nepts = 0
self.genfmt_order = 2
self.genfmt_vers = ""
self.exch_label = ""
self.rs = 0.
self.vint = 0.
self.xmu = 0.
self.edge = 0.
self.kf = 0.
self.rnorman = 0.
self.gamach = 0.
self.nepts = FEFF_maxpts
dargs = dict(dtype=np.float64, order='F')
largs = dict(dtype=np.int32, order='F')
self.evec = np.zeros(3, **dargs)
self.xivec = np.zeros(3, **dargs)
self.ipot = np.zeros(1+FEFF_maxleg, **largs)
self.beta = np.zeros(1+FEFF_maxleg, **dargs)
self.eta = np.zeros(2+FEFF_maxleg, **dargs)
self.ri = np.zeros(FEFF_maxleg, **dargs)
self.rat = np.zeros((3, 2+FEFF_maxleg), **dargs)
self.iz = np.zeros(1+FEFF_maxpot, **largs)
self.kfeff = np.zeros(FEFF_maxpts, **dargs)
self.real_phc = np.zeros(FEFF_maxpts, **dargs)
self.mag_feff = np.zeros(FEFF_maxpts, **dargs)
self.pha_feff = np.zeros(FEFF_maxpts, **dargs)
self.red_fact = np.zeros(FEFF_maxpts, **dargs)
self.lam = np.zeros(FEFF_maxpts, **dargs)
self.rep = np.zeros(FEFF_maxpts, **dargs)
self.nleg = 1
@with_phase_file
def list_scatterers(self, phase_file=None):
"""list Feff Potentials atoms ('ipots') fo phase file"""
atoms = []
with open(self.phase_file,'r') as fh:
line1_words = fh.readline().strip().split()
text = fh.readlines()
nphases = int(line1_words[4])
for line in text[4:]:
if line.startswith('$'): continue
words = line.split()
atoms.append((int(words[1]), words[2]))
if len(atoms) > nphases:
break
out = ["# Potential Z Symbol"]
for ipot, atom in enumerate(atoms):
out.append(" %2i %3i %s" % (ipot, atom[0], atom[1]))
return "\n".join(out)
@with_phase_file
def set_absorber(self, x=0., y=0., z=0., phase_file=None):
"""set coordinates for absorbing atom ('ipot'=0)"""
self.rat[0, 0] = x
self.rat[1, 0] = y
self.rat[2, 0] = z
@with_phase_file
def add_scatterer(self, x=0., y=0., z=0., ipot=1, phase_file=None):
self.rat[0, self.nleg] = x
self.rat[1, self.nleg] = y
self.rat[2, self.nleg] = z
self.ipot[self.nleg] = ipot
self.nleg += 1
# set final atom coords to same as absorber
self.rat[0, self.nleg] = self.rat[0, 0]
self.rat[1, self.nleg] = self.rat[1, 0]
self.rat[2, self.nleg] = self.rat[2, 0]
self.ipot[self.nleg] = self.ipot[0]
@with_phase_file
def calculate_xafs(self, phase_file=None):
class args: pass
# strings / char*. Note fixed length to match Fortran
args.phase_file = (self.phase_file + ' '*256)[:256]
args.exch_label = str2bytes(' '*8)
args.genfmt_version = str2bytes(' '*30)
# integers, including booleans
for attr in ('index', 'nleg', 'genfmt_order', 'ipol', 'nnnn_out',
'json_out', 'verbose', 'nepts'):
setattr(args, attr, pointer(c_long(int(getattr(self, attr)))))
# doubles
for attr in ('degen', 'rs', 'vint', 'xmu', 'edge', 'kf', 'rnorman',
'gamach', 'ellip'):
setattr(args, attr, pointer(c_double(getattr(self, attr))))
# integer arrays
for attr in ('ipot', 'iz'):
arr = getattr(self, attr)
cdata = arr.ctypes.data_as(POINTER(arr.size*c_int))
setattr(args, attr, cdata)
# double arrays
self.rat = self.rat/BOHR
for attr in ('evec', 'xivec', 'rat', 'ri', 'beta', 'eta',
'kfeff', 'real_phc', 'mag_feff', 'pha_feff',
'red_fact', 'lam', 'rep'):
arr = getattr(self, attr)
cdata = arr.ctypes.data_as(POINTER(arr.size*c_double))
setattr(args, attr, cdata)
x = FLIB.calc_onepath(args.phase_file, args.index, args.nleg,
args.degen, args.genfmt_order,
args.exch_label, args.rs, args.vint,
args.xmu, args.edge, args.kf, args.rnorman,
args.gamach, args.genfmt_version, args.ipot,
args.rat, args.iz, args.ipol, args.evec,
args.ellip, args.xivec, args.nnnn_out,
args.json_out, args.verbose, args.ri,
args.beta, args.eta, args.nepts, args.kfeff,
args.real_phc, args.mag_feff, args.pha_feff,
args.red_fact, args.lam, args.rep)
self.exch_label = bytes2str(args.exch_label).strip()
self.genfmt_version = bytes2str(args.genfmt_version).strip()
for attr in ('index', 'nleg', 'genfmt_order', 'degen', 'rs',
'vint', 'xmu', 'edge', 'kf', 'rnorman', 'gamach',
'ipol', 'ellip', 'nnnn_out', 'json_out', 'verbose',
'nepts'):
setattr(self, attr, getattr(args, attr).contents.value)
for attr in ('ipot', 'evec', 'xivec', 'beta', 'eta', 'ri', 'rat',
'iz', 'kfeff', 'real_phc', 'mag_feff', 'pha_feff',
'red_fact', 'lam', 'rep'):
setattr(self, attr, np.array(getattr(args, attr).contents[:]))
# some data needs recasting, reformatting
self.nnnn_out = bool(self.nnnn_out)
self.json_out = bool(self.json_out)
self.verbose = bool(self.verbose)
self.rat = self.rat.reshape((2+FEFF_maxleg, 3)).transpose()*BOHR
if __name__ == '__main__':
path = ScatteringPath(phase_file='phase.pad')
path.set_absorber( x=0.01, y=0.1, z=0.01)
path.add_scatterer(x=1.8058, y=0.005, z=1.8063, ipot=1)
path.degen = 12
path.calculate_xafs()
print('# Calculate EXAFS with PhaseFile: {:s}'.format(path.phase_file))
print('# Path Geometry: \n# IPOT IZ X Y Z')
for i in range(path.nleg):
ipot = path.ipot[i]
iz = path.iz[ipot]
rat = path.rat[:,i]
print("# %2i %2i %8.4f %8.4f %8.4f" % (ipot,iz, rat[0], rat[1], rat[2]))
print("# Polarization: {:d}, ellipticity={:4f}".format(path.ipol, path.ellip))
print("# Polarization E Vector = {:s}".format(", ".join(["%.4f" % a for a in path.evec])))
print("# Polarization X Vector = {:s}".format(", ".join(["%.4f" % a for a in path.xivec])))
print("# Path Settings")
for attr in ('rs', 'vint', 'xmu', 'edge', 'kf', 'rnorman', 'gamach'):
print("# {:8s} = {:+4f} ".format(attr, getattr(path, attr)))
for attr in ('exch_label', 'genfmt_version'):
|
print("Path settings: degen=%10.5f, xmu=%10.5f, kf=%10.5f" % (path.degen, path.xmu, path.kf))
npts = 1 + max(np.where(path.kfeff > 0)[0])
print("# k rep real_phc phase_feff mag_feff red_factor lambda ")
fmt = " %6.3f %11.7f %11.7f %11.7f %11.7f %11.7f %11.7f"
for i in range(int(npts/3.0)):
print(fmt % (path.kfeff[i], path.rep[i], path.real_phc[i], path.pha_feff[i],
path.mag_feff[i], path.red_fact[i], path.lam[i]))
# print(fmt.format(path.kfeff[i], path.rep[i], path.real_phc[i],
# path.mag_feff[i], path.red_fact[i], path.lam[i]))
| print("# {:8s} = {:s} ".format(attr, getattr(path, attr))) | conditional_block |
feffpath.py | #!/usr/bin/python
"""
Feff85L EXAFS Scatternig Path for Python
"""
from __future__ import print_function
import os
import sys
import ctypes
from ctypes import POINTER, pointer, c_int, c_long, c_char_p, c_double
import numpy as np
def load_feff8lpath():
dllform = 'lib{:s}.so'
pathsep = ':'
loadlib = ctypes.cdll
if sys.platform.lower().startswith('darwin'):
dllform = 'lib{:s}.dylib'
if os.name == 'nt':
dllform = '{:s}.dll'
pathsep = ';'
loadlib = ctypes.windll
dllname = dllform.format('feff8lpath')
spath = ['.']
spath.extend(os.environ.get('LD_LIBRARY_PATH','').split(pathsep))
spath.extend(os.environ.get('PATH', '').split(pathsep))
spath.extend(['../../local_install/lib/', '../../src/GENFMT/lib'])
for dname in spath:
fullname = os.path.join(dname, dllname)
if os.path.exists(fullname):
return loadlib.LoadLibrary(fullname)
return None
FLIB = load_feff8lpath()
FEFF_maxpts = 150 # nex
FEFF_maxpot = 11 # nphx
FEFF_maxleg = 9 # legtot
BOHR = 0.5291772490
# bytes/str conversion
str2bytes = bytes2str = str
if sys.version_info[0] == 3:
def bytes2str(val):
if isinstance(val, str):
return val
if isinstance(val, bytes):
return str(val, 'utf-8')
return str(val)
def str2bytes(val):
if isinstance(val, bytes):
return val
return bytes(val, 'utf-8')
def with_phase_file(fcn):
"""decorator to ensure that the wrapped function either
has a non-None 'phase_file' argument or that that
self.phase_file is not None
"""
errmsg = "function '%s' needs a non-None phase_file"
def | (*args, **keywords):
"needs phase_file"
phase_file = keywords.get('phase_file', None)
if phase_file is None:
phase_file = getattr(args[0], 'phase_file', None)
if phase_file is None:
raise AttributeError(errmsg % fcn.__name__)
else:
setattr(args[0], 'phase_file', phase_file)
# raise Warning(errmsg % fcn.__name__)
return fcn(*args, **keywords)
wrapper.__doc__ = fcn.__doc__
wrapper.__name__ = fcn.__name__
wrapper.__filename__ = fcn.__code__.co_filename
wrapper.__dict__.update(fcn.__dict__)
return wrapper
class ScatteringPath(object):
"""A Scatering Path for calculating a XAFS signal with Feff
A calculation requires a Potentials and Phase Shift calculation
in PAD format from Feff85, and a list of scattering paths
Usage:
------
# create path
path = ScatteringPath(phase_file='phase.pad')
# list 'ipot' and labels for absorber, scattererers
path.list_scatterers()
# set coords for absorbing atom
path.set_absorber(x=0., y=0., z=0.)
# add scattering atom
path.add_scatterer(x=1.5, y=1.5, z=1.5, ipot=1)
# calculate basic (unaltered) XAFS contributions
path.calcuate_xafs()
"""
def __init__(self, phase_file=None):
self.phase_file = phase_file
self.clear()
def clear(self):
"""reset all path data"""
self.index = 1
self.degen = 1.
self.nnnn_out = False
self.json_out = False
self.verbose = False
self.ipol = 0
self.ellip = 0.
self.nepts = 0
self.genfmt_order = 2
self.genfmt_vers = ""
self.exch_label = ""
self.rs = 0.
self.vint = 0.
self.xmu = 0.
self.edge = 0.
self.kf = 0.
self.rnorman = 0.
self.gamach = 0.
self.nepts = FEFF_maxpts
dargs = dict(dtype=np.float64, order='F')
largs = dict(dtype=np.int32, order='F')
self.evec = np.zeros(3, **dargs)
self.xivec = np.zeros(3, **dargs)
self.ipot = np.zeros(1+FEFF_maxleg, **largs)
self.beta = np.zeros(1+FEFF_maxleg, **dargs)
self.eta = np.zeros(2+FEFF_maxleg, **dargs)
self.ri = np.zeros(FEFF_maxleg, **dargs)
self.rat = np.zeros((3, 2+FEFF_maxleg), **dargs)
self.iz = np.zeros(1+FEFF_maxpot, **largs)
self.kfeff = np.zeros(FEFF_maxpts, **dargs)
self.real_phc = np.zeros(FEFF_maxpts, **dargs)
self.mag_feff = np.zeros(FEFF_maxpts, **dargs)
self.pha_feff = np.zeros(FEFF_maxpts, **dargs)
self.red_fact = np.zeros(FEFF_maxpts, **dargs)
self.lam = np.zeros(FEFF_maxpts, **dargs)
self.rep = np.zeros(FEFF_maxpts, **dargs)
self.nleg = 1
@with_phase_file
def list_scatterers(self, phase_file=None):
"""list Feff Potentials atoms ('ipots') fo phase file"""
atoms = []
with open(self.phase_file,'r') as fh:
line1_words = fh.readline().strip().split()
text = fh.readlines()
nphases = int(line1_words[4])
for line in text[4:]:
if line.startswith('$'): continue
words = line.split()
atoms.append((int(words[1]), words[2]))
if len(atoms) > nphases:
break
out = ["# Potential Z Symbol"]
for ipot, atom in enumerate(atoms):
out.append(" %2i %3i %s" % (ipot, atom[0], atom[1]))
return "\n".join(out)
@with_phase_file
def set_absorber(self, x=0., y=0., z=0., phase_file=None):
"""set coordinates for absorbing atom ('ipot'=0)"""
self.rat[0, 0] = x
self.rat[1, 0] = y
self.rat[2, 0] = z
@with_phase_file
def add_scatterer(self, x=0., y=0., z=0., ipot=1, phase_file=None):
self.rat[0, self.nleg] = x
self.rat[1, self.nleg] = y
self.rat[2, self.nleg] = z
self.ipot[self.nleg] = ipot
self.nleg += 1
# set final atom coords to same as absorber
self.rat[0, self.nleg] = self.rat[0, 0]
self.rat[1, self.nleg] = self.rat[1, 0]
self.rat[2, self.nleg] = self.rat[2, 0]
self.ipot[self.nleg] = self.ipot[0]
@with_phase_file
def calculate_xafs(self, phase_file=None):
class args: pass
# strings / char*. Note fixed length to match Fortran
args.phase_file = (self.phase_file + ' '*256)[:256]
args.exch_label = str2bytes(' '*8)
args.genfmt_version = str2bytes(' '*30)
# integers, including booleans
for attr in ('index', 'nleg', 'genfmt_order', 'ipol', 'nnnn_out',
'json_out', 'verbose', 'nepts'):
setattr(args, attr, pointer(c_long(int(getattr(self, attr)))))
# doubles
for attr in ('degen', 'rs', 'vint', 'xmu', 'edge', 'kf', 'rnorman',
'gamach', 'ellip'):
setattr(args, attr, pointer(c_double(getattr(self, attr))))
# integer arrays
for attr in ('ipot', 'iz'):
arr = getattr(self, attr)
cdata = arr.ctypes.data_as(POINTER(arr.size*c_int))
setattr(args, attr, cdata)
# double arrays
self.rat = self.rat/BOHR
for attr in ('evec', 'xivec', 'rat', 'ri', 'beta', 'eta',
'kfeff', 'real_phc', 'mag_feff', 'pha_feff',
'red_fact', 'lam', 'rep'):
arr = getattr(self, attr)
cdata = arr.ctypes.data_as(POINTER(arr.size*c_double))
setattr(args, attr, cdata)
x = FLIB.calc_onepath(args.phase_file, args.index, args.nleg,
args.degen, args.genfmt_order,
args.exch_label, args.rs, args.vint,
args.xmu, args.edge, args.kf, args.rnorman,
args.gamach, args.genfmt_version, args.ipot,
args.rat, args.iz, args.ipol, args.evec,
args.ellip, args.xivec, args.nnnn_out,
args.json_out, args.verbose, args.ri,
args.beta, args.eta, args.nepts, args.kfeff,
args.real_phc, args.mag_feff, args.pha_feff,
args.red_fact, args.lam, args.rep)
self.exch_label = bytes2str(args.exch_label).strip()
self.genfmt_version = bytes2str(args.genfmt_version).strip()
for attr in ('index', 'nleg', 'genfmt_order', 'degen', 'rs',
'vint', 'xmu', 'edge', 'kf', 'rnorman', 'gamach',
'ipol', 'ellip', 'nnnn_out', 'json_out', 'verbose',
'nepts'):
setattr(self, attr, getattr(args, attr).contents.value)
for attr in ('ipot', 'evec', 'xivec', 'beta', 'eta', 'ri', 'rat',
'iz', 'kfeff', 'real_phc', 'mag_feff', 'pha_feff',
'red_fact', 'lam', 'rep'):
setattr(self, attr, np.array(getattr(args, attr).contents[:]))
# some data needs recasting, reformatting
self.nnnn_out = bool(self.nnnn_out)
self.json_out = bool(self.json_out)
self.verbose = bool(self.verbose)
self.rat = self.rat.reshape((2+FEFF_maxleg, 3)).transpose()*BOHR
if __name__ == '__main__':
path = ScatteringPath(phase_file='phase.pad')
path.set_absorber( x=0.01, y=0.1, z=0.01)
path.add_scatterer(x=1.8058, y=0.005, z=1.8063, ipot=1)
path.degen = 12
path.calculate_xafs()
print('# Calculate EXAFS with PhaseFile: {:s}'.format(path.phase_file))
print('# Path Geometry: \n# IPOT IZ X Y Z')
for i in range(path.nleg):
ipot = path.ipot[i]
iz = path.iz[ipot]
rat = path.rat[:,i]
print("# %2i %2i %8.4f %8.4f %8.4f" % (ipot,iz, rat[0], rat[1], rat[2]))
print("# Polarization: {:d}, ellipticity={:4f}".format(path.ipol, path.ellip))
print("# Polarization E Vector = {:s}".format(", ".join(["%.4f" % a for a in path.evec])))
print("# Polarization X Vector = {:s}".format(", ".join(["%.4f" % a for a in path.xivec])))
print("# Path Settings")
for attr in ('rs', 'vint', 'xmu', 'edge', 'kf', 'rnorman', 'gamach'):
print("# {:8s} = {:+4f} ".format(attr, getattr(path, attr)))
for attr in ('exch_label', 'genfmt_version'):
print("# {:8s} = {:s} ".format(attr, getattr(path, attr)))
print("Path settings: degen=%10.5f, xmu=%10.5f, kf=%10.5f" % (path.degen, path.xmu, path.kf))
npts = 1 + max(np.where(path.kfeff > 0)[0])
print("# k rep real_phc phase_feff mag_feff red_factor lambda ")
fmt = " %6.3f %11.7f %11.7f %11.7f %11.7f %11.7f %11.7f"
for i in range(int(npts/3.0)):
print(fmt % (path.kfeff[i], path.rep[i], path.real_phc[i], path.pha_feff[i],
path.mag_feff[i], path.red_fact[i], path.lam[i]))
# print(fmt.format(path.kfeff[i], path.rep[i], path.real_phc[i],
# path.mag_feff[i], path.red_fact[i], path.lam[i]))
| wrapper | identifier_name |
main.rs | use actix_web::{web, App, HttpResponse, HttpServer, Responder};
use rand::Rng;
use serde::Serialize;
use std::cmp::Ordering;
use std::io;
fn index() -> impl Responder {
let my_string = String::from("Rust Async");
// first_word works on slices of `String`s
let _word = first_word(&my_string[..]);
let my_string_literal = "Rust Async";
// first_word works on slices of string literals
let _word = first_word(&my_string_literal[..]);
// Because string literals *are* string slices already,
// this works too, without the slice syntax!
let _word = first_word(my_string_literal);
println!(" word: {}", _word);
HttpResponse::Ok().body(_word)
}
#[derive(Serialize)]
struct Country {
country_code: String,
country_name: String,
}
fn get_country_list() -> impl Responder {
let mut vec: Vec<Country> = Vec::new();
vec.push(Country {
country_code: "PH".to_string(),
country_name: "Philippines".to_string(),
});
vec.push(Country {
country_code: "MY".to_string(),
country_name: "Malaysia".to_string(),
});
vec.push(Country {
country_code: "ID".to_string(),
country_name: "Indonesia".to_string(),
});
return web::Json(vec);
}
fn guess_num() {
println!("Guess the number!");
let secret_number = rand::thread_rng().gen_range(1, 101);
loop {
println!("Please input your guess.");
let mut guess = String::new();
io::stdin()
.read_line(&mut guess)
.expect("Failed to read line");
let guess: u32 = match guess.trim().parse() {
Ok(num) => num,
Err(_) => continue,
};
println!("You guessed: {}", guess);
match guess.cmp(&secret_number) {
Ordering::Less => println!("Too small!"),
Ordering::Greater => println!("Too big!"),
Ordering::Equal => {
println!("You win!");
break;
}
}
}
println!("The secret number is: {}", secret_number);
}
fn do_compound() {
let x: (i32, f64, u8) = (500, 6.4, 1);
let five_hundred = x.0;
let six_point_four = x.1;
let one = x.2;
println!(
"five_hundred: {}, six_point_four:{}, other:{}",
five_hundred, six_point_four, one
);
let a: [i32; 5] = [1, 2, 3, 4, 5];
println!(" Array element :{}", a[0]);
}
fn first_word(s: &str) -> &str {
let bytes = s.as_bytes();
for (i, &item) in bytes.iter().enumerate() {
if item == b' ' {
return &s[0..i];
}
}
&s[..]
}
fn string_slice() {
let my_string = String::from("Rust Async");
// first_word works on slices of `String`s
let _word = first_word(&my_string[..]);
let my_string_literal = "Rust Async";
// first_word works on slices of string literals
let _word = first_word(&my_string_literal[..]);
// Because string literals *are* string slices already,
// this works too, without the slice syntax!
let _word = first_word(my_string_literal);
println!(" word: {}", _word)
}
use std::collections::HashMap;
fn do_map() {
let mut map = HashMap::new();
map.insert(1, 2);
println!("map :{:?}", map);
let teams = vec![String::from("Blue"), String::from("Yellow")];
let initial_scores = vec![10, 50];
let mut scores: HashMap<_, _> = teams.iter().zip(initial_scores.iter()).collect();
println!("scores map :{:?}", scores);
for (key, value) in &scores {
println!("key:{}: value: {}", key, value);
}
let team_name = String::from("Blue");
println! {"team name : {:?}", scores.get(&team_name)};
let text = "hello world wonderful world";
let mut map = HashMap::new();
for word in text.split_whitespace() {
let count = map.entry(word).or_insert(10);
//println!("word: {}", word);
*count += 1;
println!("count:{}", *count);
}
println!("{:?}", map);
//
let mut s = String::from("你好");
s.push_str(", Bruce Li!");
s.push('耶');
println!("{}", s);
let s1 = String::from("Rust, ");
let s2 = String::from("faster!");
//// note s1 has been moved here and can no longer be used
let s3 = s1 + &s2;
println!("s3:{}", s3);
do_string();
}
fn do_string() {
let s1 = String::from("tic");
let s2 = String::from("tac");
let s3 = String::from("toe");
let s = s1 + "-" + &s2 + "-" + &s3;
println!("s: {}", s);
let s4 = String::from("suffix!");
let s = format!("{}-{}-{}", s2, s3, s4);
println!("s: {}", s);
//.bytes() //raw number
// for c in s.chars() {
// println!("{}", c);
// }
}
fn do_err() {
use std::fs::File;
//other way: let f = File::open("hello.txt").unwrap();
//let f = File::open("hello.txt").expect("Failed to open hello.txt");
let f = File::open("README.md");
let f = match f {
Ok(file) => file,
Err(error) => panic!("Problem opening the file: {:?}", error),
};
//A Shortcut for Propagating Errors: the ? Operator
}
fn largest(list: &[i32]) -> i32 {
let mut largest = list[0];
for &item in list.iter() {
if item > largest {
largest = item;
}
}
largest
}
//Another way we could implement largest is for the function to
// return a reference to a T value in the slice. I
fn get_gt<T: PartialOrd + Copy>(list: &[T]) -> T {
let mut largest = list[0];
for &item in list.iter() {
if item > largest {
largest = item;
}
}
largest
}
struct Point<T, U> {
x: T,
y: U,
}
impl<T, U> Point<T, U> {
fn mixup<V, W>(self, other: Point<V, W>) -> Point<T, W> {
Point {
x: self.x,
y: other.y,
}
}
}
fn do_trait() {
let number_list = vec![34, 50, 25, 100, 65];
let result = get_gt(&number_list);
println!("The largest number is {}", result);
let char_list = vec!['y', 'm', 'a', 'q'];
let result = get_gt(&char_list);
println!("The largest char is {}", result);
}
fn do_generic() {
let number_list = vec![34, 50, 25, 100, 65];
let result = largest(&number_list);
println!("The largest number is {}", result);
let number_list = vec![102, 34, 6000, 89, 54, 2, 43, 8];
let result = largest(&number_list);
println!("The largest number is {}", result);
let p1 = Point { x: 5, y: 10.4 };
let p2 = Point { x: "Hello", y: 'c' };
let p3 = p1.mixup(p2);
println!("p3.x = {}, p3.y = {}", p3.x, p3.y);
do_trait()
}
fn do_closure() {
let v1 = vec![1, 2, 3];
let v1_iter = v1.iter();
let total: i32 = v1_iter.sum();
assert_eq!(total, 6);
let v1: Vec<i32> = vec![1, 2, 3];
let v2: Vec<_> = v1.iter().map(|x| x + 1).collect();
assert_eq!(v2, vec![2, 3, 4]);
guessing_number::run_shoes_test();
guessing_number::calling_next_directly();
}
fn do_smart_p() {
let x = 5;
let y = &x;
assert_eq!(5, x);
assert_eq!(5, *y);
let x1 = 5;
let y1 = Box::new(x);
assert_eq!(5, x1);
assert_eq!(5, *y1);
}
fn do_concurrency() {
use std::thread;
use std::time::Duration;
let handle = thread::spawn(|| {
for i in 1..6 {
println!("hi number {} from the spawned thread!", i);
thread::sleep(Duration::from_millis(1));
}
});
for i in 1..5 {
println!("hi number {} from the main thread!", i);
thread::sleep(Duration::from_millis(1));
}
handle.join().unwrap();
do_concurrency1();
}
fn do_concurrency1() {
use std::sync::mpsc;
use std::thread;
use std::time::Duration;
let (tx, rx) = mpsc::channel();
thread::spawn(move || {
let vals = vec![
String::from("你好!"),
String::from("你去做什么?"),
String::from("Why?"),
String::from("那很好呀!"),
];
for val in vals {
tx.send(val).unwrap();
// thread::sleep(Duration::from_secs(1));
}
});
for received in rx {
println!("Got: {}", received);
}
do_concurrency2();
do_concurrency3();
do_match()
}
fn do_match_p() {
println!("one");
}
fn do_match() {
let x = 1;
match x {
1 => do_match_p(),
2 => println!("two"),
3 => println!("three"),
_ => println!("anything"),
}
//Matching Named Variables
let x = Some(5);
let y = 10;
match x {
Some(50) => println!("Got 50"),
Some(y) => println!("Matched, y = {:?}", y),
_ => println!("Default case, x = {:?}", x),
}
println!("at the end: x = {:?}, y = {:?}", x, y);
let x = 1;
match x {
1 | 2 => println!("one or two"),
3 => println!("three"),
_ => println!("anything"),
}
let x = 2;
match x {
1...5 => println!("one through five"),
_ => println!("something else"),
}
let x = 'A';
match x {
'a'...'j' => println!("early ASCII letter"),
'k'...'z' => println!("late ASCII letter"),
'A'...'Z' => println!("UP ASCII letter"),
_ => println!("something else"),
}
//Destructuring to Break Apart Values
let p = Point { x: 0, y: 7 };
match p {
Point { x, y: 0 } => println!("On the x axis at {}", x),
Point { x: 0, y } => println!("On the y axis at {}", y),
Point { x, y } => println!("On neither axis: ({}, {})", x, y),
}
let msg = Message::ChangeColor(Color::Hsv(0, 160, 255));
match msg {
Message::ChangeColor(Color::Rgb(r, g, b)) => {
println!("Change the color to red {}, green {}, and blue {}", r, g, b)
}
Message::ChangeColor(Color::Hsv(h, s, v)) => println!(
"Change the color to hue {}, saturation {}, and value {}",
h, s, v
),
_ => (),
}
//bind
do_match1();
//Rust's unsafe code
do_unsafe();
}
//Rust unsafe code demo
fn do_unsafe() {
//doesn’t enforce these memory safety guarantees.
//Gaining extra superpowers.
//You can take four actions in unsafe Rust
//Dereference a raw pointer
//Call an unsafe function or method
//Access or modify a mutable static variable
//Implement an unsafe trait
}
fn do_match1() {
let msg = MessageNum::Hello { id: 5 };
match msg {
MessageNum::Hello {
id: id_variable @ 3...7,
} => println!("Found an id in range: {}", id_variable),
MessageNum::Hello { id: 10...12 } => println!("Found an id in another range"),
MessageNum::Hello { id } => println!("Found some other id: {}", id),
}
}
enum MessageNum {
Hello { id: i32 },
}
enum Color {
Rgb(i32, i32, i32),
Hsv(i32, i32, i32),
}
enum Message {
Quit,
Move { x: i32, y: i32 },
Write(String),
ChangeColor(Color),
}
//Similarities Between RefCell<T>/Rc<T> and Mutex<T>/Arc<T>
fn do_concurrency2() {
use std::sync::mpsc;
use std::thread;
use std::time::Duration;
//VIP: producer and consumer model
let (tx, rx) = mpsc::channel();
let tx1 = mpsc::Sender::clone(&tx);
thread::spawn(move || {
let vals = vec![
String::from("1:你好!"),
String::from("1:你去做什么?"),
String::from("1:Why?"),
String::from("1:那很好呀!"),
];
for val in vals {
tx1.send(val).unwrap();
thread::sleep(Duration::from_secs(1));
}
});
thread::spawn(move || {
let vals = vec![
String::from("2:你好!"),
String::from("2:你去做什么?"),
String::from("2:Why?"),
String::from("2:那很好呀!"),
];
for val in vals {
tx.send(val).unwrap();
thread::sleep(Duration::from_secs(1));
}
});
for received in rx {
println!("Got: {}", received);
}
}
fn do_concurrency3() {
use std::sync::{Arc, Mutex};
use std::thread;
let counter = Arc::new(Mutex::new(0));
let mut handles = vec![];
for _ in 0..10 {
let counter = Arc::clone(&counter);
let handle = thread::spawn(move || {
let mut num = counter.lock().unwrap();
*num += 1;
});
handles.push(handle);
}
for handle in handles {
println!("Result: {}", *counter.lock().unwrap());
handle.join().unwrap();
}
println!("Result: {}", *counter.lock().unwrap());
}
trait Show {
fn show(&self) -> String;
}
impl Show for i32 {
fn show(&self) -> String {
format!("i32 value : {}", self)
}
}
impl Show for f64 {
fn show(&self) -> String {
format!("f64 value : {}", self)
}
}
trait Quack {
fn quack(&self);
}
struct Duck();
impl Quack for Duck {
fn quack(&self) {
println!("quack!");
}
}
struct RandomBird {
is_a_parrot: bool,
}
impl Quack for RandomBird {
fn quack(&self) {
if !self.is_a_parrot {
println!("quack!");
} else {
println!("squawk!");
}
}
}
// and why the hell not!
impl Quack for i32 {
fn quack(&self) {
for i in 0..*self {
print!("quack {} ", i);
}
println!();
}
}
trait Name {
fn name(&self) -> String;
fn upper_case(&self) -> String {
self.name().to_uppercase()
}
}
struct Toy();
impl Name for Toy {
fn name(&self) -> String {
"Toy".to_string()
}
}
fn quack() {
let duck1 = Duck();
let duck2 = RandomBird { is_a_parrot: false };
let parrot = RandomBird { is_a_parrot: true };
let i = 4;
let ducks: Vec<&Quack> = vec![&duck1, &duck2, &parrot, &i];
for d in &ducks {
d.quack();
}
let t = Toy();
assert_eq!(t.name(), "Toy".to_string());
assert_eq!(t.upper_case(), "TOY".to_string());
}
fn do_oop() {
let nvalue = Box::new(78);
let fvalue = Box::new(98.88);
let vc: Vec<Box<Show>> = vec![nvalue, fvalue];
for d in &vc {
println!("show {}", d.show());
}
//oop interface
quack();
}
fn do_float() {
let x = 2.0; // f64
let y: f32 = 3.0; // f32
println!("x:{}, y:{} ", x, y);
do_compound();
//expression
println!("zero number ; {}", zero_plus(23));
let a = [10, 20];
for element in a.iter() {
println!("the value is: {}", element);
}
for number in (1..4).rev() {
print!("{}, ", number);
}
//slice
let s = String::from("The Rust Programming Language");
let s1 = &s;
let s2 = &s;
println!("s1: {}, s2: {}", s1, s2);
let s3 = &s;
println!("s3: {}", s3);
string_slice();
do_struct();
do_map();
//do_err();
do_generic();
do_closure();
do_smart_p();
do_concurrency();
do_oop();
}
fn zero_plus(i: i32) -> i32 {
0 + i
}
#[derive(Debug)]
struct Rectangle {
width: u32,
height: u32,
}
//fn area(r: &Rectangle) -> u32 {
// r.height * r.width
//}
impl Rectangle {
fn area(&self) -> u32 {
self.height * self.width
}
fn can_hold(&self, other: &Rectangle) -> bool {
self.width > other.width && self.height > other.height
}
fn square(size: u32) -> Rectangle {
Rectangle {
width: size,
height: size,
}
}
}
fn do_struct() {
let rect1 = Rectangle {
width: 20,
height: 50,
};
let rect2 = Rectangle {
width: 10,
height: 40,
};
let rect3 = Rectangle {
width: 60,
height: 45,
};
println!("rect1 area: {}", rect1.area());
println!("Can rect1 hold rect2? {}", rect1.can_hold(&rect2));
println!("Can rect1 hold rect3? {}", rect1.can_hold(&rect3));
println!("rect1: {:?}", &(Rectangle::square(3)));
// println!(
// "The area of the rectangle is {} square pixels.",
// area(&rect1)
// );
// println!("rect1: {:?}", &rect1);
}
fn do_init() {
//mut and default immutable
let mut i = 0;
println!("init i :{}", i);
i = 100;
println!("change i: {}", i);
// const declare
const MAX_POINTS: u32 = 100_000;
println!("constant variable MAX_POINT: {}", MAX_POINTS);
//shadowing
let x = 5;
let x = x + 1;
let x = x * 2;
println!("The value of x is: {}", x);
let spaces = " ";
let spaces = spaces.len();
println!("space number :{}", spaces);
// floating-point numbers
do_float();
//guess_num()
}
use std::fmt;
fn show_item<T: fmt::Display>(item: T) {
println!("Item: {}", item);
}
struct CanDisplay;
impl fmt::Display for CanDisplay {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "CanDisplay")
}
}
struct AlsoDisplay;
impl fmt::Display for AlsoDisplay {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "AlsoDisplay")
}
}
//1. Static Dispatch
fn do_static_dispatch() {
let a: CanDisplay = CanDisplay;
let b: AlsoDisplay = AlsoDisplay;
show_item(a); // stdout `Item: CanDisplay`
show_item(b); // stdout `Item: AlsoDisplay`
}
fn get_numbers(a: u32, b: u32) -> impl Iterator<Item = u32> {
(a..b).filter(|x| x % 100 == 0)
}
//2. Dynamic Dispatch
// impl trait
fn do_advanced_trait() {
for n in get_numbers(100, 1001) {
print!("{} \t", n);
}
}
//3. Specifying Placeholder Types in Trait Definitions with Associated Types
// pub trait Iterator {
// type Item;
// fn next(&mut self) -> Option<Self::Item>; | // }
//// Item is the placeholder type.
///
// 4. Fully Qualified Syntax for Disambiguation: Calling Methods with the Same Name
trait Pilot {
fn fly(&self);
}
trait Wizard {
fn fly(&self);
}
struct Human;
impl Pilot for Human {
fn fly(&self) {
println!("This is your captain speaking. Pilot!");
}
}
impl Wizard for Human {
fn fly(&self) {
println!("Wizard, up!");
}
}
impl Human {
fn fly(&self) {
println!("*waving arms furiously*");
}
}
fn do_advanced_trait2() {
let person = Human;
Pilot::fly(&person);
Wizard::fly(&person);
person.fly();
}
trait Animal {
fn baby_name() -> String;
}
struct Dog;
impl Dog {
fn baby_name() -> String {
String::from("Spot")
}
}
impl Animal for Dog {
fn baby_name() -> String {
String::from("puppy")
}
}
fn do_advanced_trait3() {
println!("A baby dog is called a {}", Dog::baby_name());
println!("A baby dog is called a {}", <Dog as Animal>::baby_name());
}
trait OutlinePrint: fmt::Display {
fn outline_print(&self) {
let output = self.to_string();
let len = output.len();
println!("{}", "*".repeat(len + 4));
println!("*{}*", " ".repeat(len + 2));
println!("* {} *", output);
println!("*{}*", " ".repeat(len + 2));
println!("{}", "*".repeat(len + 4));
}
}
struct PointXY {
x: i32,
y: i32,
}
impl OutlinePrint for PointXY {}
impl fmt::Display for PointXY {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "({}, {})", self.x, self.y)
}
}
//5. Using Super-traits to Require One Trait’s Functionality Within Another Trait
fn do_advanced_trait4() {
let xy = PointXY { x: 10, y: 30 };
xy.outline_print();
}
//6. Using the New-type Pattern to Implement External Traits on External Types
struct Wrapper(Vec<String>);
impl fmt::Display for Wrapper {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "[{}]", self.0.join(", "))
}
}
fn do_advanced_trait5() {
let w = Wrapper(vec![String::from("Hi, "), String::from("Rust!")]);
println!("w = {}", w);
}
fn do_trait_dispatch() {
do_static_dispatch();
do_advanced_trait();
do_advanced_trait2();
do_advanced_trait3();
do_advanced_trait4();
do_advanced_trait5();
}
use std::ops::Deref;
struct MyBox<T>(T);
impl<T> MyBox<T> {
fn new(x: T) -> MyBox<T> {
MyBox(x)
}
}
impl<T> Deref for MyBox<T> {
type Target = T;
fn deref(&self) -> &T {
&self.0
}
}
impl<T> Drop for MyBox<T> {
fn drop(&mut self) {
println!("dropping MyBox object from memory... ");
}
}
fn do_smart_pointer() {
//1. Using a Box<T> to store data on the heap
let x = Box::new(5);
println!("Box<T> on the heap , x = {}", x);
//2. Using Box<T> Like a Reference
let x = 5;
//let y = &x;
let y = Box::new(x);
assert_eq!(5, x);
assert_eq!(5, *y);
let x = 50;
assert_eq!(50, x);
let y = MyBox::new(x);
//drop method.
drop(y);
MyBox::new("Hello");
}
fn startup_web_server() {
println!("\nStartup Web Server...");
HttpServer::new(|| {
App::new()
.route("/", web::get().to(index))
.service(web::resource("/countries").route(web::get().to(get_country_list)))
})
.bind("0.0.0.0:9090")
.unwrap()
.run()
.unwrap();
println!(">>>exit");
}
use std::io::prelude::*;
use std::net::TcpStream;
use std::string::String;
static HTML: &str = "<!DOCTYPE html><html lang=\"en\"> <head><meta charset=\"utf-8\"><title>Hello!</title></head><body> <h1>Hello!</h1> <p>Hi from Rust</p></body></html>";
fn handle_connection(mut stream: TcpStream) {
let mut buffer = [0; 512];
stream.read(&mut buffer).unwrap();
let get = b"GET / HTTP/1.1\r\n";
let (status_line, content) = if buffer.starts_with(get) {
("HTTP/1.1 200 OK\r\n\r\n", HTML)
} else {
("HTTP/1.1 404 NOT FOUND\r\n\r\n", "404.....")
};
let response = format!("{}{}", status_line, content);
stream.write_all(response.as_bytes()).unwrap();
stream.flush().unwrap();
}
fn startup_multiple_threads_server() {
use guessing_number::ThreadPool;
use std::net::TcpListener;
let pool = ThreadPool::new(4);
let listener = TcpListener::bind("0.0.0.0:7878").unwrap();
for stream in listener.incoming() {
let stream = stream.unwrap();
println!("req ...");
pool.execute(|| {
handle_connection(stream);
});
}
}
fn main() {
do_init();
do_trait_dispatch();
do_smart_pointer();
//startup_multiple_threads_server();
//startup_web_server();
} | random_line_split | |
main.rs | use actix_web::{web, App, HttpResponse, HttpServer, Responder};
use rand::Rng;
use serde::Serialize;
use std::cmp::Ordering;
use std::io;
fn index() -> impl Responder {
let my_string = String::from("Rust Async");
// first_word works on slices of `String`s
let _word = first_word(&my_string[..]);
let my_string_literal = "Rust Async";
// first_word works on slices of string literals
let _word = first_word(&my_string_literal[..]);
// Because string literals *are* string slices already,
// this works too, without the slice syntax!
let _word = first_word(my_string_literal);
println!(" word: {}", _word);
HttpResponse::Ok().body(_word)
}
#[derive(Serialize)]
struct Country {
country_code: String,
country_name: String,
}
fn get_country_list() -> impl Responder {
let mut vec: Vec<Country> = Vec::new();
vec.push(Country {
country_code: "PH".to_string(),
country_name: "Philippines".to_string(),
});
vec.push(Country {
country_code: "MY".to_string(),
country_name: "Malaysia".to_string(),
});
vec.push(Country {
country_code: "ID".to_string(),
country_name: "Indonesia".to_string(),
});
return web::Json(vec);
}
fn guess_num() {
println!("Guess the number!");
let secret_number = rand::thread_rng().gen_range(1, 101);
loop {
println!("Please input your guess.");
let mut guess = String::new();
io::stdin()
.read_line(&mut guess)
.expect("Failed to read line");
let guess: u32 = match guess.trim().parse() {
Ok(num) => num,
Err(_) => continue,
};
println!("You guessed: {}", guess);
match guess.cmp(&secret_number) {
Ordering::Less => println!("Too small!"),
Ordering::Greater => println!("Too big!"),
Ordering::Equal => {
println!("You win!");
break;
}
}
}
println!("The secret number is: {}", secret_number);
}
fn do_compound() {
let x: (i32, f64, u8) = (500, 6.4, 1);
let five_hundred = x.0;
let six_point_four = x.1;
let one = x.2;
println!(
"five_hundred: {}, six_point_four:{}, other:{}",
five_hundred, six_point_four, one
);
let a: [i32; 5] = [1, 2, 3, 4, 5];
println!(" Array element :{}", a[0]);
}
fn first_word(s: &str) -> &str {
let bytes = s.as_bytes();
for (i, &item) in bytes.iter().enumerate() {
if item == b' ' {
return &s[0..i];
}
}
&s[..]
}
fn string_slice() {
let my_string = String::from("Rust Async");
// first_word works on slices of `String`s
let _word = first_word(&my_string[..]);
let my_string_literal = "Rust Async";
// first_word works on slices of string literals
let _word = first_word(&my_string_literal[..]);
// Because string literals *are* string slices already,
// this works too, without the slice syntax!
let _word = first_word(my_string_literal);
println!(" word: {}", _word)
}
use std::collections::HashMap;
fn do_map() {
let mut map = HashMap::new();
map.insert(1, 2);
println!("map :{:?}", map);
let teams = vec![String::from("Blue"), String::from("Yellow")];
let initial_scores = vec![10, 50];
let mut scores: HashMap<_, _> = teams.iter().zip(initial_scores.iter()).collect();
println!("scores map :{:?}", scores);
for (key, value) in &scores {
println!("key:{}: value: {}", key, value);
}
let team_name = String::from("Blue");
println! {"team name : {:?}", scores.get(&team_name)};
let text = "hello world wonderful world";
let mut map = HashMap::new();
for word in text.split_whitespace() {
let count = map.entry(word).or_insert(10);
//println!("word: {}", word);
*count += 1;
println!("count:{}", *count);
}
println!("{:?}", map);
//
let mut s = String::from("你好");
s.push_str(", Bruce Li!");
s.push('耶');
println!("{}", s);
let s1 = String::from("Rust, ");
let s2 = String::from("faster!");
//// note s1 has been moved here and can no longer be used
let s3 = s1 + &s2;
println!("s3:{}", s3);
do_string();
}
fn do_string() {
let s1 = String::from("tic");
let s2 = String::from("tac");
let s3 = String::from("toe");
let s = s1 + "-" + &s2 + "-" + &s3;
println!("s: {}", s);
let s4 = String::from("suffix!");
let s = format!("{}-{}-{}", s2, s3, s4);
println!("s: {}", s);
//.bytes() //raw number
// for c in s.chars() {
// println!("{}", c);
// }
}
fn do_err() {
use std::fs::File;
//other way: let f = File::open("hello.txt").unwrap();
//let f = File::open("hello.txt").expect("Failed to open hello.txt");
let f = File::open("README.md");
let f = match f {
Ok(file) => file,
Err(error) => panic!("Problem opening the file: {:?}", error),
};
//A Shortcut for Propagating Errors: the ? Operator
}
fn largest(list: &[i32]) -> i32 {
let mut largest = list[0];
for &item in list.iter() {
if item > largest {
largest = item;
}
}
largest
}
//Another way we could implement largest is for the function to
// return a reference to a T value in the slice. I
fn get_gt<T: PartialOrd + Copy>(list: &[T]) -> T {
let mut largest = list[0];
for &item in list.iter() {
if item > largest {
largest = item;
}
}
largest
}
struct Point<T, U> {
x: T,
y: U,
}
impl<T, U> Point<T, U> {
fn mixup<V, W>(self, other: Point<V, W>) -> Point<T, W> {
Point {
x: self.x,
y: other.y,
}
}
}
fn do_trait() {
let number_list = vec![34, 50, 25, 100, 65];
let result = get_gt(&number_list);
println!("The largest number is {}", result);
let char_list = vec!['y', 'm', 'a', 'q'];
let result = get_gt(&char_list);
println!("The largest char is {}", result);
}
fn do_generic() {
let number_list = vec![34, 50, 25, 100, 65];
let result = largest(&number_list);
println!("The largest number is {}", result);
let number_list = vec![102, 34, 6000, 89, 54, 2, 43, 8];
let result = largest(&number_list);
println!("The largest number is {}", result);
let p1 = Point { x: 5, y: 10.4 };
let p2 = Point { x: "Hello", y: 'c' };
let p3 = p1.mixup(p2);
println!("p3.x = {}, p3.y = {}", p3.x, p3.y);
do_trait()
}
fn do_closure() {
let v1 = vec![1, 2, 3];
let v1_iter = v1.iter();
let total: i32 = v1_iter.sum();
assert_eq!(total, 6);
let v1: Vec<i32> = vec![1, 2, 3];
let v2: Vec<_> = v1.iter().map(|x| x + 1).collect();
assert_eq!(v2, vec![2, 3, 4]);
guessing_number::run_shoes_test();
guessing_number::calling_next_directly();
}
fn do_smart_p() {
let x = 5;
let y = &x;
assert_eq!(5, x);
assert_eq!(5, *y);
let x1 = 5;
let y1 = Box::new(x);
assert_eq!(5, x1);
assert_eq!(5, *y1);
}
fn do_concurrency() {
use std::thread;
use std::time::Duration;
let handle = thread::spawn(|| {
for i in 1..6 {
println!("hi number {} from the spawned thread!", i);
thread::sleep(Duration::from_millis(1));
}
});
for i in 1..5 {
println!("hi number {} from the main thread!", i);
thread::sleep(Duration::from_millis(1));
}
handle.join().unwrap();
do_concurrency1();
}
fn do_concurrency1() {
use std::sync::mpsc;
use std::thread;
use std::time::Duration;
let (tx, rx) = mpsc::channel();
thread::spawn(move || {
let vals = vec![
String::from("你好!"),
String::from("你去做什么?"),
String::from("Why?"),
String::from("那很好呀!"),
];
for val in vals {
tx.send(val).unwrap();
// thread::sleep(Duration::from_secs(1));
}
});
for received in rx {
println!("Got: {}", received);
}
do_concurrency2();
do_concurrency3();
do_match()
}
fn do_match_p() {
println!("one");
}
fn do_match() {
let x = 1;
match x {
1 => do_match_p(),
2 => println!("two"),
3 => println!("three"),
_ => println!("anything"),
}
//Matching Named Variables
let x = Some(5);
let y = 10;
match x {
Some(50) => println!("Got 50"),
Some(y) => println!("Matched, y = {:?}", y),
_ => println!("Default case, x = {:?}", x),
}
println!("at the end: x = {:?}, y = {:?}", x, y);
let x = 1;
match x {
1 | 2 => println!("one or two"),
3 => println!("three"),
_ => println!("anything"),
}
let x = 2;
match x {
1...5 => println!("one through five"),
_ => println!("something else"),
}
let x = 'A';
match x {
'a'...'j' => println!("early ASCII letter"),
'k'...'z' => println!("late ASCII letter"),
'A'...'Z' => println!("UP ASCII letter"),
_ => println!("something else"),
}
//Destructuring to Break Apart Values
let p = Point { x: 0, y: 7 };
match p {
Point { x, y: 0 } => println!("On the x axis at {}", x),
Point { x: 0, y } => println!("On the y axis at {}", y),
Point { x, y } => println!("On neither axis: ({}, {})", x, y),
}
let msg = Message::ChangeColor(Color::Hsv(0, 160, 255));
match msg {
Message::ChangeColor(Color::Rgb(r, g, b)) => {
println!("Change the color to red {}, green {}, and blue {}", r, g, b)
}
Message::ChangeColor(Color::Hsv(h, s, v)) => println!(
"Change the color to hue {}, saturation {}, and value {}",
h, s, v
),
_ => (),
}
//bind
do_match1();
//Rust's unsafe code
do_unsafe();
}
//Rust unsafe code demo
fn do_unsafe() {
//doesn’t enforce these memory safety guarantees.
//Gaining extra superpowers.
//You can take four actions in unsafe Rust
//Dereference a raw pointer
//Call an unsafe function or method
//Access or modify a mutable static variable
//Implement an unsafe trait
}
fn do_match1() {
let msg = MessageNum::Hello { id: 5 };
match msg {
MessageNum::Hello {
id: id_variable @ 3...7,
} => println!("Found an id in range: {}", id_variable),
MessageNum::Hello { id: 10...12 } => println!("Found an id in another range"),
MessageNum::Hello { id } => println!("Found some other id: {}", id),
}
}
enum MessageNum {
Hello { id: i32 },
}
enum Color {
Rgb(i32, i32, i32),
Hsv(i32, i32, i32),
}
enum Message {
Quit,
Move { x: i32, y: i32 },
Write(String),
ChangeColor(Color),
}
//Similarities Between RefCell<T>/Rc<T> and Mutex<T>/Arc<T>
fn do_concurrency2() {
use std::sync::mpsc;
use std::thread;
use std::time::Duration;
//VIP: producer and consumer model
let (tx, rx) = mpsc::channel();
let tx1 = mpsc::Sender::clone(&tx);
thread::spawn(move || {
let vals = vec![
String::from("1:你好!"),
String::from("1:你去做什么?"),
String::from("1:Why?"),
String::from("1:那很好呀!"),
];
for val in vals {
tx1.send(val).unwrap();
thread::sleep(Duration::from_secs(1));
}
});
thread::spawn(move || {
let vals = vec![
String::from("2:你好!"),
String::from("2:你去做什么?"),
String::from("2:Why?"),
String::from("2:那很好呀!"),
];
for val in vals {
tx.send(val).unwrap();
thread::sleep(Duration::from_secs(1));
}
});
for received in rx {
println!("Got: {}", received);
}
}
fn do_concurrency3() {
use std::sync::{Arc, Mutex};
use std::thread;
let counter = Arc::new(Mutex::new(0));
let mut handles = vec![];
for _ in 0..10 {
let counter = Arc::clone(&counter);
let handle = thread::spawn(move || {
let mut num = counter.lock().unwrap();
*num += 1;
});
handles.push(handle);
}
for handle in handles {
println!("Result: {}", *counter.lock().unwrap());
handle.join().unwrap();
}
println!("Result: {}", *counter.lock().unwrap());
}
trait Show {
fn show(&self) -> String;
}
impl Show for i32 {
fn show(&self) -> String {
format!("i32 value : {}", self)
}
}
impl Show for f64 {
fn show(&self) -> String {
format!("f64 value : {}", self)
}
}
trait Quack {
fn quack(& | );
}
struct Duck();
impl Quack for Duck {
fn quack(&self) {
println!("quack!");
}
}
struct RandomBird {
is_a_parrot: bool,
}
impl Quack for RandomBird {
fn quack(&self) {
if !self.is_a_parrot {
println!("quack!");
} else {
println!("squawk!");
}
}
}
// and why the hell not!
impl Quack for i32 {
fn quack(&self) {
for i in 0..*self {
print!("quack {} ", i);
}
println!();
}
}
trait Name {
fn name(&self) -> String;
fn upper_case(&self) -> String {
self.name().to_uppercase()
}
}
struct Toy();
impl Name for Toy {
fn name(&self) -> String {
"Toy".to_string()
}
}
fn quack() {
let duck1 = Duck();
let duck2 = RandomBird { is_a_parrot: false };
let parrot = RandomBird { is_a_parrot: true };
let i = 4;
let ducks: Vec<&Quack> = vec![&duck1, &duck2, &parrot, &i];
for d in &ducks {
d.quack();
}
let t = Toy();
assert_eq!(t.name(), "Toy".to_string());
assert_eq!(t.upper_case(), "TOY".to_string());
}
fn do_oop() {
let nvalue = Box::new(78);
let fvalue = Box::new(98.88);
let vc: Vec<Box<Show>> = vec![nvalue, fvalue];
for d in &vc {
println!("show {}", d.show());
}
//oop interface
quack();
}
fn do_float() {
let x = 2.0; // f64
let y: f32 = 3.0; // f32
println!("x:{}, y:{} ", x, y);
do_compound();
//expression
println!("zero number ; {}", zero_plus(23));
let a = [10, 20];
for element in a.iter() {
println!("the value is: {}", element);
}
for number in (1..4).rev() {
print!("{}, ", number);
}
//slice
let s = String::from("The Rust Programming Language");
let s1 = &s;
let s2 = &s;
println!("s1: {}, s2: {}", s1, s2);
let s3 = &s;
println!("s3: {}", s3);
string_slice();
do_struct();
do_map();
//do_err();
do_generic();
do_closure();
do_smart_p();
do_concurrency();
do_oop();
}
fn zero_plus(i: i32) -> i32 {
0 + i
}
#[derive(Debug)]
struct Rectangle {
width: u32,
height: u32,
}
//fn area(r: &Rectangle) -> u32 {
// r.height * r.width
//}
impl Rectangle {
fn area(&self) -> u32 {
self.height * self.width
}
fn can_hold(&self, other: &Rectangle) -> bool {
self.width > other.width && self.height > other.height
}
fn square(size: u32) -> Rectangle {
Rectangle {
width: size,
height: size,
}
}
}
fn do_struct() {
let rect1 = Rectangle {
width: 20,
height: 50,
};
let rect2 = Rectangle {
width: 10,
height: 40,
};
let rect3 = Rectangle {
width: 60,
height: 45,
};
println!("rect1 area: {}", rect1.area());
println!("Can rect1 hold rect2? {}", rect1.can_hold(&rect2));
println!("Can rect1 hold rect3? {}", rect1.can_hold(&rect3));
println!("rect1: {:?}", &(Rectangle::square(3)));
// println!(
// "The area of the rectangle is {} square pixels.",
// area(&rect1)
// );
// println!("rect1: {:?}", &rect1);
}
fn do_init() {
//mut and default immutable
let mut i = 0;
println!("init i :{}", i);
i = 100;
println!("change i: {}", i);
// const declare
const MAX_POINTS: u32 = 100_000;
println!("constant variable MAX_POINT: {}", MAX_POINTS);
//shadowing
let x = 5;
let x = x + 1;
let x = x * 2;
println!("The value of x is: {}", x);
let spaces = " ";
let spaces = spaces.len();
println!("space number :{}", spaces);
// floating-point numbers
do_float();
//guess_num()
}
use std::fmt;
fn show_item<T: fmt::Display>(item: T) {
println!("Item: {}", item);
}
struct CanDisplay;
impl fmt::Display for CanDisplay {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "CanDisplay")
}
}
struct AlsoDisplay;
impl fmt::Display for AlsoDisplay {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "AlsoDisplay")
}
}
//1. Static Dispatch
fn do_static_dispatch() {
let a: CanDisplay = CanDisplay;
let b: AlsoDisplay = AlsoDisplay;
show_item(a); // stdout `Item: CanDisplay`
show_item(b); // stdout `Item: AlsoDisplay`
}
fn get_numbers(a: u32, b: u32) -> impl Iterator<Item = u32> {
(a..b).filter(|x| x % 100 == 0)
}
//2. Dynamic Dispatch
// impl trait
fn do_advanced_trait() {
for n in get_numbers(100, 1001) {
print!("{} \t", n);
}
}
//3. Specifying Placeholder Types in Trait Definitions with Associated Types
// pub trait Iterator {
// type Item;
// fn next(&mut self) -> Option<Self::Item>;
// }
//// Item is the placeholder type.
///
// 4. Fully Qualified Syntax for Disambiguation: Calling Methods with the Same Name
trait Pilot {
fn fly(&self);
}
trait Wizard {
fn fly(&self);
}
struct Human;
impl Pilot for Human {
fn fly(&self) {
println!("This is your captain speaking. Pilot!");
}
}
impl Wizard for Human {
fn fly(&self) {
println!("Wizard, up!");
}
}
impl Human {
fn fly(&self) {
println!("*waving arms furiously*");
}
}
fn do_advanced_trait2() {
let person = Human;
Pilot::fly(&person);
Wizard::fly(&person);
person.fly();
}
trait Animal {
fn baby_name() -> String;
}
struct Dog;
impl Dog {
fn baby_name() -> String {
String::from("Spot")
}
}
impl Animal for Dog {
fn baby_name() -> String {
String::from("puppy")
}
}
fn do_advanced_trait3() {
println!("A baby dog is called a {}", Dog::baby_name());
println!("A baby dog is called a {}", <Dog as Animal>::baby_name());
}
trait OutlinePrint: fmt::Display {
fn outline_print(&self) {
let output = self.to_string();
let len = output.len();
println!("{}", "*".repeat(len + 4));
println!("*{}*", " ".repeat(len + 2));
println!("* {} *", output);
println!("*{}*", " ".repeat(len + 2));
println!("{}", "*".repeat(len + 4));
}
}
struct PointXY {
x: i32,
y: i32,
}
impl OutlinePrint for PointXY {}
impl fmt::Display for PointXY {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "({}, {})", self.x, self.y)
}
}
//5. Using Super-traits to Require One Trait’s Functionality Within Another Trait
fn do_advanced_trait4() {
let xy = PointXY { x: 10, y: 30 };
xy.outline_print();
}
//6. Using the New-type Pattern to Implement External Traits on External Types
struct Wrapper(Vec<String>);
impl fmt::Display for Wrapper {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "[{}]", self.0.join(", "))
}
}
fn do_advanced_trait5() {
let w = Wrapper(vec![String::from("Hi, "), String::from("Rust!")]);
println!("w = {}", w);
}
fn do_trait_dispatch() {
do_static_dispatch();
do_advanced_trait();
do_advanced_trait2();
do_advanced_trait3();
do_advanced_trait4();
do_advanced_trait5();
}
use std::ops::Deref;
struct MyBox<T>(T);
impl<T> MyBox<T> {
fn new(x: T) -> MyBox<T> {
MyBox(x)
}
}
impl<T> Deref for MyBox<T> {
type Target = T;
fn deref(&self) -> &T {
&self.0
}
}
impl<T> Drop for MyBox<T> {
fn drop(&mut self) {
println!("dropping MyBox object from memory... ");
}
}
fn do_smart_pointer() {
//1. Using a Box<T> to store data on the heap
let x = Box::new(5);
println!("Box<T> on the heap , x = {}", x);
//2. Using Box<T> Like a Reference
let x = 5;
//let y = &x;
let y = Box::new(x);
assert_eq!(5, x);
assert_eq!(5, *y);
let x = 50;
assert_eq!(50, x);
let y = MyBox::new(x);
//drop method.
drop(y);
MyBox::new("Hello");
}
fn startup_web_server() {
println!("\nStartup Web Server...");
HttpServer::new(|| {
App::new()
.route("/", web::get().to(index))
.service(web::resource("/countries").route(web::get().to(get_country_list)))
})
.bind("0.0.0.0:9090")
.unwrap()
.run()
.unwrap();
println!(">>>exit");
}
use std::io::prelude::*;
use std::net::TcpStream;
use std::string::String;
static HTML: &str = "<!DOCTYPE html><html lang=\"en\"> <head><meta charset=\"utf-8\"><title>Hello!</title></head><body> <h1>Hello!</h1> <p>Hi from Rust</p></body></html>";
fn handle_connection(mut stream: TcpStream) {
let mut buffer = [0; 512];
stream.read(&mut buffer).unwrap();
let get = b"GET / HTTP/1.1\r\n";
let (status_line, content) = if buffer.starts_with(get) {
("HTTP/1.1 200 OK\r\n\r\n", HTML)
} else {
("HTTP/1.1 404 NOT FOUND\r\n\r\n", "404.....")
};
let response = format!("{}{}", status_line, content);
stream.write_all(response.as_bytes()).unwrap();
stream.flush().unwrap();
}
fn startup_multiple_threads_server() {
use guessing_number::ThreadPool;
use std::net::TcpListener;
let pool = ThreadPool::new(4);
let listener = TcpListener::bind("0.0.0.0:7878").unwrap();
for stream in listener.incoming() {
let stream = stream.unwrap();
println!("req ...");
pool.execute(|| {
handle_connection(stream);
});
}
}
fn main() {
do_init();
do_trait_dispatch();
do_smart_pointer();
//startup_multiple_threads_server();
//startup_web_server();
}
| self | identifier_name |
main.rs | use actix_web::{web, App, HttpResponse, HttpServer, Responder};
use rand::Rng;
use serde::Serialize;
use std::cmp::Ordering;
use std::io;
fn index() -> impl Responder {
let my_string = String::from("Rust Async");
// first_word works on slices of `String`s
let _word = first_word(&my_string[..]);
let my_string_literal = "Rust Async";
// first_word works on slices of string literals
let _word = first_word(&my_string_literal[..]);
// Because string literals *are* string slices already,
// this works too, without the slice syntax!
let _word = first_word(my_string_literal);
println!(" word: {}", _word);
HttpResponse::Ok().body(_word)
}
#[derive(Serialize)]
struct Country {
country_code: String,
country_name: String,
}
fn get_country_list() -> impl Responder {
let mut vec: Vec<Country> = Vec::new();
vec.push(Country {
country_code: "PH".to_string(),
country_name: "Philippines".to_string(),
});
vec.push(Country {
country_code: "MY".to_string(),
country_name: "Malaysia".to_string(),
});
vec.push(Country {
country_code: "ID".to_string(),
country_name: "Indonesia".to_string(),
});
return web::Json(vec);
}
fn guess_num() {
println!("Guess the number!");
let secret_number = rand::thread_rng().gen_range(1, 101);
loop {
println!("Please input your guess.");
let mut guess = String::new();
io::stdin()
.read_line(&mut guess)
.expect("Failed to read line");
let guess: u32 = match guess.trim().parse() {
Ok(num) => num,
Err(_) => continue,
};
println!("You guessed: {}", guess);
match guess.cmp(&secret_number) {
Ordering::Less => println!("Too small!"),
Ordering::Greater => println!("Too big!"),
Ordering::Equal => {
println!("You win!");
break;
}
}
}
println!("The secret number is: {}", secret_number);
}
fn do_compound() {
let x: (i32, f64, u8) = (500, 6.4, 1);
let five_hundred = x.0;
let six_point_four = x.1;
let one = x.2;
println!(
"five_hundred: {}, six_point_four:{}, other:{}",
five_hundred, six_point_four, one
);
let a: [i32; 5] = [1, 2, 3, 4, 5];
println!(" Array element :{}", a[0]);
}
fn first_word(s: &str) -> &str {
let bytes = s.as_bytes();
for (i, &item) in bytes.iter().enumerate() {
if item == b' ' {
return &s[0..i];
}
}
&s[..]
}
fn string_slice() {
let my_string = String::from("Rust Async");
// first_word works on slices of `String`s
let _word = first_word(&my_string[..]);
let my_string_literal = "Rust Async";
// first_word works on slices of string literals
let _word = first_word(&my_string_literal[..]);
// Because string literals *are* string slices already,
// this works too, without the slice syntax!
let _word = first_word(my_string_literal);
println!(" word: {}", _word)
}
use std::collections::HashMap;
fn do_map() {
let mut map = HashMap::new();
map.insert(1, 2);
println!("map :{:?}", map);
let teams = vec![String::from("Blue"), String::from("Yellow")];
let initial_scores = vec![10, 50];
let mut scores: HashMap<_, _> = teams.iter().zip(initial_scores.iter()).collect();
println!("scores map :{:?}", scores);
for (key, value) in &scores {
println!("key:{}: value: {}", key, value);
}
let team_name = String::from("Blue");
println! {"team name : {:?}", scores.get(&team_name)};
let text = "hello world wonderful world";
let mut map = HashMap::new();
for word in text.split_whitespace() {
let count = map.entry(word).or_insert(10);
//println!("word: {}", word);
*count += 1;
println!("count:{}", *count);
}
println!("{:?}", map);
//
let mut s = String::from("你好");
s.push_str(", Bruce Li!");
s.push('耶');
println!("{}", s);
let s1 = String::from("Rust, ");
let s2 = String::from("faster!");
//// note s1 has been moved here and can no longer be used
let s3 = s1 + &s2;
println!("s3:{}", s3);
do_string();
}
fn do_string() {
let s1 = String::from("tic");
let s2 = String::from("tac");
let s3 = String::from("toe");
let s = s1 + "-" + &s2 + "-" + &s3;
println!("s: {}", s);
let s4 = String::from("suffix!");
let s = format!("{}-{}-{}", s2, s3, s4);
println!("s: {}", s);
//.bytes() //raw number
// for c in s.chars() {
// println!("{}", c);
// }
}
fn do_err() {
use std::fs::File;
//other way: let f = File::open("hello.txt").unwrap();
//let f = File::open("hello.txt").expect("Failed to open hello.txt");
let f = File::open("README.md");
let f = match f {
Ok(file) => file,
Err(error) => panic!("Problem opening the file: {:?}", error),
};
//A Shortcut for Propagating Errors: the ? Operator
}
fn largest(list: &[i32]) -> i32 {
let mut largest = list[0];
for &item in list.iter() {
if item > largest {
largest = item;
}
}
largest
}
//Another way we could implement largest is for the function to
// return a reference to a T value in the slice. I
fn get_gt<T: PartialOrd + Copy>(list: &[T]) -> T {
let mut largest = list[0];
for &item in list.iter() {
if item > largest {
largest = item;
}
}
largest
}
struct Point<T, U> {
x: T,
y: U,
}
impl<T, U> Point<T, U> {
fn mixup<V, W>(self, other: Point<V, W>) -> Point<T, W> {
Point {
x: self.x,
y: other.y,
}
}
}
fn do_trait() {
let number_list = vec![34, 50, 25, 100, 65];
let result = get_gt(&number_list);
println!("The largest number is {}", result);
let char_list = vec!['y', 'm', 'a', 'q'];
let result = get_gt(&char_list);
println!("The largest char is {}", result);
}
fn do_generic() {
let number_list = vec![34, 50, 25, 100, 65];
let result = largest(&number_list);
println!("The largest number is {}", result);
let number_list = vec![102, 34, 6000, 89, 54, 2, 43, 8];
let result = largest(&number_list);
println!("The largest number is {}", result);
let p1 = Point { x: 5, y: 10.4 };
let p2 = Point { x: "Hello", y: 'c' };
let p3 = p1.mixup(p2);
println!("p3.x = {}, p3.y = {}", p3.x, p3.y);
do_trait()
}
fn do_closure() {
let v1 = vec![1, 2, 3];
let v1_iter = v1.iter();
let total: i32 = v1_iter.sum();
assert_eq!(total, 6);
let v1: Vec<i32> = vec![1, 2, 3];
let v2: Vec<_> = v1.iter().map(|x| x + 1).collect();
assert_eq!(v2, vec![2, 3, 4]);
guessing_number::run_shoes_test();
guessing_number::calling_next_directly();
}
fn do_smart_p() {
let x = 5;
let y = &x;
assert_eq!(5, x);
assert_eq!(5, *y);
let x1 = 5;
let y1 = Box::new(x);
assert_eq!(5, x1);
assert_eq!(5, *y1);
}
fn do_concurrency() {
use std::thread;
use std::time::Duration;
let handle = thread::spawn(|| {
for i in 1..6 {
println!("hi number {} from the spawned thread!", i);
thread::sleep(Duration::from_millis(1));
}
});
for i in 1..5 {
println!("hi number {} from the main thread!", i);
thread::sleep(Duration::from_millis(1));
}
handle.join().unwrap();
do_concurrency1();
}
fn do_concurrency1() {
use std::sync::mpsc;
use std::thread;
use std::time::Duration;
let (tx, rx) = mpsc::channel();
thread::spawn(move || {
let vals = vec![
String::from("你好!"),
String::from("你去做什么?"),
String::from("Why?"),
String::from("那很好呀!"),
];
for val in vals {
tx.send(val).unwrap();
// thread::sleep(Duration::from_secs(1));
}
});
for received in rx {
println!("Got: {}", received);
}
do_concurrency2();
do_concurrency3();
do_match()
}
fn do_match_p() {
println!("one");
}
fn do_match() {
let x = 1;
match x {
1 => do_match_p(),
2 => println!("two"),
3 => println!("three"),
_ => println!("anything"),
}
//Matching Named Variables
let x = Some(5);
let y = 10;
match x {
Some(50) => println!("Got 50"),
Some(y) => println!("Matched, y = {:?}", y),
_ => println!("Default case, x = {:?}", x),
}
println!("at the end: x = {:?}, y = {:?}", x, y);
let x = 1;
match x {
1 | 2 => println!("one or two"),
3 => println!("three"),
_ => println!("anything"),
}
let x = 2;
match x {
1...5 => println!("one through five"),
_ => println!("something else"),
}
let x = 'A';
match x {
'a'...'j' => println!("early ASCII letter"),
'k'...'z' => println!("late ASCII letter"),
'A'...'Z' => println!("UP ASCII letter"),
_ => println!("something else"),
}
//Destructuring to Break Apart Values
let p = Point { x: 0, y: 7 };
match p {
Point { x, y: 0 } => println!("On the x axis at {}", x),
Point { x: 0, y } => println!("On the y axis at {}", y),
Point { x, y } => println!("On neither axis: ({}, {})", x, y),
}
let msg = Message::ChangeColor(Color::Hsv(0, 160, 255));
match msg {
Message::ChangeColor(Color::Rgb(r, g, b)) => {
println!("Change the color to red {}, green {}, and blue {}", r, g, b)
}
Message::ChangeColor(Color::Hsv(h, s, v)) => println!(
"Change the color to hue {}, saturation {}, and value {}",
h, s, v
),
_ => (),
}
//bind
do_match1();
//Rust's unsafe code
do_unsafe();
}
//Rust unsafe code demo
fn do_unsafe() {
//doesn’t enforce these memory safety guarantees.
//Gaining extra superpowers.
//You can take four actions in unsafe Rust
//Dereference a raw pointer
//Call an unsafe function or method
//Access or modify a mutable static variable
//Implement an unsafe trait
}
fn do_match1() {
let msg = MessageNum::Hello { id: 5 };
match msg {
MessageNum::Hello {
id: id_variable @ 3...7,
} => println!("Found an id in range: {}", id_variable),
MessageNum::Hello { id: 10...12 } => println!("Found an id in another range"),
MessageNum::Hello { id } => println!("Found some other id: {}", id),
}
}
enum MessageNum {
Hello { id: i32 },
}
enum Color {
Rgb(i32, i32, i32),
Hsv(i32, i32, i32),
}
enum Message {
Quit,
Move { x: i32, y: i32 },
Write(String),
ChangeColor(Color),
}
//Similarities Between RefCell<T>/Rc<T> and Mutex<T>/Arc<T>
fn do_concurrency2() {
use std::sync::mpsc;
use std::thread;
use std::time::Duration;
//VIP: producer and consumer model
let (tx, rx) = mpsc::channel();
let tx1 = mpsc::Sender::clone(&tx);
thread::spawn(move || {
let vals = vec![
String::from("1:你好!"),
String::from("1:你去做什么?"),
String::from("1:Why?"),
String::from("1:那很好呀!"),
];
for val in vals {
tx1.send(val).unwrap();
thread::sleep(Duration::from_secs(1));
}
});
thread::spawn(move || {
let vals = vec![
String::from("2:你好!"),
String::from("2:你去做什么?"),
String::from("2:Why?"),
String::from("2:那很好呀!"),
];
for val in vals {
tx.send(val).unwrap();
thread::sleep(Duration::from_secs(1));
}
});
for received in rx {
println!("Got: {}", received);
}
}
fn do_concurrency3() {
use std::sync::{Arc, Mutex};
use std::thread;
let counter = Arc::new(Mutex::new(0));
let mut handles = vec![];
for _ in 0..10 {
let counter = Arc::clone(&counter);
let handle = thread::spawn(move || {
let mut num = counter.lock().unwrap();
*num += 1;
});
handles.push(handle);
}
for handle in handles {
println!("Result: {}", *counter.lock().unwrap());
handle.join().unwrap();
}
println!("Result: {}", *counter.lock().unwrap());
}
trait Show {
fn show(&self) -> String;
}
impl Show for i32 {
fn show(&self) -> String {
format!("i32 value : {}", self)
}
}
impl Show for f64 {
fn show(&self) -> String {
format!("f64 value : {}", self)
}
}
trait Quack {
fn quack(&self);
}
struct Duck();
impl Quack for Duck {
fn quack(&self) {
println!("quack!");
}
}
struct RandomBird {
is_a_parrot: bool,
}
impl Quack for RandomBird {
fn quack(&self) {
if !self.is_a_parrot {
println!("quack!");
} else {
println!("squawk!");
}
}
}
// and why the hell not!
impl Quack for i32 {
fn quack(&self) {
for i in 0..*self {
print!("quack {} ", i);
}
println!();
}
}
trait Name {
fn name(&self) -> String;
fn upper_case(&self) -> String {
self.name().to_uppercase()
}
}
struct Toy();
impl Name for Toy {
fn name(&self) -> String {
"Toy".to_string()
}
}
fn quack() {
let duck1 = Duck();
let duck2 = RandomBird { is_a_parrot: false };
let parrot = RandomBird { is_a_parrot: true };
let i = 4;
let ducks: Vec<&Quack> = vec![&duck1, &duck2, &parrot, &i];
for d in &ducks {
d.quack();
}
let t = Toy();
assert_eq!(t.name(), "Toy".to_string());
assert_eq!(t.upper_case(), "TOY".to_string());
}
fn do_oop() {
let nvalue = Box::new(78);
let fvalue = Box::new(98.88);
let vc: Vec<Box<Show>> = vec![nvalue, fvalue];
for d in &vc {
println!("show {}", d.show());
}
//oop interface
quack();
}
fn do_float() {
let x = 2.0; // f64
let y: f32 = 3.0; // f32
println!("x:{}, y:{} ", x, y);
do_compound();
//expression
println!("zero number ; {}", zero_plus(23));
let a = [10, 20];
for element in a.iter() {
println!("the value is: {}", element);
}
for number in (1..4).rev() {
print!("{}, ", number);
}
//slice
let s = String::from("The Rust Programming Language");
let s1 = &s;
let s2 = &s;
println!("s1: {}, s2: {}", s1, s2);
let s3 = &s;
println!("s3: {}", s3);
string_slice();
do_struct();
do_map();
//do_err();
do_generic();
do_closure();
do_smart_p();
do_concurrency();
do_oop();
}
fn zero_plus(i: i32) -> i32 {
0 + i
}
#[derive(Debug)]
struct Rectangle {
width: u32,
height: u32,
}
//fn area(r: &Rectangle) -> u32 {
// r.height * r.width
//}
impl Rectangle {
fn area(&self) -> u32 {
self.height * self.width
}
fn can_hold(&self, other: &Rectangle) -> bool {
self.width > other.width && self.height > other.height
}
fn square(size: u32) -> Rectangle {
Rectangle {
width: size,
height: size,
}
}
}
fn do_struct() {
let rect1 = Rectangle {
width: 20,
height: 50,
};
let rect2 = Rectangl | i = 100;
println!("change i: {}", i);
// const declare
const MAX_POINTS: u32 = 100_000;
println!("constant variable MAX_POINT: {}", MAX_POINTS);
//shadowing
let x = 5;
let x = x + 1;
let x = x * 2;
println!("The value of x is: {}", x);
let spaces = " ";
let spaces = spaces.len();
println!("space number :{}", spaces);
// floating-point numbers
do_float();
//guess_num()
}
use std::fmt;
fn show_item<T: fmt::Display>(item: T) {
println!("Item: {}", item);
}
struct CanDisplay;
impl fmt::Display for CanDisplay {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "CanDisplay")
}
}
struct AlsoDisplay;
impl fmt::Display for AlsoDisplay {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "AlsoDisplay")
}
}
//1. Static Dispatch
fn do_static_dispatch() {
let a: CanDisplay = CanDisplay;
let b: AlsoDisplay = AlsoDisplay;
show_item(a); // stdout `Item: CanDisplay`
show_item(b); // stdout `Item: AlsoDisplay`
}
fn get_numbers(a: u32, b: u32) -> impl Iterator<Item = u32> {
(a..b).filter(|x| x % 100 == 0)
}
//2. Dynamic Dispatch
// impl trait
fn do_advanced_trait() {
for n in get_numbers(100, 1001) {
print!("{} \t", n);
}
}
//3. Specifying Placeholder Types in Trait Definitions with Associated Types
// pub trait Iterator {
// type Item;
// fn next(&mut self) -> Option<Self::Item>;
// }
//// Item is the placeholder type.
///
// 4. Fully Qualified Syntax for Disambiguation: Calling Methods with the Same Name
trait Pilot {
fn fly(&self);
}
trait Wizard {
fn fly(&self);
}
struct Human;
impl Pilot for Human {
fn fly(&self) {
println!("This is your captain speaking. Pilot!");
}
}
impl Wizard for Human {
fn fly(&self) {
println!("Wizard, up!");
}
}
impl Human {
fn fly(&self) {
println!("*waving arms furiously*");
}
}
fn do_advanced_trait2() {
let person = Human;
Pilot::fly(&person);
Wizard::fly(&person);
person.fly();
}
trait Animal {
fn baby_name() -> String;
}
struct Dog;
impl Dog {
fn baby_name() -> String {
String::from("Spot")
}
}
impl Animal for Dog {
fn baby_name() -> String {
String::from("puppy")
}
}
fn do_advanced_trait3() {
println!("A baby dog is called a {}", Dog::baby_name());
println!("A baby dog is called a {}", <Dog as Animal>::baby_name());
}
trait OutlinePrint: fmt::Display {
fn outline_print(&self) {
let output = self.to_string();
let len = output.len();
println!("{}", "*".repeat(len + 4));
println!("*{}*", " ".repeat(len + 2));
println!("* {} *", output);
println!("*{}*", " ".repeat(len + 2));
println!("{}", "*".repeat(len + 4));
}
}
struct PointXY {
x: i32,
y: i32,
}
impl OutlinePrint for PointXY {}
impl fmt::Display for PointXY {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "({}, {})", self.x, self.y)
}
}
//5. Using Super-traits to Require One Trait’s Functionality Within Another Trait
fn do_advanced_trait4() {
let xy = PointXY { x: 10, y: 30 };
xy.outline_print();
}
//6. Using the New-type Pattern to Implement External Traits on External Types
struct Wrapper(Vec<String>);
impl fmt::Display for Wrapper {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "[{}]", self.0.join(", "))
}
}
fn do_advanced_trait5() {
let w = Wrapper(vec![String::from("Hi, "), String::from("Rust!")]);
println!("w = {}", w);
}
fn do_trait_dispatch() {
do_static_dispatch();
do_advanced_trait();
do_advanced_trait2();
do_advanced_trait3();
do_advanced_trait4();
do_advanced_trait5();
}
use std::ops::Deref;
struct MyBox<T>(T);
impl<T> MyBox<T> {
fn new(x: T) -> MyBox<T> {
MyBox(x)
}
}
impl<T> Deref for MyBox<T> {
type Target = T;
fn deref(&self) -> &T {
&self.0
}
}
impl<T> Drop for MyBox<T> {
fn drop(&mut self) {
println!("dropping MyBox object from memory... ");
}
}
fn do_smart_pointer() {
//1. Using a Box<T> to store data on the heap
let x = Box::new(5);
println!("Box<T> on the heap , x = {}", x);
//2. Using Box<T> Like a Reference
let x = 5;
//let y = &x;
let y = Box::new(x);
assert_eq!(5, x);
assert_eq!(5, *y);
let x = 50;
assert_eq!(50, x);
let y = MyBox::new(x);
//drop method.
drop(y);
MyBox::new("Hello");
}
fn startup_web_server() {
println!("\nStartup Web Server...");
HttpServer::new(|| {
App::new()
.route("/", web::get().to(index))
.service(web::resource("/countries").route(web::get().to(get_country_list)))
})
.bind("0.0.0.0:9090")
.unwrap()
.run()
.unwrap();
println!(">>>exit");
}
use std::io::prelude::*;
use std::net::TcpStream;
use std::string::String;
static HTML: &str = "<!DOCTYPE html><html lang=\"en\"> <head><meta charset=\"utf-8\"><title>Hello!</title></head><body> <h1>Hello!</h1> <p>Hi from Rust</p></body></html>";
fn handle_connection(mut stream: TcpStream) {
let mut buffer = [0; 512];
stream.read(&mut buffer).unwrap();
let get = b"GET / HTTP/1.1\r\n";
let (status_line, content) = if buffer.starts_with(get) {
("HTTP/1.1 200 OK\r\n\r\n", HTML)
} else {
("HTTP/1.1 404 NOT FOUND\r\n\r\n", "404.....")
};
let response = format!("{}{}", status_line, content);
stream.write_all(response.as_bytes()).unwrap();
stream.flush().unwrap();
}
fn startup_multiple_threads_server() {
use guessing_number::ThreadPool;
use std::net::TcpListener;
let pool = ThreadPool::new(4);
let listener = TcpListener::bind("0.0.0.0:7878").unwrap();
for stream in listener.incoming() {
let stream = stream.unwrap();
println!("req ...");
pool.execute(|| {
handle_connection(stream);
});
}
}
fn main() {
do_init();
do_trait_dispatch();
do_smart_pointer();
//startup_multiple_threads_server();
//startup_web_server();
}
| e {
width: 10,
height: 40,
};
let rect3 = Rectangle {
width: 60,
height: 45,
};
println!("rect1 area: {}", rect1.area());
println!("Can rect1 hold rect2? {}", rect1.can_hold(&rect2));
println!("Can rect1 hold rect3? {}", rect1.can_hold(&rect3));
println!("rect1: {:?}", &(Rectangle::square(3)));
// println!(
// "The area of the rectangle is {} square pixels.",
// area(&rect1)
// );
// println!("rect1: {:?}", &rect1);
}
fn do_init() {
//mut and default immutable
let mut i = 0;
println!("init i :{}", i);
| identifier_body |
loadtest_types.go | /*
Copyright 2020 gRPC authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// NOTE: AFTER EDITS, YOU MUST RUN `make manifests` AND `make` TO REGENERATE
// CODE.
// Clone defines expectations regarding which repository and snapshot the test
// should use.
type Clone struct {
// Image is the name of the container image that can clone code, placing
// it in a /src/workspace directory.
//
// This field is optional. When omitted, a container that can clone
// public GitHub repos over HTTPs is used.
// +optional
Image *string `json:"image,omitempty"`
// Repo is the URL to clone a git repository. With GitHub, this should
// end in a `.git` extension.
// +optional
Repo *string `json:"repo,omitempty"`
// GitRef is a branch, tag or commit hash to checkout after a
// successful clone. This will be the version of the code in the
// /src/workspace directory.
// +optional
GitRef *string `json:"gitRef,omitempty"`
}
// Build defines expectations regarding which container image,
// command, arguments and environment variables are used to build the
// component.
type Build struct {
// Image is the name of the container image that can build code,
// placing an executable in the /src/workspace directory.
//
// This field is optional when a Language is specified on the
// Component. For example, a developer may specify a "java" server.
// Then, this image will default to the most recent gradle image.
// +optional
Image *string `json:"image,omitempty"`
// Command is the path to the executable that will build the code in
// the /src/workspace directory. If unspecified, the entrypoint for
// the container is used.
// +optional
Command []string `json:"command,omitempty"`
// Args provide command line arguments to the command. If a command
// is not specified, these arguments will be ignored in favor of the
// default arguments for container's entrypoint.
// +optional
Args []string `json:"args,omitempty"`
// Env are environment variables that should be set within the build
// container. This is provided for compilers that alter behavior due
// to certain environment variables.
// +optional
Env []corev1.EnvVar `json:"env,omitempty"`
}
// Driver defines a component that orchestrates the server and clients in the
// test.
type Driver struct {
// Name is a string that uniquely names this driver. Since load tests only
// support one driver, it is not recommended to set this field. If no name
// is explicitly provided, the operator will assign one.
// +optional
Name *string `json:"name,omitempty"`
// Language is the code that identifies the programming language used by the
// driver. For example, "cxx" may represent C++.
//
// Specifying a language is required. If the language is unknown to the
// operator, a user must manually set a run image. If the user intends for
// the operator to clone and build code, it must also manually set a build
// image.
Language string `json:"language"`
// Pool specifies the name of the set of nodes where this driver should be
// scheduled. If unset, the controller will choose a pool based on defaults.
// +optional
Pool *string `json:"pool,omitempty"`
// Clone specifies the repository and snapshot where the code for the driver
// can be found. This is used to test alternative implementations for the
// driver. Most often, this will not be set. When unset, the operator will
// use a default driver that is prebuilt.
// +optional
Clone *Clone `json:"clone,omitempty"`
// Build describes how the cloned code should be built, including any
// compiler arguments or flags. This field is only necessary if the output
// from the clone container must be pre-processed before running the tests
// in the run container.
//
// When build is specified on a test, the operator will use the driver's
// language to find a container with a compiler for that language. If the
// language is unknown to the operator, a user must include a custom docker
// image.
//
// Note that it does not usually make sense to include build instructions
// without clone instructions. If doing so, the build container must include
// its input and write its output into the /src/workspace directory for the
// run container to access it.
// +optional
Build *Build `json:"build,omitempty"`
// Run describes a list of run containers. The container for the test driver is always
// the first container on the list.
Run []corev1.Container `json:"run"`
}
// Server defines a component that receives traffic from a set of client
// components.
type Server struct {
// Name is a string that distinguishes this server from others in the test.
// Since tests are currently limited to one server, setting this field is not
// recommended. set this field. If no name is explicitly provided, the
// operator will assign one.
// +optional
Name *string `json:"name,omitempty"`
// Language is the code that identifies the programming language used by the
// server. For example, "java" may represent Java.
//
// Specifying a language is required. If the language is unknown to the
// operator, a user must manually set a run image. If the user intends for
// the operator to clone and build code, it must also manually set a build
// image.
Language string `json:"language"`
// Pool specifies the name of the set of nodes where this server should be
// scheduled. If unset, the controller will choose a pool based on defaults.
// +optional
Pool *string `json:"pool,omitempty"`
// Clone specifies the repository and snapshot where the code for the server
// can be found. This field should not be set if the code has been prebuilt
// in the run image.
// +optional
Clone *Clone `json:"clone,omitempty"`
// Build describes how the cloned code should be built, including any
// compiler arguments or flags. This field is only necessary if the output
// from the clone container must be pre-processed before running the tests
// in the run container.
//
// When build is specified on a test, the operator will use the server's
// language to find a container with a compiler for that language. If the
// language is unknown to the operator, a user must include a custom docker
// image.
//
// Note that it does not usually make sense to include build instructions
// without clone instructions. If doing so, the build container must include
// its input and write its output into the /src/workspace directory for the
// run container to access it.
// +optional
Build *Build `json:"build,omitempty"`
// Run describes a list of run containers. The container for the test server is always
// the first container on the list.
Run []corev1.Container `json:"run"`
}
// Client defines a component that sends traffic to a server component.
type Client struct {
// Name is a string that distinguishes this client from others in the test.
// Explicitly setting a name is recommended when it is helpful to
// differentiate between multiple clients. For example, a test may use
// clients with different settings.
//
// Most often, this field will not be set. When unset, the operator will
// assign a name to the client.
// +optional
Name *string `json:"name,omitempty"`
// Language is the code that identifies the programming language used by the
// client. For example, "go" may represent Go.
//
// Specifying a language is required. If the language is unknown to the
// operator, a user must manually set a run image. If the user intends for
// the operator to clone and build code, it must also manually set a build
// image.
Language string `json:"language"`
// Pool specifies the name of the set of nodes where this client should be
// scheduled. If unset, the controller will choose a pool based on defaults.
// +optional
Pool *string `json:"pool,omitempty"`
// Clone specifies the repository and snapshot where the code for the client
// can be found. This field should not be set if the code has been prebuilt
// in the run image.
// +optional
Clone *Clone `json:"clone,omitempty"`
// Build describes how the cloned code should be built, including any
// compiler arguments or flags. This field is only necessary if the output
// from the clone container must be pre-processed before running the tests
// in the run container.
//
// When build is specified on a test, the operator will use the client's
// language to find a container with a compiler for that language. If the
// language is unknown to the operator, a user must include a custom docker
// image.
//
// Note that it does not usually make sense to include build instructions
// without clone instructions. If doing so, the build container must include
// its input and write its output into the /src/workspace directory for the
// run container to access it.
// +optional
Build *Build `json:"build,omitempty"`
// Run describes a list of run containers. The container for the test client is always
// the first container on the list.
Run []corev1.Container `json:"run"`
}
// Results defines where and how test results and artifacts should be
// stored.
type Results struct {
// BigQueryTable names a dataset where the results of the test
// should be stored. If omitted, no results are saved to BigQuery.
// +optional
BigQueryTable *string `json:"bigQueryTable,omitempty"`
}
// LoadTestSpec defines the desired state of LoadTest
type LoadTestSpec struct {
// Driver is the component that orchestrates the test. It may be
// unspecified, allowing the system to choose the appropriate driver.
// +optional
Driver *Driver `json:"driver,omitempty"`
// Servers are a list of components that receive traffic from
// clients.
// +optional
Servers []Server `json:"servers,omitempty"`
// Clients are a list of components that send traffic to servers.
// +optional
Clients []Client `json:"clients,omitempty"`
// Results configures where the results of the test should be
// stored. When omitted, the results will only be stored in
// Kubernetes for a limited time.
// +optional
Results *Results `json:"results,omitempty"`
// ScenariosJSON is string with the contents of a Scenarios message,
// formatted as JSON. See the Scenarios protobuf definition for details:
// https://github.com/grpc/grpc-proto/blob/master/grpc/testing/control.proto.
// +optional
ScenariosJSON string `json:"scenariosJSON,omitempty"`
// Timeout provides the longest running time allowed for a LoadTest.
// +kubebuilder:validation:Minimum:=1
TimeoutSeconds int32 `json:"timeoutSeconds"`
// TTL provides the longest time a LoadTest can live on the cluster.
// +kubebuilder:validation:Minimum:=1
TTLSeconds int32 `json:"ttlSeconds"`
}
// LoadTestState reflects the derived state of the load test from its
// components. If any one component has errored, the load test will be marked in
// an Errored state, too. This will occur even if the other components are
// running or succeeded.
// +kubebuilder:default=Unknown
type LoadTestState string
const (
// Unknown states indicate that the load test is in an indeterminate state.
// Something may have gone wrong, but it may be recoverable. No assumption
// should be made about the next state. It may transition to any other state
// or remain Unknown until a timeout occurs.
Unknown LoadTestState = "Unknown"
// Initializing states indicate that load test's pods are under construction.
// This may mean that code is being cloned, built or assembled.
Initializing LoadTestState = "Initializing"
// Running states indicate that the initialization for a load test's pods has
// completed successfully. The run container has started.
Running LoadTestState = "Running"
// Succeeded states indicate the driver pod's run container has terminated
// successfully, signaled by a zero exit code.
Succeeded LoadTestState = "Succeeded"
// Errored states indicate the load test encountered a problem that prevented
// a successful run.
Errored LoadTestState = "Errored"
)
// IsTerminated returns true if the test has finished due to a success, failure
// or error. Otherwise, it returns false.
func (lts LoadTestState) IsTerminated() bool {
return lts == Succeeded || lts == Errored
}
// InitContainerError is the reason string when an init container has failed on
// one of the load test's pods.
var InitContainerError = "InitContainerError"
// ContainerError is the reason string when a container has failed on one of the
// load test's pods.
var ContainerError = "ContainerError"
// FailedSettingDefaultsError is the reason string when defaults could not be
// set on a load test.
var FailedSettingDefaultsError = "FailedSettingDefaults"
// ConfigurationError is the reason string when a LoadTest spec is invalid.
var ConfigurationError = "ConfigurationError"
// PodsMissing is the reason string when the load test is missing pods and is still
// in the Initializing state.
var PodsMissing = "PodsMissing"
// PoolError is the reason string when a driver, client or server requires nodes
// from a nonexistent pool.
var PoolError = "PoolError"
// TimeoutErrored is the reason string when the load test has not yet terminated
// but exceeded the timeout.
var TimeoutErrored = "TimeoutErrored" | // LoadTestStatus defines the observed state of LoadTest
type LoadTestStatus struct {
// State identifies the current state of the load test. It is
// important to note that this state is level-based. This means its
// transition is non-deterministic.
State LoadTestState `json:"state"`
// Reason is a camel-case string that indicates the reasoning behind the
// current state.
// +optional
Reason string `json:"reason,omitempty"`
// Message is a human legible string that describes the current state.
// +optional
Message string `json:"message,omitempty"`
// StartTime is the time when the controller first reconciled the load test.
// It is maintained in a best-attempt effort; meaning, it is not guaranteed to
// be correct.
// +optional
StartTime *metav1.Time `json:"startTime,omitempty"`
// StopTime is the time when the controller last entered the Succeeded,
// Failed or Errored states.
// +optional
StopTime *metav1.Time `json:"stopTime,omitempty"`
}
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// LoadTest is the Schema for the loadtests API
// +kubebuilder:printcolumn:name="State",type=string,JSONPath=`.status.state`
// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp`
type LoadTest struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec LoadTestSpec `json:"spec,omitempty"`
Status LoadTestStatus `json:"status,omitempty"`
}
// +kubebuilder:object:root=true
// LoadTestList contains a list of LoadTest
type LoadTestList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []LoadTest `json:"items"`
}
func init() {
SchemeBuilder.Register(&LoadTest{}, &LoadTestList{})
} |
// KubernetesError is the reason string when an issue occurs with Kubernetes
// that is not known to be directly related to a load test.
var KubernetesError = "KubernetesError"
| random_line_split |
loadtest_types.go | /*
Copyright 2020 gRPC authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// NOTE: AFTER EDITS, YOU MUST RUN `make manifests` AND `make` TO REGENERATE
// CODE.
// Clone defines expectations regarding which repository and snapshot the test
// should use.
type Clone struct {
// Image is the name of the container image that can clone code, placing
// it in a /src/workspace directory.
//
// This field is optional. When omitted, a container that can clone
// public GitHub repos over HTTPs is used.
// +optional
Image *string `json:"image,omitempty"`
// Repo is the URL to clone a git repository. With GitHub, this should
// end in a `.git` extension.
// +optional
Repo *string `json:"repo,omitempty"`
// GitRef is a branch, tag or commit hash to checkout after a
// successful clone. This will be the version of the code in the
// /src/workspace directory.
// +optional
GitRef *string `json:"gitRef,omitempty"`
}
// Build defines expectations regarding which container image,
// command, arguments and environment variables are used to build the
// component.
type Build struct {
// Image is the name of the container image that can build code,
// placing an executable in the /src/workspace directory.
//
// This field is optional when a Language is specified on the
// Component. For example, a developer may specify a "java" server.
// Then, this image will default to the most recent gradle image.
// +optional
Image *string `json:"image,omitempty"`
// Command is the path to the executable that will build the code in
// the /src/workspace directory. If unspecified, the entrypoint for
// the container is used.
// +optional
Command []string `json:"command,omitempty"`
// Args provide command line arguments to the command. If a command
// is not specified, these arguments will be ignored in favor of the
// default arguments for container's entrypoint.
// +optional
Args []string `json:"args,omitempty"`
// Env are environment variables that should be set within the build
// container. This is provided for compilers that alter behavior due
// to certain environment variables.
// +optional
Env []corev1.EnvVar `json:"env,omitempty"`
}
// Driver defines a component that orchestrates the server and clients in the
// test.
type Driver struct {
// Name is a string that uniquely names this driver. Since load tests only
// support one driver, it is not recommended to set this field. If no name
// is explicitly provided, the operator will assign one.
// +optional
Name *string `json:"name,omitempty"`
// Language is the code that identifies the programming language used by the
// driver. For example, "cxx" may represent C++.
//
// Specifying a language is required. If the language is unknown to the
// operator, a user must manually set a run image. If the user intends for
// the operator to clone and build code, it must also manually set a build
// image.
Language string `json:"language"`
// Pool specifies the name of the set of nodes where this driver should be
// scheduled. If unset, the controller will choose a pool based on defaults.
// +optional
Pool *string `json:"pool,omitempty"`
// Clone specifies the repository and snapshot where the code for the driver
// can be found. This is used to test alternative implementations for the
// driver. Most often, this will not be set. When unset, the operator will
// use a default driver that is prebuilt.
// +optional
Clone *Clone `json:"clone,omitempty"`
// Build describes how the cloned code should be built, including any
// compiler arguments or flags. This field is only necessary if the output
// from the clone container must be pre-processed before running the tests
// in the run container.
//
// When build is specified on a test, the operator will use the driver's
// language to find a container with a compiler for that language. If the
// language is unknown to the operator, a user must include a custom docker
// image.
//
// Note that it does not usually make sense to include build instructions
// without clone instructions. If doing so, the build container must include
// its input and write its output into the /src/workspace directory for the
// run container to access it.
// +optional
Build *Build `json:"build,omitempty"`
// Run describes a list of run containers. The container for the test driver is always
// the first container on the list.
Run []corev1.Container `json:"run"`
}
// Server defines a component that receives traffic from a set of client
// components.
type Server struct {
// Name is a string that distinguishes this server from others in the test.
// Since tests are currently limited to one server, setting this field is not
// recommended. set this field. If no name is explicitly provided, the
// operator will assign one.
// +optional
Name *string `json:"name,omitempty"`
// Language is the code that identifies the programming language used by the
// server. For example, "java" may represent Java.
//
// Specifying a language is required. If the language is unknown to the
// operator, a user must manually set a run image. If the user intends for
// the operator to clone and build code, it must also manually set a build
// image.
Language string `json:"language"`
// Pool specifies the name of the set of nodes where this server should be
// scheduled. If unset, the controller will choose a pool based on defaults.
// +optional
Pool *string `json:"pool,omitempty"`
// Clone specifies the repository and snapshot where the code for the server
// can be found. This field should not be set if the code has been prebuilt
// in the run image.
// +optional
Clone *Clone `json:"clone,omitempty"`
// Build describes how the cloned code should be built, including any
// compiler arguments or flags. This field is only necessary if the output
// from the clone container must be pre-processed before running the tests
// in the run container.
//
// When build is specified on a test, the operator will use the server's
// language to find a container with a compiler for that language. If the
// language is unknown to the operator, a user must include a custom docker
// image.
//
// Note that it does not usually make sense to include build instructions
// without clone instructions. If doing so, the build container must include
// its input and write its output into the /src/workspace directory for the
// run container to access it.
// +optional
Build *Build `json:"build,omitempty"`
// Run describes a list of run containers. The container for the test server is always
// the first container on the list.
Run []corev1.Container `json:"run"`
}
// Client defines a component that sends traffic to a server component.
type Client struct {
// Name is a string that distinguishes this client from others in the test.
// Explicitly setting a name is recommended when it is helpful to
// differentiate between multiple clients. For example, a test may use
// clients with different settings.
//
// Most often, this field will not be set. When unset, the operator will
// assign a name to the client.
// +optional
Name *string `json:"name,omitempty"`
// Language is the code that identifies the programming language used by the
// client. For example, "go" may represent Go.
//
// Specifying a language is required. If the language is unknown to the
// operator, a user must manually set a run image. If the user intends for
// the operator to clone and build code, it must also manually set a build
// image.
Language string `json:"language"`
// Pool specifies the name of the set of nodes where this client should be
// scheduled. If unset, the controller will choose a pool based on defaults.
// +optional
Pool *string `json:"pool,omitempty"`
// Clone specifies the repository and snapshot where the code for the client
// can be found. This field should not be set if the code has been prebuilt
// in the run image.
// +optional
Clone *Clone `json:"clone,omitempty"`
// Build describes how the cloned code should be built, including any
// compiler arguments or flags. This field is only necessary if the output
// from the clone container must be pre-processed before running the tests
// in the run container.
//
// When build is specified on a test, the operator will use the client's
// language to find a container with a compiler for that language. If the
// language is unknown to the operator, a user must include a custom docker
// image.
//
// Note that it does not usually make sense to include build instructions
// without clone instructions. If doing so, the build container must include
// its input and write its output into the /src/workspace directory for the
// run container to access it.
// +optional
Build *Build `json:"build,omitempty"`
// Run describes a list of run containers. The container for the test client is always
// the first container on the list.
Run []corev1.Container `json:"run"`
}
// Results defines where and how test results and artifacts should be
// stored.
type Results struct {
// BigQueryTable names a dataset where the results of the test
// should be stored. If omitted, no results are saved to BigQuery.
// +optional
BigQueryTable *string `json:"bigQueryTable,omitempty"`
}
// LoadTestSpec defines the desired state of LoadTest
type LoadTestSpec struct {
// Driver is the component that orchestrates the test. It may be
// unspecified, allowing the system to choose the appropriate driver.
// +optional
Driver *Driver `json:"driver,omitempty"`
// Servers are a list of components that receive traffic from
// clients.
// +optional
Servers []Server `json:"servers,omitempty"`
// Clients are a list of components that send traffic to servers.
// +optional
Clients []Client `json:"clients,omitempty"`
// Results configures where the results of the test should be
// stored. When omitted, the results will only be stored in
// Kubernetes for a limited time.
// +optional
Results *Results `json:"results,omitempty"`
// ScenariosJSON is string with the contents of a Scenarios message,
// formatted as JSON. See the Scenarios protobuf definition for details:
// https://github.com/grpc/grpc-proto/blob/master/grpc/testing/control.proto.
// +optional
ScenariosJSON string `json:"scenariosJSON,omitempty"`
// Timeout provides the longest running time allowed for a LoadTest.
// +kubebuilder:validation:Minimum:=1
TimeoutSeconds int32 `json:"timeoutSeconds"`
// TTL provides the longest time a LoadTest can live on the cluster.
// +kubebuilder:validation:Minimum:=1
TTLSeconds int32 `json:"ttlSeconds"`
}
// LoadTestState reflects the derived state of the load test from its
// components. If any one component has errored, the load test will be marked in
// an Errored state, too. This will occur even if the other components are
// running or succeeded.
// +kubebuilder:default=Unknown
type LoadTestState string
const (
// Unknown states indicate that the load test is in an indeterminate state.
// Something may have gone wrong, but it may be recoverable. No assumption
// should be made about the next state. It may transition to any other state
// or remain Unknown until a timeout occurs.
Unknown LoadTestState = "Unknown"
// Initializing states indicate that load test's pods are under construction.
// This may mean that code is being cloned, built or assembled.
Initializing LoadTestState = "Initializing"
// Running states indicate that the initialization for a load test's pods has
// completed successfully. The run container has started.
Running LoadTestState = "Running"
// Succeeded states indicate the driver pod's run container has terminated
// successfully, signaled by a zero exit code.
Succeeded LoadTestState = "Succeeded"
// Errored states indicate the load test encountered a problem that prevented
// a successful run.
Errored LoadTestState = "Errored"
)
// IsTerminated returns true if the test has finished due to a success, failure
// or error. Otherwise, it returns false.
func (lts LoadTestState) IsTerminated() bool {
return lts == Succeeded || lts == Errored
}
// InitContainerError is the reason string when an init container has failed on
// one of the load test's pods.
var InitContainerError = "InitContainerError"
// ContainerError is the reason string when a container has failed on one of the
// load test's pods.
var ContainerError = "ContainerError"
// FailedSettingDefaultsError is the reason string when defaults could not be
// set on a load test.
var FailedSettingDefaultsError = "FailedSettingDefaults"
// ConfigurationError is the reason string when a LoadTest spec is invalid.
var ConfigurationError = "ConfigurationError"
// PodsMissing is the reason string when the load test is missing pods and is still
// in the Initializing state.
var PodsMissing = "PodsMissing"
// PoolError is the reason string when a driver, client or server requires nodes
// from a nonexistent pool.
var PoolError = "PoolError"
// TimeoutErrored is the reason string when the load test has not yet terminated
// but exceeded the timeout.
var TimeoutErrored = "TimeoutErrored"
// KubernetesError is the reason string when an issue occurs with Kubernetes
// that is not known to be directly related to a load test.
var KubernetesError = "KubernetesError"
// LoadTestStatus defines the observed state of LoadTest
type LoadTestStatus struct {
// State identifies the current state of the load test. It is
// important to note that this state is level-based. This means its
// transition is non-deterministic.
State LoadTestState `json:"state"`
// Reason is a camel-case string that indicates the reasoning behind the
// current state.
// +optional
Reason string `json:"reason,omitempty"`
// Message is a human legible string that describes the current state.
// +optional
Message string `json:"message,omitempty"`
// StartTime is the time when the controller first reconciled the load test.
// It is maintained in a best-attempt effort; meaning, it is not guaranteed to
// be correct.
// +optional
StartTime *metav1.Time `json:"startTime,omitempty"`
// StopTime is the time when the controller last entered the Succeeded,
// Failed or Errored states.
// +optional
StopTime *metav1.Time `json:"stopTime,omitempty"`
}
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// LoadTest is the Schema for the loadtests API
// +kubebuilder:printcolumn:name="State",type=string,JSONPath=`.status.state`
// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp`
type LoadTest struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec LoadTestSpec `json:"spec,omitempty"`
Status LoadTestStatus `json:"status,omitempty"`
}
// +kubebuilder:object:root=true
// LoadTestList contains a list of LoadTest
type LoadTestList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []LoadTest `json:"items"`
}
func | () {
SchemeBuilder.Register(&LoadTest{}, &LoadTestList{})
}
| init | identifier_name |
loadtest_types.go | /*
Copyright 2020 gRPC authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// NOTE: AFTER EDITS, YOU MUST RUN `make manifests` AND `make` TO REGENERATE
// CODE.
// Clone defines expectations regarding which repository and snapshot the test
// should use.
type Clone struct {
// Image is the name of the container image that can clone code, placing
// it in a /src/workspace directory.
//
// This field is optional. When omitted, a container that can clone
// public GitHub repos over HTTPs is used.
// +optional
Image *string `json:"image,omitempty"`
// Repo is the URL to clone a git repository. With GitHub, this should
// end in a `.git` extension.
// +optional
Repo *string `json:"repo,omitempty"`
// GitRef is a branch, tag or commit hash to checkout after a
// successful clone. This will be the version of the code in the
// /src/workspace directory.
// +optional
GitRef *string `json:"gitRef,omitempty"`
}
// Build defines expectations regarding which container image,
// command, arguments and environment variables are used to build the
// component.
type Build struct {
// Image is the name of the container image that can build code,
// placing an executable in the /src/workspace directory.
//
// This field is optional when a Language is specified on the
// Component. For example, a developer may specify a "java" server.
// Then, this image will default to the most recent gradle image.
// +optional
Image *string `json:"image,omitempty"`
// Command is the path to the executable that will build the code in
// the /src/workspace directory. If unspecified, the entrypoint for
// the container is used.
// +optional
Command []string `json:"command,omitempty"`
// Args provide command line arguments to the command. If a command
// is not specified, these arguments will be ignored in favor of the
// default arguments for container's entrypoint.
// +optional
Args []string `json:"args,omitempty"`
// Env are environment variables that should be set within the build
// container. This is provided for compilers that alter behavior due
// to certain environment variables.
// +optional
Env []corev1.EnvVar `json:"env,omitempty"`
}
// Driver defines a component that orchestrates the server and clients in the
// test.
type Driver struct {
// Name is a string that uniquely names this driver. Since load tests only
// support one driver, it is not recommended to set this field. If no name
// is explicitly provided, the operator will assign one.
// +optional
Name *string `json:"name,omitempty"`
// Language is the code that identifies the programming language used by the
// driver. For example, "cxx" may represent C++.
//
// Specifying a language is required. If the language is unknown to the
// operator, a user must manually set a run image. If the user intends for
// the operator to clone and build code, it must also manually set a build
// image.
Language string `json:"language"`
// Pool specifies the name of the set of nodes where this driver should be
// scheduled. If unset, the controller will choose a pool based on defaults.
// +optional
Pool *string `json:"pool,omitempty"`
// Clone specifies the repository and snapshot where the code for the driver
// can be found. This is used to test alternative implementations for the
// driver. Most often, this will not be set. When unset, the operator will
// use a default driver that is prebuilt.
// +optional
Clone *Clone `json:"clone,omitempty"`
// Build describes how the cloned code should be built, including any
// compiler arguments or flags. This field is only necessary if the output
// from the clone container must be pre-processed before running the tests
// in the run container.
//
// When build is specified on a test, the operator will use the driver's
// language to find a container with a compiler for that language. If the
// language is unknown to the operator, a user must include a custom docker
// image.
//
// Note that it does not usually make sense to include build instructions
// without clone instructions. If doing so, the build container must include
// its input and write its output into the /src/workspace directory for the
// run container to access it.
// +optional
Build *Build `json:"build,omitempty"`
// Run describes a list of run containers. The container for the test driver is always
// the first container on the list.
Run []corev1.Container `json:"run"`
}
// Server defines a component that receives traffic from a set of client
// components.
type Server struct {
// Name is a string that distinguishes this server from others in the test.
// Since tests are currently limited to one server, setting this field is not
// recommended. set this field. If no name is explicitly provided, the
// operator will assign one.
// +optional
Name *string `json:"name,omitempty"`
// Language is the code that identifies the programming language used by the
// server. For example, "java" may represent Java.
//
// Specifying a language is required. If the language is unknown to the
// operator, a user must manually set a run image. If the user intends for
// the operator to clone and build code, it must also manually set a build
// image.
Language string `json:"language"`
// Pool specifies the name of the set of nodes where this server should be
// scheduled. If unset, the controller will choose a pool based on defaults.
// +optional
Pool *string `json:"pool,omitempty"`
// Clone specifies the repository and snapshot where the code for the server
// can be found. This field should not be set if the code has been prebuilt
// in the run image.
// +optional
Clone *Clone `json:"clone,omitempty"`
// Build describes how the cloned code should be built, including any
// compiler arguments or flags. This field is only necessary if the output
// from the clone container must be pre-processed before running the tests
// in the run container.
//
// When build is specified on a test, the operator will use the server's
// language to find a container with a compiler for that language. If the
// language is unknown to the operator, a user must include a custom docker
// image.
//
// Note that it does not usually make sense to include build instructions
// without clone instructions. If doing so, the build container must include
// its input and write its output into the /src/workspace directory for the
// run container to access it.
// +optional
Build *Build `json:"build,omitempty"`
// Run describes a list of run containers. The container for the test server is always
// the first container on the list.
Run []corev1.Container `json:"run"`
}
// Client defines a component that sends traffic to a server component.
type Client struct {
// Name is a string that distinguishes this client from others in the test.
// Explicitly setting a name is recommended when it is helpful to
// differentiate between multiple clients. For example, a test may use
// clients with different settings.
//
// Most often, this field will not be set. When unset, the operator will
// assign a name to the client.
// +optional
Name *string `json:"name,omitempty"`
// Language is the code that identifies the programming language used by the
// client. For example, "go" may represent Go.
//
// Specifying a language is required. If the language is unknown to the
// operator, a user must manually set a run image. If the user intends for
// the operator to clone and build code, it must also manually set a build
// image.
Language string `json:"language"`
// Pool specifies the name of the set of nodes where this client should be
// scheduled. If unset, the controller will choose a pool based on defaults.
// +optional
Pool *string `json:"pool,omitempty"`
// Clone specifies the repository and snapshot where the code for the client
// can be found. This field should not be set if the code has been prebuilt
// in the run image.
// +optional
Clone *Clone `json:"clone,omitempty"`
// Build describes how the cloned code should be built, including any
// compiler arguments or flags. This field is only necessary if the output
// from the clone container must be pre-processed before running the tests
// in the run container.
//
// When build is specified on a test, the operator will use the client's
// language to find a container with a compiler for that language. If the
// language is unknown to the operator, a user must include a custom docker
// image.
//
// Note that it does not usually make sense to include build instructions
// without clone instructions. If doing so, the build container must include
// its input and write its output into the /src/workspace directory for the
// run container to access it.
// +optional
Build *Build `json:"build,omitempty"`
// Run describes a list of run containers. The container for the test client is always
// the first container on the list.
Run []corev1.Container `json:"run"`
}
// Results defines where and how test results and artifacts should be
// stored.
type Results struct {
// BigQueryTable names a dataset where the results of the test
// should be stored. If omitted, no results are saved to BigQuery.
// +optional
BigQueryTable *string `json:"bigQueryTable,omitempty"`
}
// LoadTestSpec defines the desired state of LoadTest
type LoadTestSpec struct {
// Driver is the component that orchestrates the test. It may be
// unspecified, allowing the system to choose the appropriate driver.
// +optional
Driver *Driver `json:"driver,omitempty"`
// Servers are a list of components that receive traffic from
// clients.
// +optional
Servers []Server `json:"servers,omitempty"`
// Clients are a list of components that send traffic to servers.
// +optional
Clients []Client `json:"clients,omitempty"`
// Results configures where the results of the test should be
// stored. When omitted, the results will only be stored in
// Kubernetes for a limited time.
// +optional
Results *Results `json:"results,omitempty"`
// ScenariosJSON is string with the contents of a Scenarios message,
// formatted as JSON. See the Scenarios protobuf definition for details:
// https://github.com/grpc/grpc-proto/blob/master/grpc/testing/control.proto.
// +optional
ScenariosJSON string `json:"scenariosJSON,omitempty"`
// Timeout provides the longest running time allowed for a LoadTest.
// +kubebuilder:validation:Minimum:=1
TimeoutSeconds int32 `json:"timeoutSeconds"`
// TTL provides the longest time a LoadTest can live on the cluster.
// +kubebuilder:validation:Minimum:=1
TTLSeconds int32 `json:"ttlSeconds"`
}
// LoadTestState reflects the derived state of the load test from its
// components. If any one component has errored, the load test will be marked in
// an Errored state, too. This will occur even if the other components are
// running or succeeded.
// +kubebuilder:default=Unknown
type LoadTestState string
const (
// Unknown states indicate that the load test is in an indeterminate state.
// Something may have gone wrong, but it may be recoverable. No assumption
// should be made about the next state. It may transition to any other state
// or remain Unknown until a timeout occurs.
Unknown LoadTestState = "Unknown"
// Initializing states indicate that load test's pods are under construction.
// This may mean that code is being cloned, built or assembled.
Initializing LoadTestState = "Initializing"
// Running states indicate that the initialization for a load test's pods has
// completed successfully. The run container has started.
Running LoadTestState = "Running"
// Succeeded states indicate the driver pod's run container has terminated
// successfully, signaled by a zero exit code.
Succeeded LoadTestState = "Succeeded"
// Errored states indicate the load test encountered a problem that prevented
// a successful run.
Errored LoadTestState = "Errored"
)
// IsTerminated returns true if the test has finished due to a success, failure
// or error. Otherwise, it returns false.
func (lts LoadTestState) IsTerminated() bool {
return lts == Succeeded || lts == Errored
}
// InitContainerError is the reason string when an init container has failed on
// one of the load test's pods.
var InitContainerError = "InitContainerError"
// ContainerError is the reason string when a container has failed on one of the
// load test's pods.
var ContainerError = "ContainerError"
// FailedSettingDefaultsError is the reason string when defaults could not be
// set on a load test.
var FailedSettingDefaultsError = "FailedSettingDefaults"
// ConfigurationError is the reason string when a LoadTest spec is invalid.
var ConfigurationError = "ConfigurationError"
// PodsMissing is the reason string when the load test is missing pods and is still
// in the Initializing state.
var PodsMissing = "PodsMissing"
// PoolError is the reason string when a driver, client or server requires nodes
// from a nonexistent pool.
var PoolError = "PoolError"
// TimeoutErrored is the reason string when the load test has not yet terminated
// but exceeded the timeout.
var TimeoutErrored = "TimeoutErrored"
// KubernetesError is the reason string when an issue occurs with Kubernetes
// that is not known to be directly related to a load test.
var KubernetesError = "KubernetesError"
// LoadTestStatus defines the observed state of LoadTest
type LoadTestStatus struct {
// State identifies the current state of the load test. It is
// important to note that this state is level-based. This means its
// transition is non-deterministic.
State LoadTestState `json:"state"`
// Reason is a camel-case string that indicates the reasoning behind the
// current state.
// +optional
Reason string `json:"reason,omitempty"`
// Message is a human legible string that describes the current state.
// +optional
Message string `json:"message,omitempty"`
// StartTime is the time when the controller first reconciled the load test.
// It is maintained in a best-attempt effort; meaning, it is not guaranteed to
// be correct.
// +optional
StartTime *metav1.Time `json:"startTime,omitempty"`
// StopTime is the time when the controller last entered the Succeeded,
// Failed or Errored states.
// +optional
StopTime *metav1.Time `json:"stopTime,omitempty"`
}
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// LoadTest is the Schema for the loadtests API
// +kubebuilder:printcolumn:name="State",type=string,JSONPath=`.status.state`
// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp`
type LoadTest struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec LoadTestSpec `json:"spec,omitempty"`
Status LoadTestStatus `json:"status,omitempty"`
}
// +kubebuilder:object:root=true
// LoadTestList contains a list of LoadTest
type LoadTestList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []LoadTest `json:"items"`
}
func init() | {
SchemeBuilder.Register(&LoadTest{}, &LoadTestList{})
} | identifier_body | |
tgsw.rs | use crate::numerics::Torus32;
use crate::polynomial::{IntPolynomial, Polynomial, TorusPolynomial};
use crate::tlwe::{TLweKey, TLweParameters, TLweSample};
use itertools::Itertools;
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
pub struct TGswParams {
/// Decomposition length
l: i32,
/// log_2(Bg)
bg_bit: i32,
/// Decomposition base (must be a power of 2)
bg: i32,
/// Bg/2
half_bg: i32,
/// Bg-1
mask_mod: u32,
/// Parameters of each row
pub(crate) tlwe_params: TLweParameters,
/// Number of rows = (k+1)*l
kpl: i32,
/// powers of Bgbit
h: Vec<Torus32>,
/// offset = Bg/2 * (2^(32-Bgbit) + 2^(32-2*Bgbit) + ... + 2^(32-l*Bgbit))
offset: u32,
}
impl TGswParams {
pub fn new(l: i32, bg_bit: i32, tlwe_params: TLweParameters) -> Self {
let bg = (1 << bg_bit) as i32;
let half_bg = (bg >> 1) as i32;
let mask_mod = (bg - 1) as u32;
let mut h = vec![l];
for i in 0..l {
let kk = (32 - (i + 1)) * bg_bit;
// 1/(Bg^(i+1)) as a Torus32
h.push(1_i32.checked_shl(kk as u32).unwrap_or(0));
}
// offset = Bg/2 * (2^(32-Bgbit) + 2^(32-2*Bgbit) + ... + 2^(32-l*Bgbit))
let temp: u32 = (0..1).map(|i| 1 << (32 - (i + 1) * bg_bit)).sum();
let offset = temp * half_bg as u32;
let kpl = (tlwe_params.k + 1) * l;
Self {
l,
bg_bit,
bg,
half_bg,
mask_mod,
tlwe_params,
kpl,
h,
offset,
}
}
}
pub struct TGswKey {
/// Parameters of the TGswKey
pub(crate) params: TGswParams,
// the tlwe params of each rows
// tlwe_params: TLweParameters,
pub(crate) tlwe_key: TLweKey,
}
impl TGswKey {
/// Same key as in TLwe
pub(crate) fn generate(params: &TGswParams) -> Self {
let tlwe_params = params.tlwe_params.clone();
let tlwe_key = TLweKey::generate(&tlwe_params);
Self {
params: params.clone(),
// tlwe_params,
tlwe_key,
}
}
pub(crate) fn encrypt(&self, result: &mut TGswSample, message: i32, alpha: f64) {
result.encrypt_zero(alpha, self);
result.add_mu_int_h(message, &self.params);
}
}
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
pub struct TGswSample {
/// (k+1)*l TLwe Sample
all_sample: Vec<Vec<TLweSample>>,
/// Horizontal blocks (l lines) of TGSW matrix
// bloc_sample: Vec<TLweSample>,
k: i32,
l: i32,
}
impl TGswSample {
pub(crate) fn new(params: &TGswParams) -> Self {
let k = params.tlwe_params.k;
// Lines / rows
let l = params.l;
// TODO: find out if this is correctamente
let all_sample = vec![vec![TLweSample::new(¶ms.tlwe_params); (k + 1) as usize]; l as usize];
Self { all_sample, k, l }
}
pub(crate) fn encrypt_zero(&mut self, alpha: f64, key: &TGswKey) {
let rl_key = &key.tlwe_key;
self.all_sample[0]
.iter_mut()
.for_each(|s| s.encrypt_zero(alpha, rl_key));
// for p in 0..kpl as usize {
// self.all_sample[0][p].encrypt_zero(alpha, rl_key);
// }
}
pub(crate) fn add_mu_int_h(&mut self, message: i32, params: &TGswParams) {
let h = ¶ms.h;
// TFHE comment: Compute self += H
// My comment: Compute self += H * message (ish)
self.all_sample = self
.all_sample
.iter()
.enumerate()
.map(|(i, is): (usize, &Vec<TLweSample>)| {
is.iter()
.map(|js: &TLweSample| {
let new_a: Vec<TorusPolynomial> = js
.a
.iter()
.map(|a: &TorusPolynomial| {
let new_coefs = a
.coefs
.iter()
.enumerate()
.map(|(coef_idx, coef): (usize, &i32)| {
if coef_idx == 0 {
coef + message * h[i]
} else {
*coef
}
})
.collect::<Vec<Torus32>>();
TorusPolynomial::from(new_coefs)
})
.collect();
TLweSample {
a: new_a,
..js.clone()
}
})
.collect()
})
.collect();
// Is equivalent to:
// for i in 0..l as usize {
// for j in 0..=k as usize {
// self.all_sample[i][j].a[j].coefs[0] += message * h[i];
// }
// }
}
#[allow(clippy::needless_range_loop)]
pub(crate) fn add_h(&mut self, params: &TGswParams) {
let k = params.tlwe_params.k;
let l = params.l;
let h = ¶ms.h;
// compute self += H
for i in 0..l as usize {
for j in 0..=k as usize {
self.all_sample[i][j].a[j].coefs[0] += h[i];
}
}
}
#[allow(clippy::needless_range_loop)]
pub(crate) fn add_mu_h(&mut self, message: &IntPolynomial, params: &TGswParams) {
let k = params.tlwe_params.k;
let l = params.l;
let h = ¶ms.h;
let mu = &message.coefs;
// Compute self += H
for i in 0..l as usize {
for j in 0..=k as usize {
let target = &mut self.all_sample[i][j].a[j].coefs;
println!("target coefs befor: {:?}", target);
target
.iter_mut()
.zip_eq(mu.iter())
.for_each(|(t, mu)| *t += mu * h[i]);
println!("target coefs after: {:?}", target);
// for jj in 0..n as usize {
// println!(
// "Target len: {}, mu len: {}, h len: {}, jj: {}, n: {}",
// target.len(),
// mu.len(),
// h.len(),
// jj,
// n
// );
// target[jj] += mu[jj] * h[i];
// }
}
}
}
}
/// Update l'accumulateur ligne 5 de l'algo toujours
/// void tGswTLweDecompH(IntPolynomial* result, const TLweSample* sample,const TGswParams* params);
/// accum * sample
pub(crate) fn tgsw_extern_mul_to_tlwe(
accum: &TLweSample,
sample: &TGswSample,
params: &TGswParams,
) -> TLweSample {
let par = ¶ms.tlwe_params;
let mut result = TLweSample {
a: accum
.a
.iter()
.map(|polynomial| TorusPolynomial::zero(polynomial.len()))
.collect(),
current_variance: 0_f64,
k: accum.k,
};
let mut dec = tgsw_tlwe_decomposition_h(accum, params);
let outer = dec.len();
let inner = dec[0].len();
let mut new_dec = vec![vec![IntPolynomial::zero(0); outer]; inner];
#[allow(clippy::needless_range_loop)]
for x in 0..inner {
for y in 0..outer {
std::mem::swap(&mut new_dec[x][y], &mut dec[y][x]);
}
}
let dec = new_dec;
dec
.iter()
.flatten()
.zip_eq(sample.all_sample.iter().flatten())
.for_each(|(d, a)| result.add_mul_r_(d, a, par));
result
// for i in 0..dec.len() as usize {
// println!("kpl: {}, k: {}, l: {}, i: {}", kpl, par.k, params.l, i);
// // TODO: Figure out if this is supposed to be [0][i] instead, or something else...
// let d = &dec[i][0];
// let ass = &sample.all_sample[i][0];
// accum.add_mul_r_(d, ass, par);
// }
// for (int32_t i = 0; i < kpl; i++) {
// tLweAddMulRTo(accum, &dec[i], &sample->all_sample[i], par);
// }
}
/// Decompose a TLwe-sample by the given TGsw parameters
fn tgsw_tlwe_decomposition_h(sample: &TLweSample, params: &TGswParams) -> Vec<Vec<IntPolynomial>> {
let tlwe_params = ¶ms.tlwe_params;
let k = tlwe_params.k;
let mut result =
vec![vec![IntPolynomial::new(tlwe_params.n); params.l as usize]; (tlwe_params.k + 1) as usize];
for i in 0..=k {
// b=a[k]
tgsw_torus32_polynomial_decomposition_h(
&mut result[(i/* /* TODO: Remove this when you figure this out: Don't think this is necessary? */ * l*/)
as usize],
&sample.a[i as usize],
params,
);
// tGswTorus32PolynomialDecompH(result + (i * l), &sample->a[i], params);
}
result
}
fn tgsw_torus32_polynomial_decomposition_h(
result: &mut Vec<IntPolynomial>,
sample: &TorusPolynomial,
params: &TGswParams,
) |
#[cfg(test)]
mod tests {
use super::*;
use crate::polynomial::TorusPolynomial;
fn generate_parameters() -> Vec<TGswParams> {
vec![
TGswParams::new(4, 8, TLweParameters::new(512, 1, 0f64, 1f64)),
TGswParams::new(3, 10, TLweParameters::new(512, 2, 0f64, 1f64)),
TGswParams::new(3, 10, TLweParameters::new(1024, 1, 0f64, 1f64)),
TGswParams::new(4, 8, TLweParameters::new(1024, 2, 0f64, 1f64)),
TGswParams::new(4, 8, TLweParameters::new(2048, 1, 0f64, 1f64)),
TGswParams::new(3, 10, TLweParameters::new(2048, 2, 0f64, 1f64)),
]
}
fn generate_keys() -> Vec<TGswKey> {
generate_parameters()
.iter()
.map(TGswKey::generate)
.collect()
}
fn fully_random_tgsw(sample: &mut TGswSample, alpha: f64, params: &TGswParams) {
let l = params.l;
let k = params.tlwe_params.k;
// This is butt-ugly
for j in 0..l as usize {
for i in 0..=k as usize {
let mut row = &mut sample.all_sample[j][i];
for u in 0..=k {
row.a[u as usize] = TorusPolynomial::uniform(row.a.len());
}
row.current_variance = alpha * alpha;
}
}
}
#[test]
fn test_add_h() {
for params in generate_parameters() {
let mut sample = TGswSample::new(¶ms);
let kpl = params.kpl;
let l = params.l;
let k = params.tlwe_params.k;
let n = params.tlwe_params.n;
let h = ¶ms.h;
let alpha = 4.2;
fully_random_tgsw(&mut sample, alpha, ¶ms);
let sample_copy = sample.clone();
sample.add_h(¶ms);
// Verify all coefficients
for i in 0..l as usize {
for j in 0..=k as usize {
assert_eq!(
sample.all_sample[i][j].current_variance,
sample_copy.all_sample[i][j].current_variance
);
for u in 0..=k as usize {
//verify that pol[bloc][i][u]=initial[bloc][i][u]+(bloc==u?hi:0)
let new_polynomial = &sample.all_sample[i][j].a[u];
let old_polynomial = &sample_copy.all_sample[i][j].a[u];
assert_eq!(
new_polynomial.coefs[0], // Should this be i == u?
old_polynomial.coefs[0] + (if j == u { h[i] } else { 0 })
);
assert_eq!(new_polynomial.coefs[1..], old_polynomial.coefs[1..]);
}
}
}
}
}
fn random_int_polynomial(n: i32) -> IntPolynomial {
let mut rng = rand::thread_rng();
use rand::distributions::Distribution;
let d = rand_distr::Uniform::new(i32::MIN, i32::MAX);
let coefs: Vec<i32> = (0..n).map(|_| d.sample(&mut rng) % 10 - 5).collect();
assert_eq!(coefs.len() as i32, n);
IntPolynomial::from(coefs)
}
#[test]
#[ignore]
fn test_add_mu_h() {
for params in generate_parameters() {
let mut sample = TGswSample::new(¶ms);
let kpl = params.kpl;
let l = params.l;
let k = params.tlwe_params.k;
let n = params.tlwe_params.n;
let h = ¶ms.h;
let alpha = 4.2;
let message = random_int_polynomial(n);
fully_random_tgsw(&mut sample, alpha, ¶ms);
let sample_copy = sample.clone();
sample.add_mu_h(&message, ¶ms);
// Verify all coefficients
for i in 0..l as usize {
for j in 0..=k as usize {
assert_eq!(
sample.all_sample[i][j].current_variance,
sample_copy.all_sample[i][j].current_variance
);
for u in 0..=k as usize {
//verify that pol[bloc][i][u]=initial[bloc][i][u]+(bloc==u?hi*mess:0)
let new_polynomial = &sample.all_sample[i][j].a[u];
let old_polynomial = &sample_copy.all_sample[i][j].a[u];
if j == u {
new_polynomial
.coefs
.iter()
.zip_eq(old_polynomial.coefs.iter())
.zip_eq(message.coefs.iter())
.for_each(|((n, o), m)| assert_eq!(*n, *o + h[i] * (dbg!(*m))));
// for jj in 0..n as usize {
// assert_eq!(
// new_polynomial.coefs[jj],
// old_polynomial.coefs[jj] + h[i] * message.coefs[jj]
// );
// }
} else {
assert!(new_polynomial
.coefs
.iter()
.zip_eq(old_polynomial.coefs.iter())
.all(|(a, b)| a == b));
}
assert_eq!(
new_polynomial.coefs[0], // Should this be i == u?
old_polynomial.coefs[0] + (if j == u { dbg!(h[i]) } else { 0 })
);
assert_eq!(new_polynomial.coefs[1..], old_polynomial.coefs[1..]);
}
}
}
}
}
}
| {
let n = params.tlwe_params.n;
let l = params.l;
let bg_bit = params.bg_bit;
let mask_mod = params.mask_mod;
let half_bg = params.half_bg;
let offset = params.offset;
// First, add offset to everyone
let buf: Vec<i32> = sample
.coefs
.iter()
.map(|c| c.wrapping_add(offset as i32))
.collect();
// Then, do the decomposition (TODO: in parallel)
#[allow(clippy::needless_range_loop)]
for p in 0..l as usize {
let decal = 32 - (p + 1) as i32 * bg_bit;
let res_p: &mut Vec<i32> = &mut result[p].coefs;
for j in 0..n as usize {
let temp1 = (buf[j] >> decal) & mask_mod as i32;
res_p[j] = temp1 - half_bg;
}
}
} | identifier_body |
tgsw.rs | use crate::numerics::Torus32;
use crate::polynomial::{IntPolynomial, Polynomial, TorusPolynomial};
use crate::tlwe::{TLweKey, TLweParameters, TLweSample};
use itertools::Itertools;
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
pub struct TGswParams {
/// Decomposition length
l: i32,
/// log_2(Bg)
bg_bit: i32,
/// Decomposition base (must be a power of 2)
bg: i32,
/// Bg/2
half_bg: i32,
/// Bg-1
mask_mod: u32,
/// Parameters of each row
pub(crate) tlwe_params: TLweParameters,
/// Number of rows = (k+1)*l
kpl: i32,
/// powers of Bgbit
h: Vec<Torus32>,
/// offset = Bg/2 * (2^(32-Bgbit) + 2^(32-2*Bgbit) + ... + 2^(32-l*Bgbit))
offset: u32,
}
impl TGswParams {
pub fn new(l: i32, bg_bit: i32, tlwe_params: TLweParameters) -> Self {
let bg = (1 << bg_bit) as i32;
let half_bg = (bg >> 1) as i32;
let mask_mod = (bg - 1) as u32;
let mut h = vec![l];
for i in 0..l {
let kk = (32 - (i + 1)) * bg_bit;
// 1/(Bg^(i+1)) as a Torus32
h.push(1_i32.checked_shl(kk as u32).unwrap_or(0));
}
// offset = Bg/2 * (2^(32-Bgbit) + 2^(32-2*Bgbit) + ... + 2^(32-l*Bgbit))
let temp: u32 = (0..1).map(|i| 1 << (32 - (i + 1) * bg_bit)).sum();
let offset = temp * half_bg as u32;
let kpl = (tlwe_params.k + 1) * l;
Self {
l,
bg_bit,
bg,
half_bg,
mask_mod,
tlwe_params,
kpl,
h,
offset,
}
}
}
pub struct TGswKey {
/// Parameters of the TGswKey
pub(crate) params: TGswParams,
// the tlwe params of each rows
// tlwe_params: TLweParameters,
pub(crate) tlwe_key: TLweKey,
}
impl TGswKey {
/// Same key as in TLwe
pub(crate) fn generate(params: &TGswParams) -> Self {
let tlwe_params = params.tlwe_params.clone();
let tlwe_key = TLweKey::generate(&tlwe_params);
Self {
params: params.clone(),
// tlwe_params,
tlwe_key,
}
}
pub(crate) fn encrypt(&self, result: &mut TGswSample, message: i32, alpha: f64) {
result.encrypt_zero(alpha, self);
result.add_mu_int_h(message, &self.params);
}
}
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
pub struct TGswSample {
/// (k+1)*l TLwe Sample
all_sample: Vec<Vec<TLweSample>>,
/// Horizontal blocks (l lines) of TGSW matrix
// bloc_sample: Vec<TLweSample>,
k: i32,
l: i32,
}
impl TGswSample {
pub(crate) fn new(params: &TGswParams) -> Self {
let k = params.tlwe_params.k;
// Lines / rows
let l = params.l;
// TODO: find out if this is correctamente
let all_sample = vec![vec![TLweSample::new(¶ms.tlwe_params); (k + 1) as usize]; l as usize];
Self { all_sample, k, l }
}
pub(crate) fn encrypt_zero(&mut self, alpha: f64, key: &TGswKey) {
let rl_key = &key.tlwe_key;
self.all_sample[0]
.iter_mut()
.for_each(|s| s.encrypt_zero(alpha, rl_key));
// for p in 0..kpl as usize {
// self.all_sample[0][p].encrypt_zero(alpha, rl_key);
// }
}
pub(crate) fn add_mu_int_h(&mut self, message: i32, params: &TGswParams) {
let h = ¶ms.h;
// TFHE comment: Compute self += H
// My comment: Compute self += H * message (ish)
self.all_sample = self
.all_sample
.iter()
.enumerate()
.map(|(i, is): (usize, &Vec<TLweSample>)| {
is.iter()
.map(|js: &TLweSample| {
let new_a: Vec<TorusPolynomial> = js
.a
.iter()
.map(|a: &TorusPolynomial| {
let new_coefs = a
.coefs
.iter()
.enumerate()
.map(|(coef_idx, coef): (usize, &i32)| {
if coef_idx == 0 {
coef + message * h[i]
} else {
*coef
}
})
.collect::<Vec<Torus32>>();
TorusPolynomial::from(new_coefs)
})
.collect();
TLweSample {
a: new_a,
..js.clone()
}
})
.collect()
})
.collect();
// Is equivalent to:
// for i in 0..l as usize {
// for j in 0..=k as usize {
// self.all_sample[i][j].a[j].coefs[0] += message * h[i];
// }
// }
}
#[allow(clippy::needless_range_loop)]
pub(crate) fn add_h(&mut self, params: &TGswParams) {
let k = params.tlwe_params.k;
let l = params.l;
let h = ¶ms.h;
// compute self += H
for i in 0..l as usize {
for j in 0..=k as usize {
self.all_sample[i][j].a[j].coefs[0] += h[i];
}
}
}
#[allow(clippy::needless_range_loop)]
pub(crate) fn add_mu_h(&mut self, message: &IntPolynomial, params: &TGswParams) {
let k = params.tlwe_params.k;
let l = params.l;
let h = ¶ms.h;
let mu = &message.coefs;
// Compute self += H
for i in 0..l as usize {
for j in 0..=k as usize {
let target = &mut self.all_sample[i][j].a[j].coefs;
println!("target coefs befor: {:?}", target);
target
.iter_mut()
.zip_eq(mu.iter())
.for_each(|(t, mu)| *t += mu * h[i]);
println!("target coefs after: {:?}", target);
// for jj in 0..n as usize {
// println!(
// "Target len: {}, mu len: {}, h len: {}, jj: {}, n: {}",
// target.len(),
// mu.len(),
// h.len(),
// jj,
// n
// );
// target[jj] += mu[jj] * h[i];
// }
}
}
}
}
/// Update l'accumulateur ligne 5 de l'algo toujours
/// void tGswTLweDecompH(IntPolynomial* result, const TLweSample* sample,const TGswParams* params);
/// accum * sample
pub(crate) fn tgsw_extern_mul_to_tlwe(
accum: &TLweSample,
sample: &TGswSample,
params: &TGswParams,
) -> TLweSample {
let par = ¶ms.tlwe_params;
let mut result = TLweSample {
a: accum
.a
.iter()
.map(|polynomial| TorusPolynomial::zero(polynomial.len()))
.collect(),
current_variance: 0_f64,
k: accum.k,
};
let mut dec = tgsw_tlwe_decomposition_h(accum, params);
let outer = dec.len();
let inner = dec[0].len();
let mut new_dec = vec![vec![IntPolynomial::zero(0); outer]; inner];
#[allow(clippy::needless_range_loop)]
for x in 0..inner {
for y in 0..outer {
std::mem::swap(&mut new_dec[x][y], &mut dec[y][x]);
}
}
let dec = new_dec;
dec
.iter()
.flatten()
.zip_eq(sample.all_sample.iter().flatten())
.for_each(|(d, a)| result.add_mul_r_(d, a, par));
result
// for i in 0..dec.len() as usize {
// println!("kpl: {}, k: {}, l: {}, i: {}", kpl, par.k, params.l, i);
// // TODO: Figure out if this is supposed to be [0][i] instead, or something else...
// let d = &dec[i][0];
// let ass = &sample.all_sample[i][0];
// accum.add_mul_r_(d, ass, par);
// }
// for (int32_t i = 0; i < kpl; i++) {
// tLweAddMulRTo(accum, &dec[i], &sample->all_sample[i], par);
// }
}
/// Decompose a TLwe-sample by the given TGsw parameters
fn tgsw_tlwe_decomposition_h(sample: &TLweSample, params: &TGswParams) -> Vec<Vec<IntPolynomial>> {
let tlwe_params = ¶ms.tlwe_params;
let k = tlwe_params.k;
let mut result =
vec![vec![IntPolynomial::new(tlwe_params.n); params.l as usize]; (tlwe_params.k + 1) as usize];
for i in 0..=k {
// b=a[k]
tgsw_torus32_polynomial_decomposition_h(
&mut result[(i/* /* TODO: Remove this when you figure this out: Don't think this is necessary? */ * l*/)
as usize],
&sample.a[i as usize],
params,
);
// tGswTorus32PolynomialDecompH(result + (i * l), &sample->a[i], params);
}
result
}
fn tgsw_torus32_polynomial_decomposition_h(
result: &mut Vec<IntPolynomial>,
sample: &TorusPolynomial,
params: &TGswParams,
) {
let n = params.tlwe_params.n;
let l = params.l;
let bg_bit = params.bg_bit;
let mask_mod = params.mask_mod;
let half_bg = params.half_bg;
let offset = params.offset;
// First, add offset to everyone
let buf: Vec<i32> = sample
.coefs
.iter()
.map(|c| c.wrapping_add(offset as i32))
.collect();
// Then, do the decomposition (TODO: in parallel)
#[allow(clippy::needless_range_loop)]
for p in 0..l as usize {
let decal = 32 - (p + 1) as i32 * bg_bit;
let res_p: &mut Vec<i32> = &mut result[p].coefs;
for j in 0..n as usize {
let temp1 = (buf[j] >> decal) & mask_mod as i32;
res_p[j] = temp1 - half_bg;
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::polynomial::TorusPolynomial;
fn generate_parameters() -> Vec<TGswParams> {
vec![
TGswParams::new(4, 8, TLweParameters::new(512, 1, 0f64, 1f64)),
TGswParams::new(3, 10, TLweParameters::new(512, 2, 0f64, 1f64)),
TGswParams::new(3, 10, TLweParameters::new(1024, 1, 0f64, 1f64)),
TGswParams::new(4, 8, TLweParameters::new(1024, 2, 0f64, 1f64)),
TGswParams::new(4, 8, TLweParameters::new(2048, 1, 0f64, 1f64)),
TGswParams::new(3, 10, TLweParameters::new(2048, 2, 0f64, 1f64)),
]
}
fn generate_keys() -> Vec<TGswKey> {
generate_parameters()
.iter()
.map(TGswKey::generate)
.collect()
}
fn fully_random_tgsw(sample: &mut TGswSample, alpha: f64, params: &TGswParams) {
let l = params.l;
let k = params.tlwe_params.k;
// This is butt-ugly
for j in 0..l as usize {
for i in 0..=k as usize {
let mut row = &mut sample.all_sample[j][i];
for u in 0..=k {
row.a[u as usize] = TorusPolynomial::uniform(row.a.len());
}
row.current_variance = alpha * alpha;
}
}
}
#[test]
fn | () {
for params in generate_parameters() {
let mut sample = TGswSample::new(¶ms);
let kpl = params.kpl;
let l = params.l;
let k = params.tlwe_params.k;
let n = params.tlwe_params.n;
let h = ¶ms.h;
let alpha = 4.2;
fully_random_tgsw(&mut sample, alpha, ¶ms);
let sample_copy = sample.clone();
sample.add_h(¶ms);
// Verify all coefficients
for i in 0..l as usize {
for j in 0..=k as usize {
assert_eq!(
sample.all_sample[i][j].current_variance,
sample_copy.all_sample[i][j].current_variance
);
for u in 0..=k as usize {
//verify that pol[bloc][i][u]=initial[bloc][i][u]+(bloc==u?hi:0)
let new_polynomial = &sample.all_sample[i][j].a[u];
let old_polynomial = &sample_copy.all_sample[i][j].a[u];
assert_eq!(
new_polynomial.coefs[0], // Should this be i == u?
old_polynomial.coefs[0] + (if j == u { h[i] } else { 0 })
);
assert_eq!(new_polynomial.coefs[1..], old_polynomial.coefs[1..]);
}
}
}
}
}
fn random_int_polynomial(n: i32) -> IntPolynomial {
let mut rng = rand::thread_rng();
use rand::distributions::Distribution;
let d = rand_distr::Uniform::new(i32::MIN, i32::MAX);
let coefs: Vec<i32> = (0..n).map(|_| d.sample(&mut rng) % 10 - 5).collect();
assert_eq!(coefs.len() as i32, n);
IntPolynomial::from(coefs)
}
#[test]
#[ignore]
fn test_add_mu_h() {
for params in generate_parameters() {
let mut sample = TGswSample::new(¶ms);
let kpl = params.kpl;
let l = params.l;
let k = params.tlwe_params.k;
let n = params.tlwe_params.n;
let h = ¶ms.h;
let alpha = 4.2;
let message = random_int_polynomial(n);
fully_random_tgsw(&mut sample, alpha, ¶ms);
let sample_copy = sample.clone();
sample.add_mu_h(&message, ¶ms);
// Verify all coefficients
for i in 0..l as usize {
for j in 0..=k as usize {
assert_eq!(
sample.all_sample[i][j].current_variance,
sample_copy.all_sample[i][j].current_variance
);
for u in 0..=k as usize {
//verify that pol[bloc][i][u]=initial[bloc][i][u]+(bloc==u?hi*mess:0)
let new_polynomial = &sample.all_sample[i][j].a[u];
let old_polynomial = &sample_copy.all_sample[i][j].a[u];
if j == u {
new_polynomial
.coefs
.iter()
.zip_eq(old_polynomial.coefs.iter())
.zip_eq(message.coefs.iter())
.for_each(|((n, o), m)| assert_eq!(*n, *o + h[i] * (dbg!(*m))));
// for jj in 0..n as usize {
// assert_eq!(
// new_polynomial.coefs[jj],
// old_polynomial.coefs[jj] + h[i] * message.coefs[jj]
// );
// }
} else {
assert!(new_polynomial
.coefs
.iter()
.zip_eq(old_polynomial.coefs.iter())
.all(|(a, b)| a == b));
}
assert_eq!(
new_polynomial.coefs[0], // Should this be i == u?
old_polynomial.coefs[0] + (if j == u { dbg!(h[i]) } else { 0 })
);
assert_eq!(new_polynomial.coefs[1..], old_polynomial.coefs[1..]);
}
}
}
}
}
}
| test_add_h | identifier_name |
tgsw.rs | use crate::numerics::Torus32;
use crate::polynomial::{IntPolynomial, Polynomial, TorusPolynomial};
use crate::tlwe::{TLweKey, TLweParameters, TLweSample};
use itertools::Itertools;
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
pub struct TGswParams {
/// Decomposition length
l: i32,
/// log_2(Bg)
bg_bit: i32,
/// Decomposition base (must be a power of 2)
bg: i32,
/// Bg/2
half_bg: i32,
/// Bg-1
mask_mod: u32,
/// Parameters of each row
pub(crate) tlwe_params: TLweParameters,
/// Number of rows = (k+1)*l
kpl: i32,
/// powers of Bgbit
h: Vec<Torus32>,
/// offset = Bg/2 * (2^(32-Bgbit) + 2^(32-2*Bgbit) + ... + 2^(32-l*Bgbit))
offset: u32,
}
impl TGswParams {
pub fn new(l: i32, bg_bit: i32, tlwe_params: TLweParameters) -> Self {
let bg = (1 << bg_bit) as i32;
let half_bg = (bg >> 1) as i32;
let mask_mod = (bg - 1) as u32;
let mut h = vec![l];
for i in 0..l {
let kk = (32 - (i + 1)) * bg_bit;
// 1/(Bg^(i+1)) as a Torus32
h.push(1_i32.checked_shl(kk as u32).unwrap_or(0));
}
// offset = Bg/2 * (2^(32-Bgbit) + 2^(32-2*Bgbit) + ... + 2^(32-l*Bgbit))
let temp: u32 = (0..1).map(|i| 1 << (32 - (i + 1) * bg_bit)).sum();
let offset = temp * half_bg as u32;
let kpl = (tlwe_params.k + 1) * l;
Self {
l,
bg_bit,
bg,
half_bg,
mask_mod,
tlwe_params,
kpl,
h,
offset,
}
}
}
pub struct TGswKey {
/// Parameters of the TGswKey
pub(crate) params: TGswParams,
// the tlwe params of each rows
// tlwe_params: TLweParameters,
pub(crate) tlwe_key: TLweKey,
}
impl TGswKey {
/// Same key as in TLwe
pub(crate) fn generate(params: &TGswParams) -> Self {
let tlwe_params = params.tlwe_params.clone();
let tlwe_key = TLweKey::generate(&tlwe_params);
Self {
params: params.clone(),
// tlwe_params,
tlwe_key,
}
}
pub(crate) fn encrypt(&self, result: &mut TGswSample, message: i32, alpha: f64) {
result.encrypt_zero(alpha, self);
result.add_mu_int_h(message, &self.params);
}
}
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
pub struct TGswSample {
/// (k+1)*l TLwe Sample
all_sample: Vec<Vec<TLweSample>>,
/// Horizontal blocks (l lines) of TGSW matrix
// bloc_sample: Vec<TLweSample>,
k: i32,
l: i32,
}
impl TGswSample {
pub(crate) fn new(params: &TGswParams) -> Self {
let k = params.tlwe_params.k;
// Lines / rows
let l = params.l;
// TODO: find out if this is correctamente
let all_sample = vec![vec![TLweSample::new(¶ms.tlwe_params); (k + 1) as usize]; l as usize];
Self { all_sample, k, l }
}
pub(crate) fn encrypt_zero(&mut self, alpha: f64, key: &TGswKey) {
let rl_key = &key.tlwe_key;
self.all_sample[0]
.iter_mut()
.for_each(|s| s.encrypt_zero(alpha, rl_key));
// for p in 0..kpl as usize {
// self.all_sample[0][p].encrypt_zero(alpha, rl_key);
// }
}
pub(crate) fn add_mu_int_h(&mut self, message: i32, params: &TGswParams) {
let h = ¶ms.h;
// TFHE comment: Compute self += H
// My comment: Compute self += H * message (ish)
self.all_sample = self
.all_sample
.iter()
.enumerate()
.map(|(i, is): (usize, &Vec<TLweSample>)| {
is.iter()
.map(|js: &TLweSample| {
let new_a: Vec<TorusPolynomial> = js
.a
.iter()
.map(|a: &TorusPolynomial| {
let new_coefs = a
.coefs
.iter()
.enumerate()
.map(|(coef_idx, coef): (usize, &i32)| {
if coef_idx == 0 {
coef + message * h[i]
} else {
*coef
}
})
.collect::<Vec<Torus32>>();
TorusPolynomial::from(new_coefs)
})
.collect();
TLweSample {
a: new_a,
..js.clone()
}
})
.collect()
})
.collect();
// Is equivalent to:
// for i in 0..l as usize {
// for j in 0..=k as usize {
// self.all_sample[i][j].a[j].coefs[0] += message * h[i];
// }
// }
}
#[allow(clippy::needless_range_loop)]
pub(crate) fn add_h(&mut self, params: &TGswParams) {
let k = params.tlwe_params.k;
let l = params.l;
let h = ¶ms.h;
// compute self += H
for i in 0..l as usize {
for j in 0..=k as usize {
self.all_sample[i][j].a[j].coefs[0] += h[i];
}
}
}
#[allow(clippy::needless_range_loop)]
pub(crate) fn add_mu_h(&mut self, message: &IntPolynomial, params: &TGswParams) {
let k = params.tlwe_params.k;
let l = params.l;
let h = ¶ms.h;
let mu = &message.coefs;
// Compute self += H
for i in 0..l as usize {
for j in 0..=k as usize {
let target = &mut self.all_sample[i][j].a[j].coefs;
println!("target coefs befor: {:?}", target);
target
.iter_mut()
.zip_eq(mu.iter())
.for_each(|(t, mu)| *t += mu * h[i]);
println!("target coefs after: {:?}", target);
// for jj in 0..n as usize {
// println!(
// "Target len: {}, mu len: {}, h len: {}, jj: {}, n: {}",
// target.len(),
// mu.len(),
// h.len(),
// jj,
// n
// );
// target[jj] += mu[jj] * h[i];
// }
}
}
}
}
/// Update l'accumulateur ligne 5 de l'algo toujours
/// void tGswTLweDecompH(IntPolynomial* result, const TLweSample* sample,const TGswParams* params);
/// accum * sample
pub(crate) fn tgsw_extern_mul_to_tlwe(
accum: &TLweSample,
sample: &TGswSample,
params: &TGswParams,
) -> TLweSample {
let par = ¶ms.tlwe_params;
let mut result = TLweSample {
a: accum
.a
.iter()
.map(|polynomial| TorusPolynomial::zero(polynomial.len()))
.collect(),
current_variance: 0_f64,
k: accum.k,
};
let mut dec = tgsw_tlwe_decomposition_h(accum, params);
let outer = dec.len();
let inner = dec[0].len();
let mut new_dec = vec![vec![IntPolynomial::zero(0); outer]; inner];
#[allow(clippy::needless_range_loop)]
for x in 0..inner {
for y in 0..outer {
std::mem::swap(&mut new_dec[x][y], &mut dec[y][x]);
}
}
let dec = new_dec;
dec
.iter()
.flatten()
.zip_eq(sample.all_sample.iter().flatten())
.for_each(|(d, a)| result.add_mul_r_(d, a, par));
result
// for i in 0..dec.len() as usize {
// println!("kpl: {}, k: {}, l: {}, i: {}", kpl, par.k, params.l, i);
// // TODO: Figure out if this is supposed to be [0][i] instead, or something else...
// let d = &dec[i][0];
// let ass = &sample.all_sample[i][0];
// accum.add_mul_r_(d, ass, par);
// }
// for (int32_t i = 0; i < kpl; i++) {
// tLweAddMulRTo(accum, &dec[i], &sample->all_sample[i], par);
// }
}
/// Decompose a TLwe-sample by the given TGsw parameters
fn tgsw_tlwe_decomposition_h(sample: &TLweSample, params: &TGswParams) -> Vec<Vec<IntPolynomial>> {
let tlwe_params = ¶ms.tlwe_params;
let k = tlwe_params.k;
let mut result =
vec![vec![IntPolynomial::new(tlwe_params.n); params.l as usize]; (tlwe_params.k + 1) as usize];
for i in 0..=k {
// b=a[k]
tgsw_torus32_polynomial_decomposition_h(
&mut result[(i/* /* TODO: Remove this when you figure this out: Don't think this is necessary? */ * l*/)
as usize],
&sample.a[i as usize],
params,
);
// tGswTorus32PolynomialDecompH(result + (i * l), &sample->a[i], params);
}
result
}
fn tgsw_torus32_polynomial_decomposition_h(
result: &mut Vec<IntPolynomial>,
sample: &TorusPolynomial,
params: &TGswParams,
) {
let n = params.tlwe_params.n;
let l = params.l;
let bg_bit = params.bg_bit;
let mask_mod = params.mask_mod;
let half_bg = params.half_bg;
let offset = params.offset;
// First, add offset to everyone
let buf: Vec<i32> = sample
.coefs
.iter()
.map(|c| c.wrapping_add(offset as i32))
.collect();
// Then, do the decomposition (TODO: in parallel)
#[allow(clippy::needless_range_loop)]
for p in 0..l as usize {
let decal = 32 - (p + 1) as i32 * bg_bit;
let res_p: &mut Vec<i32> = &mut result[p].coefs;
for j in 0..n as usize {
let temp1 = (buf[j] >> decal) & mask_mod as i32;
res_p[j] = temp1 - half_bg;
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::polynomial::TorusPolynomial;
fn generate_parameters() -> Vec<TGswParams> {
vec![
TGswParams::new(4, 8, TLweParameters::new(512, 1, 0f64, 1f64)),
TGswParams::new(3, 10, TLweParameters::new(512, 2, 0f64, 1f64)),
TGswParams::new(3, 10, TLweParameters::new(1024, 1, 0f64, 1f64)),
TGswParams::new(4, 8, TLweParameters::new(1024, 2, 0f64, 1f64)),
TGswParams::new(4, 8, TLweParameters::new(2048, 1, 0f64, 1f64)),
TGswParams::new(3, 10, TLweParameters::new(2048, 2, 0f64, 1f64)),
]
}
fn generate_keys() -> Vec<TGswKey> {
generate_parameters()
.iter()
.map(TGswKey::generate)
.collect()
}
fn fully_random_tgsw(sample: &mut TGswSample, alpha: f64, params: &TGswParams) {
let l = params.l;
let k = params.tlwe_params.k;
// This is butt-ugly
for j in 0..l as usize {
for i in 0..=k as usize {
let mut row = &mut sample.all_sample[j][i];
for u in 0..=k {
row.a[u as usize] = TorusPolynomial::uniform(row.a.len());
}
row.current_variance = alpha * alpha;
}
}
}
#[test]
fn test_add_h() {
for params in generate_parameters() {
let mut sample = TGswSample::new(¶ms);
let kpl = params.kpl;
let l = params.l;
let k = params.tlwe_params.k;
let n = params.tlwe_params.n;
let h = ¶ms.h;
let alpha = 4.2;
fully_random_tgsw(&mut sample, alpha, ¶ms);
let sample_copy = sample.clone();
sample.add_h(¶ms);
// Verify all coefficients
for i in 0..l as usize {
for j in 0..=k as usize {
assert_eq!(
sample.all_sample[i][j].current_variance,
sample_copy.all_sample[i][j].current_variance
);
for u in 0..=k as usize {
//verify that pol[bloc][i][u]=initial[bloc][i][u]+(bloc==u?hi:0)
let new_polynomial = &sample.all_sample[i][j].a[u];
let old_polynomial = &sample_copy.all_sample[i][j].a[u];
assert_eq!(
new_polynomial.coefs[0], // Should this be i == u?
old_polynomial.coefs[0] + (if j == u { h[i] } else { 0 })
);
assert_eq!(new_polynomial.coefs[1..], old_polynomial.coefs[1..]);
}
}
}
}
}
fn random_int_polynomial(n: i32) -> IntPolynomial {
let mut rng = rand::thread_rng();
use rand::distributions::Distribution;
let d = rand_distr::Uniform::new(i32::MIN, i32::MAX);
let coefs: Vec<i32> = (0..n).map(|_| d.sample(&mut rng) % 10 - 5).collect();
assert_eq!(coefs.len() as i32, n);
IntPolynomial::from(coefs)
}
#[test]
#[ignore]
fn test_add_mu_h() {
for params in generate_parameters() {
let mut sample = TGswSample::new(¶ms);
let kpl = params.kpl;
let l = params.l;
let k = params.tlwe_params.k;
let n = params.tlwe_params.n;
let h = ¶ms.h;
let alpha = 4.2;
let message = random_int_polynomial(n);
fully_random_tgsw(&mut sample, alpha, ¶ms);
let sample_copy = sample.clone();
sample.add_mu_h(&message, ¶ms);
// Verify all coefficients
for i in 0..l as usize {
for j in 0..=k as usize {
assert_eq!(
sample.all_sample[i][j].current_variance,
sample_copy.all_sample[i][j].current_variance
);
for u in 0..=k as usize {
//verify that pol[bloc][i][u]=initial[bloc][i][u]+(bloc==u?hi*mess:0)
let new_polynomial = &sample.all_sample[i][j].a[u];
let old_polynomial = &sample_copy.all_sample[i][j].a[u];
if j == u {
new_polynomial
.coefs
.iter()
.zip_eq(old_polynomial.coefs.iter())
.zip_eq(message.coefs.iter())
.for_each(|((n, o), m)| assert_eq!(*n, *o + h[i] * (dbg!(*m))));
// for jj in 0..n as usize {
// assert_eq!(
// new_polynomial.coefs[jj],
// old_polynomial.coefs[jj] + h[i] * message.coefs[jj]
// );
// }
} else |
assert_eq!(
new_polynomial.coefs[0], // Should this be i == u?
old_polynomial.coefs[0] + (if j == u { dbg!(h[i]) } else { 0 })
);
assert_eq!(new_polynomial.coefs[1..], old_polynomial.coefs[1..]);
}
}
}
}
}
}
| {
assert!(new_polynomial
.coefs
.iter()
.zip_eq(old_polynomial.coefs.iter())
.all(|(a, b)| a == b));
} | conditional_block |
tgsw.rs | use crate::numerics::Torus32;
use crate::polynomial::{IntPolynomial, Polynomial, TorusPolynomial};
use crate::tlwe::{TLweKey, TLweParameters, TLweSample};
use itertools::Itertools;
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
pub struct TGswParams {
/// Decomposition length
l: i32,
/// log_2(Bg)
bg_bit: i32,
/// Decomposition base (must be a power of 2)
bg: i32,
/// Bg/2
half_bg: i32,
/// Bg-1
mask_mod: u32,
/// Parameters of each row
pub(crate) tlwe_params: TLweParameters,
/// Number of rows = (k+1)*l
kpl: i32,
/// powers of Bgbit
h: Vec<Torus32>,
/// offset = Bg/2 * (2^(32-Bgbit) + 2^(32-2*Bgbit) + ... + 2^(32-l*Bgbit))
offset: u32,
}
impl TGswParams {
pub fn new(l: i32, bg_bit: i32, tlwe_params: TLweParameters) -> Self {
let bg = (1 << bg_bit) as i32;
let half_bg = (bg >> 1) as i32;
let mask_mod = (bg - 1) as u32;
let mut h = vec![l];
for i in 0..l {
let kk = (32 - (i + 1)) * bg_bit;
// 1/(Bg^(i+1)) as a Torus32
h.push(1_i32.checked_shl(kk as u32).unwrap_or(0));
}
// offset = Bg/2 * (2^(32-Bgbit) + 2^(32-2*Bgbit) + ... + 2^(32-l*Bgbit))
let temp: u32 = (0..1).map(|i| 1 << (32 - (i + 1) * bg_bit)).sum();
let offset = temp * half_bg as u32;
let kpl = (tlwe_params.k + 1) * l;
Self {
l,
bg_bit,
bg,
half_bg,
mask_mod,
tlwe_params,
kpl,
h,
offset,
}
}
}
pub struct TGswKey {
/// Parameters of the TGswKey
pub(crate) params: TGswParams,
// the tlwe params of each rows
// tlwe_params: TLweParameters,
pub(crate) tlwe_key: TLweKey,
}
impl TGswKey {
/// Same key as in TLwe
pub(crate) fn generate(params: &TGswParams) -> Self {
let tlwe_params = params.tlwe_params.clone();
let tlwe_key = TLweKey::generate(&tlwe_params);
Self {
params: params.clone(),
// tlwe_params,
tlwe_key,
}
}
pub(crate) fn encrypt(&self, result: &mut TGswSample, message: i32, alpha: f64) {
result.encrypt_zero(alpha, self);
result.add_mu_int_h(message, &self.params);
}
}
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
pub struct TGswSample {
/// (k+1)*l TLwe Sample
all_sample: Vec<Vec<TLweSample>>,
/// Horizontal blocks (l lines) of TGSW matrix
// bloc_sample: Vec<TLweSample>,
k: i32,
l: i32,
}
impl TGswSample {
pub(crate) fn new(params: &TGswParams) -> Self {
let k = params.tlwe_params.k;
// Lines / rows
let l = params.l;
// TODO: find out if this is correctamente
let all_sample = vec![vec![TLweSample::new(¶ms.tlwe_params); (k + 1) as usize]; l as usize];
Self { all_sample, k, l }
}
pub(crate) fn encrypt_zero(&mut self, alpha: f64, key: &TGswKey) {
let rl_key = &key.tlwe_key;
self.all_sample[0]
.iter_mut()
.for_each(|s| s.encrypt_zero(alpha, rl_key));
// for p in 0..kpl as usize {
// self.all_sample[0][p].encrypt_zero(alpha, rl_key);
// }
}
pub(crate) fn add_mu_int_h(&mut self, message: i32, params: &TGswParams) {
let h = ¶ms.h;
// TFHE comment: Compute self += H
// My comment: Compute self += H * message (ish)
self.all_sample = self
.all_sample
.iter()
.enumerate()
.map(|(i, is): (usize, &Vec<TLweSample>)| {
is.iter()
.map(|js: &TLweSample| {
let new_a: Vec<TorusPolynomial> = js
.a
.iter()
.map(|a: &TorusPolynomial| {
let new_coefs = a
.coefs
.iter()
.enumerate()
.map(|(coef_idx, coef): (usize, &i32)| {
if coef_idx == 0 {
coef + message * h[i]
} else {
*coef
}
})
.collect::<Vec<Torus32>>();
TorusPolynomial::from(new_coefs)
})
.collect();
TLweSample {
a: new_a,
..js.clone()
}
})
.collect()
})
.collect();
// Is equivalent to:
// for i in 0..l as usize {
// for j in 0..=k as usize {
// self.all_sample[i][j].a[j].coefs[0] += message * h[i];
// }
// }
}
#[allow(clippy::needless_range_loop)]
pub(crate) fn add_h(&mut self, params: &TGswParams) {
let k = params.tlwe_params.k;
let l = params.l;
let h = ¶ms.h;
// compute self += H
for i in 0..l as usize {
for j in 0..=k as usize {
self.all_sample[i][j].a[j].coefs[0] += h[i];
}
}
}
#[allow(clippy::needless_range_loop)]
pub(crate) fn add_mu_h(&mut self, message: &IntPolynomial, params: &TGswParams) {
let k = params.tlwe_params.k;
let l = params.l;
let h = ¶ms.h;
let mu = &message.coefs;
// Compute self += H
for i in 0..l as usize {
for j in 0..=k as usize {
let target = &mut self.all_sample[i][j].a[j].coefs;
println!("target coefs befor: {:?}", target);
target
.iter_mut()
.zip_eq(mu.iter())
.for_each(|(t, mu)| *t += mu * h[i]);
println!("target coefs after: {:?}", target);
// for jj in 0..n as usize {
// println!(
// "Target len: {}, mu len: {}, h len: {}, jj: {}, n: {}",
// target.len(),
// mu.len(),
// h.len(),
// jj,
// n
// );
// target[jj] += mu[jj] * h[i];
// }
}
}
}
}
/// Update l'accumulateur ligne 5 de l'algo toujours
/// void tGswTLweDecompH(IntPolynomial* result, const TLweSample* sample,const TGswParams* params);
/// accum * sample
pub(crate) fn tgsw_extern_mul_to_tlwe(
accum: &TLweSample,
sample: &TGswSample,
params: &TGswParams,
) -> TLweSample {
let par = ¶ms.tlwe_params;
let mut result = TLweSample {
a: accum
.a
.iter()
.map(|polynomial| TorusPolynomial::zero(polynomial.len()))
.collect(),
current_variance: 0_f64,
k: accum.k,
};
let mut dec = tgsw_tlwe_decomposition_h(accum, params);
let outer = dec.len();
let inner = dec[0].len();
let mut new_dec = vec![vec![IntPolynomial::zero(0); outer]; inner];
#[allow(clippy::needless_range_loop)]
for x in 0..inner {
for y in 0..outer {
std::mem::swap(&mut new_dec[x][y], &mut dec[y][x]);
}
}
let dec = new_dec;
dec
.iter()
.flatten()
.zip_eq(sample.all_sample.iter().flatten())
.for_each(|(d, a)| result.add_mul_r_(d, a, par));
result
// for i in 0..dec.len() as usize {
// println!("kpl: {}, k: {}, l: {}, i: {}", kpl, par.k, params.l, i);
// // TODO: Figure out if this is supposed to be [0][i] instead, or something else...
// let d = &dec[i][0];
// let ass = &sample.all_sample[i][0];
// accum.add_mul_r_(d, ass, par);
// }
// for (int32_t i = 0; i < kpl; i++) {
// tLweAddMulRTo(accum, &dec[i], &sample->all_sample[i], par);
// }
}
/// Decompose a TLwe-sample by the given TGsw parameters
fn tgsw_tlwe_decomposition_h(sample: &TLweSample, params: &TGswParams) -> Vec<Vec<IntPolynomial>> {
let tlwe_params = ¶ms.tlwe_params;
let k = tlwe_params.k;
let mut result =
vec![vec![IntPolynomial::new(tlwe_params.n); params.l as usize]; (tlwe_params.k + 1) as usize];
for i in 0..=k {
// b=a[k]
tgsw_torus32_polynomial_decomposition_h(
&mut result[(i/* /* TODO: Remove this when you figure this out: Don't think this is necessary? */ * l*/)
as usize],
&sample.a[i as usize],
params,
);
// tGswTorus32PolynomialDecompH(result + (i * l), &sample->a[i], params);
}
result
}
fn tgsw_torus32_polynomial_decomposition_h(
result: &mut Vec<IntPolynomial>,
sample: &TorusPolynomial,
params: &TGswParams,
) {
let n = params.tlwe_params.n;
let l = params.l;
let bg_bit = params.bg_bit;
let mask_mod = params.mask_mod;
let half_bg = params.half_bg;
let offset = params.offset;
// First, add offset to everyone
let buf: Vec<i32> = sample
.coefs
.iter()
.map(|c| c.wrapping_add(offset as i32))
.collect();
// Then, do the decomposition (TODO: in parallel)
#[allow(clippy::needless_range_loop)]
for p in 0..l as usize {
let decal = 32 - (p + 1) as i32 * bg_bit;
let res_p: &mut Vec<i32> = &mut result[p].coefs;
for j in 0..n as usize {
let temp1 = (buf[j] >> decal) & mask_mod as i32;
res_p[j] = temp1 - half_bg;
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::polynomial::TorusPolynomial;
fn generate_parameters() -> Vec<TGswParams> {
vec![
TGswParams::new(4, 8, TLweParameters::new(512, 1, 0f64, 1f64)),
TGswParams::new(3, 10, TLweParameters::new(512, 2, 0f64, 1f64)),
TGswParams::new(3, 10, TLweParameters::new(1024, 1, 0f64, 1f64)),
TGswParams::new(4, 8, TLweParameters::new(1024, 2, 0f64, 1f64)),
TGswParams::new(4, 8, TLweParameters::new(2048, 1, 0f64, 1f64)),
TGswParams::new(3, 10, TLweParameters::new(2048, 2, 0f64, 1f64)),
]
}
fn generate_keys() -> Vec<TGswKey> {
generate_parameters()
.iter()
.map(TGswKey::generate)
.collect()
}
fn fully_random_tgsw(sample: &mut TGswSample, alpha: f64, params: &TGswParams) {
let l = params.l;
let k = params.tlwe_params.k;
// This is butt-ugly
for j in 0..l as usize {
for i in 0..=k as usize {
let mut row = &mut sample.all_sample[j][i];
for u in 0..=k {
row.a[u as usize] = TorusPolynomial::uniform(row.a.len());
}
row.current_variance = alpha * alpha;
}
}
}
#[test]
fn test_add_h() {
for params in generate_parameters() {
let mut sample = TGswSample::new(¶ms);
let kpl = params.kpl;
let l = params.l;
let k = params.tlwe_params.k;
let n = params.tlwe_params.n;
let h = ¶ms.h;
let alpha = 4.2;
fully_random_tgsw(&mut sample, alpha, ¶ms);
let sample_copy = sample.clone();
sample.add_h(¶ms);
// Verify all coefficients
for i in 0..l as usize {
for j in 0..=k as usize {
assert_eq!(
sample.all_sample[i][j].current_variance,
sample_copy.all_sample[i][j].current_variance
);
for u in 0..=k as usize {
//verify that pol[bloc][i][u]=initial[bloc][i][u]+(bloc==u?hi:0)
let new_polynomial = &sample.all_sample[i][j].a[u];
let old_polynomial = &sample_copy.all_sample[i][j].a[u];
assert_eq!(
new_polynomial.coefs[0], // Should this be i == u?
old_polynomial.coefs[0] + (if j == u { h[i] } else { 0 })
);
assert_eq!(new_polynomial.coefs[1..], old_polynomial.coefs[1..]);
}
}
}
}
}
fn random_int_polynomial(n: i32) -> IntPolynomial {
let mut rng = rand::thread_rng();
use rand::distributions::Distribution;
let d = rand_distr::Uniform::new(i32::MIN, i32::MAX);
let coefs: Vec<i32> = (0..n).map(|_| d.sample(&mut rng) % 10 - 5).collect();
assert_eq!(coefs.len() as i32, n);
IntPolynomial::from(coefs)
}
#[test]
#[ignore]
fn test_add_mu_h() {
for params in generate_parameters() {
let mut sample = TGswSample::new(¶ms);
let kpl = params.kpl;
let l = params.l;
let k = params.tlwe_params.k;
let n = params.tlwe_params.n;
let h = ¶ms.h;
let alpha = 4.2;
let message = random_int_polynomial(n);
fully_random_tgsw(&mut sample, alpha, ¶ms);
let sample_copy = sample.clone();
sample.add_mu_h(&message, ¶ms);
// Verify all coefficients
for i in 0..l as usize {
for j in 0..=k as usize {
assert_eq!(
sample.all_sample[i][j].current_variance,
sample_copy.all_sample[i][j].current_variance
);
for u in 0..=k as usize {
//verify that pol[bloc][i][u]=initial[bloc][i][u]+(bloc==u?hi*mess:0)
let new_polynomial = &sample.all_sample[i][j].a[u];
let old_polynomial = &sample_copy.all_sample[i][j].a[u];
if j == u {
new_polynomial
.coefs
.iter()
.zip_eq(old_polynomial.coefs.iter())
.zip_eq(message.coefs.iter())
.for_each(|((n, o), m)| assert_eq!(*n, *o + h[i] * (dbg!(*m))));
// for jj in 0..n as usize {
// assert_eq!(
// new_polynomial.coefs[jj],
// old_polynomial.coefs[jj] + h[i] * message.coefs[jj] | // );
// }
} else {
assert!(new_polynomial
.coefs
.iter()
.zip_eq(old_polynomial.coefs.iter())
.all(|(a, b)| a == b));
}
assert_eq!(
new_polynomial.coefs[0], // Should this be i == u?
old_polynomial.coefs[0] + (if j == u { dbg!(h[i]) } else { 0 })
);
assert_eq!(new_polynomial.coefs[1..], old_polynomial.coefs[1..]);
}
}
}
}
}
} | random_line_split | |
trie.rs | use std::fmt::Debug;
use super::bit_cache::BitCache;
use crate::double_array::DoubleArray;
use bincode;
use serde::Serialize;
use serde::de::DeserializeOwned;
struct Node<T> {
key : u8,
values: Vec<T>,
nexts : Vec<Node<T>>,
}
/// トライ木の実装。
/// ダブル配列は直接構築することはできないので、トライ木を構築してから変換することで構築する。
///
/// # Examples
///
/// ```
/// use std::fmt::Debug;
/// use dary::DoubleArray;
/// use dary::Trie;
/// use serde_derive::{Serialize, Deserialize};
///
/// fn main() {
/// let key1 = String::from("foo");
/// let key2 = String::from("bar");
/// let key3 = String::from("baz");
///
/// let sample1 = Sample { surface: key1.clone(), cost: 1 };
/// let sample2 = Sample { surface: key1.clone(), cost: 2 };
/// let sample3 = Sample { surface: key2.clone(), cost: 1 };
/// let sample4 = Sample { surface: key3.clone(), cost: 1 };
///
/// let mut trie: Trie<Sample> = Trie::new();
/// trie.set(&key1, sample1.clone());
/// trie.set(&key1, sample2.clone());
/// trie.set(&key2, sample3.clone());
/// trie.set(&key3, sample4.clone());
///
/// let double_array = trie.to_double_array().ok().unwrap();
/// assert_eq!(vec![sample1, sample2], double_array.get(&key1).unwrap());
/// assert_eq!(vec![sample3] , double_array.get(&key2).unwrap());
/// assert_eq!(vec![sample4] , double_array.get(&key3).unwrap());
/// }
///
/// #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
/// struct Sample {
/// surface: String,
/// cost: usize,
/// }
/// ```
pub struct Trie<T: Serialize + DeserializeOwned + Debug> {
root: Node<T>,
len: usize,
}
impl<T: Serialize + DeserializeOwned + Debug> Trie<T> {
pub fn new() -> Trie<T> {
Trie {
root: Node { key: 0, values: Vec::new(), nexts: Vec::new() },
len: 0,
}
}
/// trieにノードを追加する
/// 一つのkeyにつき256個までの値を登録できる
/// 超えた場合はpanic
///
/// # Arguments
///
/// * `key` - 追加するキー
/// * `value` - キーに対応する値
pub fn set(&mut self, key: &str, value: T) {
let mut node = &mut self.root;
for &k in key.as_bytes() {
match node.nexts.binary_search_by(|probe| probe.key.cmp(&k)) {
Ok(i) => {
node = &mut node.nexts[i];
},
Err(i) => {
node.nexts.insert(i, Node { key: k, values: Vec::new(), nexts: Vec::new() });
node = &mut node.nexts[i];
}
}
}
self.len += 1;
node.values.push(value);
}
/// trieを探索する
/// keyに対応する値が見つかったら値のスライスを返す
///
/// # Arguments
///
/// * `key` - 探索するkey
pub fn get(&self, key: &str) -> Option<&[T]> {
let mut node = &self.root;
for &k in key.as_bytes() {
match node.nexts.binary_search_by(|probe| probe.key.cmp(&k)) {
Ok(i) => {
node = &node.nexts[i];
},
Err(_) => {
return None;
}
}
}
if node.values.is_empty() {
None
} else {
Some(&node.values)
}
}
/// トライ木をダブル配列に変換する
///
/// # Panics
/// dataをバイト列に変換できなかった場合にpanicする。
///
/// # Errors
///
///
/// # Arguments
///
/// * `len` - ダブル配列の初期サイズ
pub fn to_double_array(self) -> Result<DoubleArray<T>, std::io::Error> {
let max_key = u8::max_value() as usize + 1; // keyが取りうる値のパターン
let mut len = if max_key > (4 * self.len) { max_key } else { 4 * self.len };
let mut base_arr: Vec<u32> = vec![0; len];
let mut check_arr: Vec<u32> = vec![0; len];
let mut data_arr: Vec<u8> = Vec::with_capacity(self.len);
let mut bit_cache: BitCache = BitCache::new();
bit_cache.set(0);
bit_cache.set(1); | while !stack.is_empty() {
let (curr_idx, mut node) = stack.pop().unwrap();
bit_cache.update_start();
// base値を探索・セット
if !node.values.is_empty() {
// valuesが存在する場合はkey=255のノードとして計算する
node.nexts.push(Node { key: u8::max_value(), values: vec![], nexts: vec![] });
}
let base: usize = Self::find_base(&node.nexts, &bit_cache);
base_arr[curr_idx] = base as u32;
// 配列の長さが足りなければ配列を拡張
if base + max_key >= len {
len = len * 2;
base_arr.resize(len, 0);
check_arr.resize(len, 0);
}
// 新しいノードをダブル配列に登録
for n in node.nexts {
let i = base + (n.key as usize);
bit_cache.set(i);
check_arr[i] = curr_idx as u32;
if n.key == u8::max_value() {
// valueノードの登録
// base には data の開始 index を格納する
base_arr[i] = data_arr.len() as u32;
// data には末尾に values を追加する
let data = bincode::serialize(&node.values).unwrap();
data_arr.extend_from_slice(&data);
} else {
// 通常ノードの登録
stack.push((i, n));
}
}
}
// 配列のりサイズ
let new_len = match bit_cache.last_index_of_one() {
None => max_key,
Some(new_len) => new_len + max_key,
};
base_arr.resize(new_len, 0);
check_arr.resize(new_len, 0);
DoubleArray::from_arrays(&base_arr, &check_arr, &data_arr)
}
/// 新しいbase値を探索するメソッド
///
/// # Arguments
///
/// * `nodes` - 追加対象のノード
/// * `bit_cache` - BitCacheのインスタンス
/// * `with_zero` - key=0のノードも考慮してbase値を探す
fn find_base(nodes: &[Node<T>], bit_cache: &BitCache) -> usize {
if nodes.is_empty() {
panic!("探索すべきノードがありません");
}
let first_key = nodes[0].key as usize;
let mut offset = 0;
'outer: loop {
let empty_idx = bit_cache.find_empty_idx(offset);
let new_base = empty_idx - first_key;
if empty_idx < 256 {
panic!("empty_idx={}, first_key={}", empty_idx, first_key);
}
// すべてのノードが重複せずに配置できるかをチェック
'inner: for next in nodes {
if bit_cache.get(new_base + next.key as usize) != 0 {
// 空じゃなかった場合はnew_baseを探すとこからやり直し
offset += 1;
continue 'outer;
}
}
return new_base;
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_trie_1() {
let mut trie: Trie<i32> = Trie::new();
let s = String::from("abc");
trie.set(&s, 0);
trie.set(&s, 1);
// 登録されたkeyと値が一致している
assert_eq!(0, trie.get(&s).unwrap()[0]);
assert_eq!(1, trie.get(&s).unwrap()[1]);
let s = String::from("cba");
// 登録されていないkeyはNoneを返す
assert_eq!(None, trie.get(&s));
}
#[test]
fn test_trie_2() {
let mut trie: Trie<u32> = Trie::new();
let s1 = String::from("abc");
let s2 = String::from("abd");
let s3 = String::from("zyx");
let s4 = String::from("zwx");
trie.set(&s1, 10);
trie.set(&s2, 11);
trie.set(&s3, 12);
trie.set(&s4, 13);
trie.set(&s1, 14);
// 登録されたkeyと値が一致している
assert_eq!(10, trie.get(&s1).unwrap()[0]);
assert_eq!(14, trie.get(&s1).unwrap()[1]);
assert_eq!(11, trie.get(&s2).unwrap()[0]);
assert_eq!(12, trie.get(&s3).unwrap()[0]);
assert_eq!(13, trie.get(&s4).unwrap()[0]);
}
#[test]
fn test_trie_3() {
let mut trie: Trie<u32> = Trie::new();
let s1 = String::from("あいうえお");
let s2 = String::from("あいえうお");
let s3 = String::from("漢字");
let s4 = String::from("平仮名");
let s5 = String::from("片仮名");
trie.set(&s1, 10);
trie.set(&s2, 11);
trie.set(&s3, 12);
trie.set(&s4, 13);
trie.set(&s5, 14);
// 登録されたkeyと値が一致している
assert_eq!(10, trie.get(&s1).unwrap()[0]);
assert_eq!(11, trie.get(&s2).unwrap()[0]);
assert_eq!(12, trie.get(&s3).unwrap()[0]);
assert_eq!(13, trie.get(&s4).unwrap()[0]);
assert_eq!(14, trie.get(&s5).unwrap()[0]);
}
#[test]
fn test_find_base_1() {
let nodes: Vec<Node<u32>> = vec![
Node::<u32> { key: 2 , values: vec![], nexts: vec![] },
Node::<u32> { key: 5 , values: vec![], nexts: vec![] },
Node::<u32> { key: 255, values: vec![], nexts: vec![] },
];
let mut bit_cache = BitCache::new();
// 探索開始位置 = 256。空きindex = 256
// base値 = 空きindex - 先頭ノードのkey = 256 - 2 = 254
assert_eq!(254, Trie::find_base(&nodes, &bit_cache));
// 0 ~ 399, 500 ~ 999 を埋める
(256..400).for_each(|i| bit_cache.set(i));
(500..1000).for_each(|i| bit_cache.set(i));
// 探索開始位置 = 256。空きindex = 1000
// base値 = 空きindex - 先頭ノードのkey = 1000 - 2 = 998
assert_eq!(998, Trie::find_base(&nodes, &bit_cache));
//1000..1002, 1003..1005, 1006..1255 を埋める
(1000..1002).for_each(|i| bit_cache.set(i));
(1003..1005).for_each(|i| bit_cache.set(i));
(1006..1255).for_each(|i| bit_cache.set(i));
// 探索開始位置 = 256。空きindex = 1002
// base値 = 空きindex - 先頭ノードのkey = 1002 - 2 = 1000
assert_eq!(1000, Trie::find_base(&nodes, &bit_cache));
// 400 ~ 500 を埋める
(400..500).for_each(|i| bit_cache.set(i));
// 探索開始位置=1216。空きindex = 1255
// base値 = 空きindex - 先頭ノードのkey = 1255 - 2 = 1253
bit_cache.update_start();
assert_eq!(1253, Trie::find_base(&nodes, &bit_cache));
}
#[test]
#[should_panic(expected = "探索すべきノードがありません")]
fn test_find_base_2() {
let nodes: Vec<Node<u32>> = vec![];
let bit_cache = BitCache::new();
// nodesが空でwith_zero=falseの場合は、base値を求められないのでpanic
Trie::find_base(&nodes, &bit_cache);
}
#[test]
fn test_to_double_array_1() {
let mut trie: Trie<u32> = Trie::new();
let s1 = String::from("abc");
let s2 = String::from("ac");
let s3 = String::from("b");
let s4 = String::from("bd");
let s5 = String::from("bdc");
trie.set(&s1, 1);
trie.set(&s1, 2);
trie.set(&s2, 3);
trie.set(&s3, 4);
trie.set(&s4, 5);
trie.set(&s5, 6);
let double_array = trie.to_double_array().ok().unwrap();
// 登録されていて、data_arrに値が存在するkeyは対応する値を返す
assert_eq!(vec![1, 2], double_array.get(&s1).unwrap());
assert_eq!(vec![3], double_array.get(&s2).unwrap());
assert_eq!(vec![4], double_array.get(&s3).unwrap());
assert_eq!(vec![5], double_array.get(&s4).unwrap());
assert_eq!(vec![6], double_array.get(&s5).unwrap());
// 登録されているが、data_arrに値が存在しないkeyはNoneを返す
assert_eq!(None, double_array.get("ab"));
}
#[test]
fn test_to_double_array_2() {
let trie: Trie<u32> = Trie::new();
let double_array = trie.to_double_array().ok().unwrap();
// 遷移できない場合はpanicする
assert_eq!(None, double_array.get("abc"));
}
#[test]
fn test_to_double_array_3() {
// マルチバイト文字のテスト
let mut trie: Trie<u32> = Trie::new();
let s1 = String::from("おすしとビール");
let s2 = String::from("お寿司とビール");
let s3 = String::from("🍣🍺");
trie.set(&s1, 1);
trie.set(&s1, 2);
trie.set(&s2, 3);
trie.set(&s3, 4);
let double_array = trie.to_double_array().ok().unwrap();
// 登録されていて、data_arrに値が存在するkeyは対応する値を返す
assert_eq!(vec![1, 2], double_array.get(&s1).unwrap());
assert_eq!(vec![3] , double_array.get(&s2).unwrap());
assert_eq!(vec![4] , double_array.get(&s3).unwrap());
// 登録されているが、data_arrに値が存在しないkeyはNoneを返す
assert_eq!(None, double_array.get("お寿"));
}
} | let mut stack: Vec<(usize, Node<T>)> = Vec::with_capacity(self.len);
if !self.root.nexts.is_empty() {
stack.push((1, self.root));
}
| random_line_split |
trie.rs | use std::fmt::Debug;
use super::bit_cache::BitCache;
use crate::double_array::DoubleArray;
use bincode;
use serde::Serialize;
use serde::de::DeserializeOwned;
struct Node<T> {
key : u8,
values: Vec<T>,
nexts : Vec<Node<T>>,
}
/// トライ木の実装。
/// ダブル配列は直接構築することはできないので、トライ木を構築してから変換することで構築する。
///
/// # Examples
///
/// ```
/// use std::fmt::Debug;
/// use dary::DoubleArray;
/// use dary::Trie;
/// use serde_derive::{Serialize, Deserialize};
///
/// fn main() {
/// let key1 = String::from("foo");
/// let key2 = String::from("bar");
/// let key3 = String::from("baz");
///
/// let sample1 = Sample { surface: key1.clone(), cost: 1 };
/// let sample2 = Sample { surface: key1.clone(), cost: 2 };
/// let sample3 = Sample { surface: key2.clone(), cost: 1 };
/// let sample4 = Sample { surface: key3.clone(), cost: 1 };
///
/// let mut trie: Trie<Sample> = Trie::new();
/// trie.set(&key1, sample1.clone());
/// trie.set(&key1, sample2.clone());
/// trie.set(&key2, sample3.clone());
/// trie.set(&key3, sample4.clone());
///
/// let double_array = trie.to_double_array().ok().unwrap();
/// assert_eq!(vec![sample1, sample2], double_array.get(&key1).unwrap());
/// assert_eq!(vec![sample3] , double_array.get(&key2).unwrap());
/// assert_eq!(vec![sample4] , double_array.get(&key3).unwrap());
/// }
///
/// #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
/// struct Sample {
/// surface: String,
/// cost: usize,
/// }
/// ```
pub struct Trie<T: Serialize + DeserializeOwned + Debug> {
root: Node<T>,
len: usize,
}
impl<T: Serialize + DeserializeOwned + Debug> Trie<T> {
pub fn new() -> Trie<T> {
Trie {
root: Node { key: 0, values: Vec::new(), nexts: Vec::new() },
len: 0,
}
}
/// trieにノードを追加する
/// 一つのkeyにつき256個までの値を登録できる
/// 超えた場合はpanic
///
/// # Arguments
///
/// * `key` - 追加するキー
/// * `value` - キーに対応する値
pub fn set(&mut self, key: &str, value: T) {
let mut node = &mut self.root;
for &k in key.as_bytes() {
match node.nexts.binary_search_by(|probe| probe.key.cmp(&k)) {
Ok(i) => {
node = &mut node.nexts[i];
},
Err(i) => {
node.nexts.insert(i, Node { key: k, values: Vec::new(), nexts: Vec::new() });
node = &mut node.nexts[i];
}
}
}
self.len += 1;
node.values.push(value);
}
/// trieを探索する
/// keyに対応する値が見つかったら値のスライスを返す
///
/// # Arguments
///
/// * `key` - 探索するkey
pub fn get(&self, key: &str) -> Option<&[T]> {
let mut node = &self.root;
for &k in key.as_bytes() {
match node.nexts.binary_search_by(|probe| probe.key.cmp(&k)) {
Ok(i) => {
node = &node.nexts[i];
},
Err(_) = | or> {
let max_key = u8::max_value() as usize + 1; // keyが取りうる値のパターン
let mut len = if max_key > (4 * self.len) { max_key } else { 4 * self.len };
let mut base_arr: Vec<u32> = vec![0; len];
let mut check_arr: Vec<u32> = vec![0; len];
let mut data_arr: Vec<u8> = Vec::with_capacity(self.len);
let mut bit_cache: BitCache = BitCache::new();
bit_cache.set(0);
bit_cache.set(1);
let mut stack: Vec<(usize, Node<T>)> = Vec::with_capacity(self.len);
if !self.root.nexts.is_empty() {
stack.push((1, self.root));
}
while !stack.is_empty() {
let (curr_idx, mut node) = stack.pop().unwrap();
bit_cache.update_start();
// base値を探索・セット
if !node.values.is_empty() {
// valuesが存在する場合はkey=255のノードとして計算する
node.nexts.push(Node { key: u8::max_value(), values: vec![], nexts: vec![] });
}
let base: usize = Self::find_base(&node.nexts, &bit_cache);
base_arr[curr_idx] = base as u32;
// 配列の長さが足りなければ配列を拡張
if base + max_key >= len {
len = len * 2;
base_arr.resize(len, 0);
check_arr.resize(len, 0);
}
// 新しいノードをダブル配列に登録
for n in node.nexts {
let i = base + (n.key as usize);
bit_cache.set(i);
check_arr[i] = curr_idx as u32;
if n.key == u8::max_value() {
// valueノードの登録
// base には data の開始 index を格納する
base_arr[i] = data_arr.len() as u32;
// data には末尾に values を追加する
let data = bincode::serialize(&node.values).unwrap();
data_arr.extend_from_slice(&data);
} else {
// 通常ノードの登録
stack.push((i, n));
}
}
}
// 配列のりサイズ
let new_len = match bit_cache.last_index_of_one() {
None => max_key,
Some(new_len) => new_len + max_key,
};
base_arr.resize(new_len, 0);
check_arr.resize(new_len, 0);
DoubleArray::from_arrays(&base_arr, &check_arr, &data_arr)
}
/// 新しいbase値を探索するメソッド
///
/// # Arguments
///
/// * `nodes` - 追加対象のノード
/// * `bit_cache` - BitCacheのインスタンス
/// * `with_zero` - key=0のノードも考慮してbase値を探す
fn find_base(nodes: &[Node<T>], bit_cache: &BitCache) -> usize {
if nodes.is_empty() {
panic!("探索すべきノードがありません");
}
let first_key = nodes[0].key as usize;
let mut offset = 0;
'outer: loop {
let empty_idx = bit_cache.find_empty_idx(offset);
let new_base = empty_idx - first_key;
if empty_idx < 256 {
panic!("empty_idx={}, first_key={}", empty_idx, first_key);
}
// すべてのノードが重複せずに配置できるかをチェック
'inner: for next in nodes {
if bit_cache.get(new_base + next.key as usize) != 0 {
// 空じゃなかった場合はnew_baseを探すとこからやり直し
offset += 1;
continue 'outer;
}
}
return new_base;
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_trie_1() {
let mut trie: Trie<i32> = Trie::new();
let s = String::from("abc");
trie.set(&s, 0);
trie.set(&s, 1);
// 登録されたkeyと値が一致している
assert_eq!(0, trie.get(&s).unwrap()[0]);
assert_eq!(1, trie.get(&s).unwrap()[1]);
let s = String::from("cba");
// 登録されていないkeyはNoneを返す
assert_eq!(None, trie.get(&s));
}
#[test]
fn test_trie_2() {
let mut trie: Trie<u32> = Trie::new();
let s1 = String::from("abc");
let s2 = String::from("abd");
let s3 = String::from("zyx");
let s4 = String::from("zwx");
trie.set(&s1, 10);
trie.set(&s2, 11);
trie.set(&s3, 12);
trie.set(&s4, 13);
trie.set(&s1, 14);
// 登録されたkeyと値が一致している
assert_eq!(10, trie.get(&s1).unwrap()[0]);
assert_eq!(14, trie.get(&s1).unwrap()[1]);
assert_eq!(11, trie.get(&s2).unwrap()[0]);
assert_eq!(12, trie.get(&s3).unwrap()[0]);
assert_eq!(13, trie.get(&s4).unwrap()[0]);
}
#[test]
fn test_trie_3() {
let mut trie: Trie<u32> = Trie::new();
let s1 = String::from("あいうえお");
let s2 = String::from("あいえうお");
let s3 = String::from("漢字");
let s4 = String::from("平仮名");
let s5 = String::from("片仮名");
trie.set(&s1, 10);
trie.set(&s2, 11);
trie.set(&s3, 12);
trie.set(&s4, 13);
trie.set(&s5, 14);
// 登録されたkeyと値が一致している
assert_eq!(10, trie.get(&s1).unwrap()[0]);
assert_eq!(11, trie.get(&s2).unwrap()[0]);
assert_eq!(12, trie.get(&s3).unwrap()[0]);
assert_eq!(13, trie.get(&s4).unwrap()[0]);
assert_eq!(14, trie.get(&s5).unwrap()[0]);
}
#[test]
fn test_find_base_1() {
let nodes: Vec<Node<u32>> = vec![
Node::<u32> { key: 2 , values: vec![], nexts: vec![] },
Node::<u32> { key: 5 , values: vec![], nexts: vec![] },
Node::<u32> { key: 255, values: vec![], nexts: vec![] },
];
let mut bit_cache = BitCache::new();
// 探索開始位置 = 256。空きindex = 256
// base値 = 空きindex - 先頭ノードのkey = 256 - 2 = 254
assert_eq!(254, Trie::find_base(&nodes, &bit_cache));
// 0 ~ 399, 500 ~ 999 を埋める
(256..400).for_each(|i| bit_cache.set(i));
(500..1000).for_each(|i| bit_cache.set(i));
// 探索開始位置 = 256。空きindex = 1000
// base値 = 空きindex - 先頭ノードのkey = 1000 - 2 = 998
assert_eq!(998, Trie::find_base(&nodes, &bit_cache));
//1000..1002, 1003..1005, 1006..1255 を埋める
(1000..1002).for_each(|i| bit_cache.set(i));
(1003..1005).for_each(|i| bit_cache.set(i));
(1006..1255).for_each(|i| bit_cache.set(i));
// 探索開始位置 = 256。空きindex = 1002
// base値 = 空きindex - 先頭ノードのkey = 1002 - 2 = 1000
assert_eq!(1000, Trie::find_base(&nodes, &bit_cache));
// 400 ~ 500 を埋める
(400..500).for_each(|i| bit_cache.set(i));
// 探索開始位置=1216。空きindex = 1255
// base値 = 空きindex - 先頭ノードのkey = 1255 - 2 = 1253
bit_cache.update_start();
assert_eq!(1253, Trie::find_base(&nodes, &bit_cache));
}
#[test]
#[should_panic(expected = "探索すべきノードがありません")]
fn test_find_base_2() {
let nodes: Vec<Node<u32>> = vec![];
let bit_cache = BitCache::new();
// nodesが空でwith_zero=falseの場合は、base値を求められないのでpanic
Trie::find_base(&nodes, &bit_cache);
}
#[test]
fn test_to_double_array_1() {
let mut trie: Trie<u32> = Trie::new();
let s1 = String::from("abc");
let s2 = String::from("ac");
let s3 = String::from("b");
let s4 = String::from("bd");
let s5 = String::from("bdc");
trie.set(&s1, 1);
trie.set(&s1, 2);
trie.set(&s2, 3);
trie.set(&s3, 4);
trie.set(&s4, 5);
trie.set(&s5, 6);
let double_array = trie.to_double_array().ok().unwrap();
// 登録されていて、data_arrに値が存在するkeyは対応する値を返す
assert_eq!(vec![1, 2], double_array.get(&s1).unwrap());
assert_eq!(vec![3], double_array.get(&s2).unwrap());
assert_eq!(vec![4], double_array.get(&s3).unwrap());
assert_eq!(vec![5], double_array.get(&s4).unwrap());
assert_eq!(vec![6], double_array.get(&s5).unwrap());
// 登録されているが、data_arrに値が存在しないkeyはNoneを返す
assert_eq!(None, double_array.get("ab"));
}
#[test]
fn test_to_double_array_2() {
let trie: Trie<u32> = Trie::new();
let double_array = trie.to_double_array().ok().unwrap();
// 遷移できない場合はpanicする
assert_eq!(None, double_array.get("abc"));
}
#[test]
fn test_to_double_array_3() {
// マルチバイト文字のテスト
let mut trie: Trie<u32> = Trie::new();
let s1 = String::from("おすしとビール");
let s2 = String::from("お寿司とビール");
let s3 = String::from("🍣🍺");
trie.set(&s1, 1);
trie.set(&s1, 2);
trie.set(&s2, 3);
trie.set(&s3, 4);
let double_array = trie.to_double_array().ok().unwrap();
// 登録されていて、data_arrに値が存在するkeyは対応する値を返す
assert_eq!(vec![1, 2], double_array.get(&s1).unwrap());
assert_eq!(vec![3] , double_array.get(&s2).unwrap());
assert_eq!(vec![4] , double_array.get(&s3).unwrap());
// 登録されているが、data_arrに値が存在しないkeyはNoneを返す
assert_eq!(None, double_array.get("お寿"));
}
}
| > {
return None;
}
}
}
if node.values.is_empty() {
None
} else {
Some(&node.values)
}
}
/// トライ木をダブル配列に変換する
///
/// # Panics
/// dataをバイト列に変換できなかった場合にpanicする。
///
/// # Errors
///
///
/// # Arguments
///
/// * `len` - ダブル配列の初期サイズ
pub fn to_double_array(self) -> Result<DoubleArray<T>, std::io::Err | identifier_body |
trie.rs | use std::fmt::Debug;
use super::bit_cache::BitCache;
use crate::double_array::DoubleArray;
use bincode;
use serde::Serialize;
use serde::de::DeserializeOwned;
struct Node<T> {
key : u8,
values: Vec<T>,
nexts : Vec<Node<T>>,
}
/// トライ木の実装。
/// ダブル配列は直接構築することはできないので、トライ木を構築してから変換することで構築する。
///
/// # Examples
///
/// ```
/// use std::fmt::Debug;
/// use dary::DoubleArray;
/// use dary::Trie;
/// use serde_derive::{Serialize, Deserialize};
///
/// fn main() {
/// let key1 = String::from("foo");
/// let key2 = String::from("bar");
/// let key3 = String::from("baz");
///
/// let sample1 = Sample { surface: key1.clone(), cost: 1 };
/// let sample2 = Sample { surface: key1.clone(), cost: 2 };
/// let sample3 = Sample { surface: key2.clone(), cost: 1 };
/// let sample4 = Sample { surface: key3.clone(), cost: 1 };
///
/// let mut trie: Trie<Sample> = Trie::new();
/// trie.set(&key1, sample1.clone());
/// trie.set(&key1, sample2.clone());
/// trie.set(&key2, sample3.clone());
/// trie.set(&key3, sample4.clone());
///
/// let double_array = trie.to_double_array().ok().unwrap();
/// assert_eq!(vec![sample1, sample2], double_array.get(&key1).unwrap());
/// assert_eq!(vec![sample3] , double_array.get(&key2).unwrap());
/// assert_eq!(vec![sample4] , double_array.get(&key3).unwrap());
/// }
///
/// #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
/// struct Sample {
/// surface: String,
/// cost: usize,
/// }
/// ```
pub struct Trie<T: Serialize + DeserializeOwned + Debug> {
root: Node<T>,
len: usize,
}
impl<T: Serialize + DeserializeOwned + Debug> Trie<T> {
pub fn new() -> Trie<T> {
Trie {
root: Node { key: 0, values: Vec::new(), nexts: Vec::new() },
len: 0,
}
}
/// trieにノードを追加する
/// 一つのkeyにつき256個までの値を登録できる
/// 超えた場合はpanic
///
/// # Arguments
///
/// * `key` - 追加するキー
/// * `value` - キーに対応する値
pub fn set(&mut self, key: &str, value: T) {
let mut node = &mut self.root;
for &k in key.as_bytes() {
match node.nexts.binary_search_by(|probe| probe.key.cmp(&k)) {
Ok(i) => {
node = &mut node.nexts[i];
},
Err(i) => {
node.nexts.insert(i, Node { key: k, values: Vec::new(), nexts: Vec::new() });
node = &mut node.nexts[i];
}
}
}
self.len += 1;
node.values.push(value);
}
/// trieを探索する
/// keyに対応する値が見つかったら値のスライスを返す
///
/// # Arguments
///
/// * `key` - 探索するkey
pub fn get(&self, key: &str) -> Option<&[T]> {
let mut node = &self.root;
for &k in key.as_bytes() {
match node.nexts.binary_search_by(|probe| probe.key.cmp(&k)) {
Ok(i) => {
node = &node.nexts[i];
},
Err(_) => {
return None;
}
}
}
if node.values.is_empty() {
None
} else {
Some(&node.values)
}
}
/// トライ木をダブル配列に変換する
///
/// # Panics
/// dataをバイト列に変換できなかった場合にpanicする。
///
/// # Errors
///
///
/// # Arguments
///
/// * `len` - ダブル配列の初期サイズ
pub fn to_double_array(self) -> Result<DoubleArray<T>, std::io::Error> {
let max_key = u8::max_value() as usize + 1; // keyが取りうる値のパターン
let mut len = if max_key > (4 * self.len) { max_key } else { 4 * self.len };
let mut base_arr: Vec<u32> = vec![0; len];
let mut check_arr: Vec<u32> = vec![0; len];
let mut data_arr: Vec<u8> = Vec::with_capacity(self.len);
let mut bit_cache: BitCache = BitCache::new();
bit_cache.set(0);
bit_cache.set(1);
let mut stack: Vec<(usize, Node<T>)> = Vec::with_capacity(self.len);
if !self.root.nexts.is_empty() {
stack.push((1, self.root));
}
while !stack.is_empty() {
let (curr_idx, mut node) = stack.pop().unwrap();
bit_cache.update_start();
// base値を探索・セット
if !node.values.is_empty() {
// valuesが存在する場合はkey=255のノードとして計算する
node.nexts.push(Node { key: u8::max_value(), values: vec![], nexts: vec![] });
}
let base: usize = Self::find_base(&node.nexts, &bit_cache);
base_arr[curr_idx] = base as u32;
// 配列の長さが足りなければ配列を拡張
if base + max_key >= len {
len = len * 2;
base_arr.resize(len, 0);
check_arr.resize(len, 0);
}
// 新しいノードをダブル配列に登録
for n in node.nexts {
let i = base + (n.key as usize);
bit_cache.set(i);
check_arr[i] = curr_idx as u32;
if n.key == u8::max_value() {
// valueノードの登録
// base には data の開始 index を格納する
base_arr[i] = data_arr.len() as u32;
// data には末尾に values を追加する
let data = bincode::serialize(&node.values).unwrap();
data_arr.extend_from_slice(&data);
} else {
// 通常ノードの登録
stack.push((i, n));
}
}
}
// 配列のりサイズ
let new_len = match bit_cache.last_index_of_one() {
None => max_key,
Some(new_len) => new_len + max_key,
};
base_arr.resize(new_len, 0);
check_arr.resize(new_len, 0);
DoubleArray::from_arrays(&base_arr, &check_arr, &data_arr)
}
/// 新しいbase値を探索するメソッド
///
/// # Arguments
///
/// * `nodes` - 追加対象のノード
/// * `bit_cache` - BitCacheのインスタンス
/// * `with_zero` - key=0のノードも考慮してbase値を探す
fn find_base(nodes: &[Node<T>], bit_cache: &BitCache) -> usize {
if nodes.is_empty() {
panic!("探索すべきノードがありません");
}
let first_key = nodes[0].key as usize;
let mut offset = 0;
'outer: loop {
let empty_idx = bit_cache.find_empty_idx(offset);
let new_base = empty_idx - first_key;
if empty_idx < 256 {
panic!("empty_idx={}, first_key={}", empty_idx, first_key);
}
// すべてのノードが重複せずに配置できるかをチェック
'inner: for next in nodes {
if bit_cache.get(new_base + next.key as usize) != 0 {
// 空じゃなかった場合はnew_baseを探すとこからやり直し
offset += 1;
continue 'outer;
}
}
return new_base;
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_trie_1() {
let mut trie: Trie<i32> = Trie::new();
let s = String::from("abc");
trie.set(&s, 0);
trie.set(&s, 1);
// 登録されたkeyと値が一致している
assert_eq!(0, trie.get(&s).unwrap()[0]);
assert_eq!(1, trie.get(&s).unwrap()[1]);
let s = String::from("cba");
// 登録されていないkeyはNoneを返す
assert_eq!(None, trie.get(&s));
}
#[test]
fn test_trie_2() {
let mut trie: Trie<u32> = Trie::new();
let s1 = String::from("abc");
let s2 = String::from("abd");
let s3 = String::from("zyx");
let s4 = String::from("zwx");
trie.set(&s1, 10);
trie.set(&s2, 11);
trie.set(&s3, 12);
trie.set(&s4, 13);
trie.set(&s1, 14);
// 登録されたkeyと値が一致している
assert_eq!(10, trie.get(&s1).unwrap()[0]);
assert_eq!(14, trie.get(&s1).unwrap()[1]);
assert_eq!(11, trie.get(&s2).unwrap()[0]);
assert_eq!(12, trie.get(&s3).unwrap()[0]);
assert_eq!(13, trie.get(&s4).unwrap()[0]);
}
#[test]
fn test_trie_3() {
let mut trie: Trie<u32> = Trie::new();
let s1 = String::from("あいうえお");
let s2 = String::from("あいえうお");
let s3 = String::from("漢字");
let s4 = String::from("平仮名");
let s5 = String::from("片仮名");
trie.set(&s1, 10);
trie.set(&s2, 11);
trie.set(&s3, 12);
trie.set(&s4, 13);
trie.set(&s5, 14);
// 登録されたkeyと値が一致している
assert_eq!(10, trie.get(&s1).unwrap()[0]);
assert_eq!(11, trie.get(&s2).unwrap()[0]);
assert_eq!(12, trie.get(&s3).unwrap()[0]);
assert_eq!(13, trie.get(&s4).unwrap()[0]);
assert_eq!(14, trie.get(&s5).unwrap()[0]);
}
#[test]
fn test_find_base_1() {
let nodes: Vec<Node<u32>> = vec![
Node::<u32> { key: 2 , values: vec![], nexts: vec![] },
Node::<u32> { key: 5 , values: vec![], nexts: vec![] },
Node::<u32> { key: 255, values: vec![], nexts: vec![] },
];
let mut bit_cache = BitCache::new();
// 探索開始位置 = 256。空きindex = 256
// base値 = 空きindex - 先頭ノードのkey = 256 - 2 = 254
assert_eq!(254, Trie::find_base(&nodes, &bit_cache));
// 0 ~ 399, 500 ~ 999 を埋める
(256..400).for_each(|i| bit_cache.set(i));
(500..1000).for_each(|i| bit_cache.set(i));
// 探索開始位置 = 256。空きindex = 1000
// base値 = 空きindex - 先頭ノードのkey = 1000 - 2 = 998
assert_eq!(998, Trie::find_base(&nodes, &bit_cache));
//1000..1002, 1003..1005, 1006..1255 を埋める
(1000..1002).for_each(|i| bit_cache.set(i));
(1003..1005).for_each(|i| bit_cache.set(i));
(1006..1255).for_each(|i| bit_cache.set(i));
// 探索開始位置 = 256。空きindex = 1002
// base値 = 空きindex - 先頭ノードのkey = 1002 - 2 = 1000
assert_eq!(1000, Trie::find_base(&nodes, &bit_cache));
// 400 ~ 500 を埋める
(400..500).for_each(|i| bit_cache.set(i));
// 探索開始位置=1216。空きindex = 1255
// base値 = 空きindex - 先頭ノードのkey = 1255 - 2 = 1253
bit_cache.update_start();
assert_eq!(1253, Trie::find_base(&nodes, &bit_cache));
}
#[test]
#[should_panic(expected = "探索すべきノードがありません")]
fn test_find_base_2() {
let nodes: Vec<Node<u32>> = vec![];
let bit_cache = BitCache::new();
// nodesが空でwith_zero=falseの場合は、base値を求められないのでpanic
Trie::find_base(&nodes, &bit_cache);
}
#[test]
fn test_to_double_array_1() {
let mut trie: Trie<u32> = Trie::new();
let s1 = String::from("abc");
let s2 = String::from("ac");
let s3 = String::from("b");
let s4 = String::from("bd");
let s5 = String::from("bdc");
trie.set(&s1, 1);
trie.set(&s1, 2);
trie.set(&s2, 3);
trie.set(&s3, 4);
trie.set(&s4, 5);
trie.set(&s5, 6);
let double_array = trie.to_double_array().ok().unwrap();
// 登録されていて、data_arrに値が存在するkeyは対応する値を返す
assert_eq!(vec![1, 2], double_array.get(&s1).unwrap());
assert_eq!(vec![3], double_array.get(&s2).unwrap());
assert_eq!(vec![4], double_array.get(&s3).unwrap());
assert_eq!(vec![5], double_array.get(&s4).unwrap());
assert_eq!(vec![6], double_array.get(&s5).unwrap());
// 登録されているが、data_arrに値が存在しないkeyはNoneを返す
assert_eq!(None, double_array.get("ab"));
}
#[test]
fn test_to_double_array_2() {
let trie: Trie<u32> = Trie::new();
let double_array = trie.to_double_array().ok().unwrap();
// 遷移できない場合はpanicする
assert_eq!(None, double_array.get("abc"));
}
#[test]
fn test_to_double_array_3() {
// マルチバイト文字のテスト
let mut trie: Trie<u32> = Trie::new();
let s1 = String::from("おすしとビール");
let s2 = String::from("お寿司とビール");
let s3 = String::from("🍣🍺");
trie.set(&s1, 1);
trie.set(&s1, 2);
trie.set(&s2, 3);
trie.set(&s3, 4);
let double_array = trie.to_double_array().ok().unwrap();
// 登録されていて、data_arrに値が存在するkeyは対応する値を返す
assert_eq!(vec![1, 2], double_array.get(&s1).unwrap());
assert_eq!(vec![3] , double_array.get(&s2).unwrap());
assert_eq!(vec![4] , double_array.get(&s3).unwrap());
// 登録されているが、data_arrに値が存在しないkeyはNoneを返す
assert_eq!(None, double_array.get("お寿"));
}
}
| identifier_name | ||
trie.rs | use std::fmt::Debug;
use super::bit_cache::BitCache;
use crate::double_array::DoubleArray;
use bincode;
use serde::Serialize;
use serde::de::DeserializeOwned;
struct Node<T> {
key : u8,
values: Vec<T>,
nexts : Vec<Node<T>>,
}
/// トライ木の実装。
/// ダブル配列は直接構築することはできないので、トライ木を構築してから変換することで構築する。
///
/// # Examples
///
/// ```
/// use std::fmt::Debug;
/// use dary::DoubleArray;
/// use dary::Trie;
/// use serde_derive::{Serialize, Deserialize};
///
/// fn main() {
/// let key1 = String::from("foo");
/// let key2 = String::from("bar");
/// let key3 = String::from("baz");
///
/// let sample1 = Sample { surface: key1.clone(), cost: 1 };
/// let sample2 = Sample { surface: key1.clone(), cost: 2 };
/// let sample3 = Sample { surface: key2.clone(), cost: 1 };
/// let sample4 = Sample { surface: key3.clone(), cost: 1 };
///
/// let mut trie: Trie<Sample> = Trie::new();
/// trie.set(&key1, sample1.clone());
/// trie.set(&key1, sample2.clone());
/// trie.set(&key2, sample3.clone());
/// trie.set(&key3, sample4.clone());
///
/// let double_array = trie.to_double_array().ok().unwrap();
/// assert_eq!(vec![sample1, sample2], double_array.get(&key1).unwrap());
/// assert_eq!(vec![sample3] , double_array.get(&key2).unwrap());
/// assert_eq!(vec![sample4] , double_array.get(&key3).unwrap());
/// }
///
/// #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
/// struct Sample {
/// surface: String,
/// cost: usize,
/// }
/// ```
pub struct Trie<T: Serialize + DeserializeOwned + Debug> {
root: Node<T>,
len: usize,
}
impl<T: Serialize + DeserializeOwned + Debug> Trie<T> {
pub fn new() -> Trie<T> {
Trie {
root: Node { key: 0, values: Vec::new(), nexts: Vec::new() },
len: 0,
}
}
/// trieにノードを追加する
/// 一つのkeyにつき256個までの値を登録できる
/// 超えた場合はpanic
///
/// # Arguments
///
/// * `key` - 追加するキー
/// * `value` - キーに対応する値
pub fn set(&mut self, key: &str, value: T) {
let mut node = &mut self.root;
for &k in key.as_bytes() {
match node.nexts.binary_search_by(|probe| probe.key.cmp(&k)) {
Ok(i) => {
node = &mut node.nexts[i];
},
Err(i) => {
node.nexts.insert(i, Node { key: k, values: Vec::new(), nexts: Vec::new() });
node = &mut node.nexts[i];
}
}
}
self.len += 1;
node.values.push(value);
}
/// trieを探索する
/// keyに対応する値が見つかったら値のスライスを返す
///
/// # Arguments
///
/// * `key` - 探索するkey
pub fn get(&self, key: &str) -> Option<&[T]> {
let mut node = &self.root;
for &k in key.as_bytes() {
match node.nexts.binary_search_by(|probe| probe.key.cmp(&k)) {
Ok(i) => {
node = &node.nexts[i];
},
Err(_) => {
return None;
}
}
}
if node.values.is_empty() {
None
} else {
Some(&node.values)
}
}
/// トライ木をダブル配列に変換する
///
/// # Panics
/// dataをバイト列に変換できなかった場合にpanicする。
///
/// # Errors
///
///
/// # Arguments
///
/// * `len` - ダブル配列の初期サイズ
pub fn to_double_ar | o::Error> {
let max_key = u8::max_value() as usize + 1; // keyが取りうる値のパターン
let mut len = if max_key > (4 * self.len) { max_key } else { 4 * self.len };
let mut base_arr: Vec<u32> = vec![0; len];
let mut check_arr: Vec<u32> = vec![0; len];
let mut data_arr: Vec<u8> = Vec::with_capacity(self.len);
let mut bit_cache: BitCache = BitCache::new();
bit_cache.set(0);
bit_cache.set(1);
let mut stack: Vec<(usize, Node<T>)> = Vec::with_capacity(self.len);
if !self.root.nexts.is_empty() {
stack.push((1, self.root));
}
while !stack.is_empty() {
let (curr_idx, mut node) = stack.pop().unwrap();
bit_cache.update_start();
// base値を探索・セット
if !node.values.is_empty() {
// valuesが存在する場合はkey=255のノードとして計算する
node.nexts.push(Node { key: u8::max_value(), values: vec![], nexts: vec![] });
}
let base: usize = Self::find_base(&node.nexts, &bit_cache);
base_arr[curr_idx] = base as u32;
// 配列の長さが足りなければ配列を拡張
if base + max_key >= len {
len = len * 2;
base_arr.resize(len, 0);
check_arr.resize(len, 0);
}
// 新しいノードをダブル配列に登録
for n in node.nexts {
let i = base + (n.key as usize);
bit_cache.set(i);
check_arr[i] = curr_idx as u32;
if n.key == u8::max_value() {
// valueノードの登録
// base には data の開始 index を格納する
base_arr[i] = data_arr.len() as u32;
// data には末尾に values を追加する
let data = bincode::serialize(&node.values).unwrap();
data_arr.extend_from_slice(&data);
} else {
// 通常ノードの登録
stack.push((i, n));
}
}
}
// 配列のりサイズ
let new_len = match bit_cache.last_index_of_one() {
None => max_key,
Some(new_len) => new_len + max_key,
};
base_arr.resize(new_len, 0);
check_arr.resize(new_len, 0);
DoubleArray::from_arrays(&base_arr, &check_arr, &data_arr)
}
/// 新しいbase値を探索するメソッド
///
/// # Arguments
///
/// * `nodes` - 追加対象のノード
/// * `bit_cache` - BitCacheのインスタンス
/// * `with_zero` - key=0のノードも考慮してbase値を探す
fn find_base(nodes: &[Node<T>], bit_cache: &BitCache) -> usize {
if nodes.is_empty() {
panic!("探索すべきノードがありません");
}
let first_key = nodes[0].key as usize;
let mut offset = 0;
'outer: loop {
let empty_idx = bit_cache.find_empty_idx(offset);
let new_base = empty_idx - first_key;
if empty_idx < 256 {
panic!("empty_idx={}, first_key={}", empty_idx, first_key);
}
// すべてのノードが重複せずに配置できるかをチェック
'inner: for next in nodes {
if bit_cache.get(new_base + next.key as usize) != 0 {
// 空じゃなかった場合はnew_baseを探すとこからやり直し
offset += 1;
continue 'outer;
}
}
return new_base;
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_trie_1() {
let mut trie: Trie<i32> = Trie::new();
let s = String::from("abc");
trie.set(&s, 0);
trie.set(&s, 1);
// 登録されたkeyと値が一致している
assert_eq!(0, trie.get(&s).unwrap()[0]);
assert_eq!(1, trie.get(&s).unwrap()[1]);
let s = String::from("cba");
// 登録されていないkeyはNoneを返す
assert_eq!(None, trie.get(&s));
}
#[test]
fn test_trie_2() {
let mut trie: Trie<u32> = Trie::new();
let s1 = String::from("abc");
let s2 = String::from("abd");
let s3 = String::from("zyx");
let s4 = String::from("zwx");
trie.set(&s1, 10);
trie.set(&s2, 11);
trie.set(&s3, 12);
trie.set(&s4, 13);
trie.set(&s1, 14);
// 登録されたkeyと値が一致している
assert_eq!(10, trie.get(&s1).unwrap()[0]);
assert_eq!(14, trie.get(&s1).unwrap()[1]);
assert_eq!(11, trie.get(&s2).unwrap()[0]);
assert_eq!(12, trie.get(&s3).unwrap()[0]);
assert_eq!(13, trie.get(&s4).unwrap()[0]);
}
#[test]
fn test_trie_3() {
let mut trie: Trie<u32> = Trie::new();
let s1 = String::from("あいうえお");
let s2 = String::from("あいえうお");
let s3 = String::from("漢字");
let s4 = String::from("平仮名");
let s5 = String::from("片仮名");
trie.set(&s1, 10);
trie.set(&s2, 11);
trie.set(&s3, 12);
trie.set(&s4, 13);
trie.set(&s5, 14);
// 登録されたkeyと値が一致している
assert_eq!(10, trie.get(&s1).unwrap()[0]);
assert_eq!(11, trie.get(&s2).unwrap()[0]);
assert_eq!(12, trie.get(&s3).unwrap()[0]);
assert_eq!(13, trie.get(&s4).unwrap()[0]);
assert_eq!(14, trie.get(&s5).unwrap()[0]);
}
#[test]
fn test_find_base_1() {
let nodes: Vec<Node<u32>> = vec![
Node::<u32> { key: 2 , values: vec![], nexts: vec![] },
Node::<u32> { key: 5 , values: vec![], nexts: vec![] },
Node::<u32> { key: 255, values: vec![], nexts: vec![] },
];
let mut bit_cache = BitCache::new();
// 探索開始位置 = 256。空きindex = 256
// base値 = 空きindex - 先頭ノードのkey = 256 - 2 = 254
assert_eq!(254, Trie::find_base(&nodes, &bit_cache));
// 0 ~ 399, 500 ~ 999 を埋める
(256..400).for_each(|i| bit_cache.set(i));
(500..1000).for_each(|i| bit_cache.set(i));
// 探索開始位置 = 256。空きindex = 1000
// base値 = 空きindex - 先頭ノードのkey = 1000 - 2 = 998
assert_eq!(998, Trie::find_base(&nodes, &bit_cache));
//1000..1002, 1003..1005, 1006..1255 を埋める
(1000..1002).for_each(|i| bit_cache.set(i));
(1003..1005).for_each(|i| bit_cache.set(i));
(1006..1255).for_each(|i| bit_cache.set(i));
// 探索開始位置 = 256。空きindex = 1002
// base値 = 空きindex - 先頭ノードのkey = 1002 - 2 = 1000
assert_eq!(1000, Trie::find_base(&nodes, &bit_cache));
// 400 ~ 500 を埋める
(400..500).for_each(|i| bit_cache.set(i));
// 探索開始位置=1216。空きindex = 1255
// base値 = 空きindex - 先頭ノードのkey = 1255 - 2 = 1253
bit_cache.update_start();
assert_eq!(1253, Trie::find_base(&nodes, &bit_cache));
}
#[test]
#[should_panic(expected = "探索すべきノードがありません")]
fn test_find_base_2() {
let nodes: Vec<Node<u32>> = vec![];
let bit_cache = BitCache::new();
// nodesが空でwith_zero=falseの場合は、base値を求められないのでpanic
Trie::find_base(&nodes, &bit_cache);
}
#[test]
fn test_to_double_array_1() {
let mut trie: Trie<u32> = Trie::new();
let s1 = String::from("abc");
let s2 = String::from("ac");
let s3 = String::from("b");
let s4 = String::from("bd");
let s5 = String::from("bdc");
trie.set(&s1, 1);
trie.set(&s1, 2);
trie.set(&s2, 3);
trie.set(&s3, 4);
trie.set(&s4, 5);
trie.set(&s5, 6);
let double_array = trie.to_double_array().ok().unwrap();
// 登録されていて、data_arrに値が存在するkeyは対応する値を返す
assert_eq!(vec![1, 2], double_array.get(&s1).unwrap());
assert_eq!(vec![3], double_array.get(&s2).unwrap());
assert_eq!(vec![4], double_array.get(&s3).unwrap());
assert_eq!(vec![5], double_array.get(&s4).unwrap());
assert_eq!(vec![6], double_array.get(&s5).unwrap());
// 登録されているが、data_arrに値が存在しないkeyはNoneを返す
assert_eq!(None, double_array.get("ab"));
}
#[test]
fn test_to_double_array_2() {
let trie: Trie<u32> = Trie::new();
let double_array = trie.to_double_array().ok().unwrap();
// 遷移できない場合はpanicする
assert_eq!(None, double_array.get("abc"));
}
#[test]
fn test_to_double_array_3() {
// マルチバイト文字のテスト
let mut trie: Trie<u32> = Trie::new();
let s1 = String::from("おすしとビール");
let s2 = String::from("お寿司とビール");
let s3 = String::from("🍣🍺");
trie.set(&s1, 1);
trie.set(&s1, 2);
trie.set(&s2, 3);
trie.set(&s3, 4);
let double_array = trie.to_double_array().ok().unwrap();
// 登録されていて、data_arrに値が存在するkeyは対応する値を返す
assert_eq!(vec![1, 2], double_array.get(&s1).unwrap());
assert_eq!(vec![3] , double_array.get(&s2).unwrap());
assert_eq!(vec![4] , double_array.get(&s3).unwrap());
// 登録されているが、data_arrに値が存在しないkeyはNoneを返す
assert_eq!(None, double_array.get("お寿"));
}
}
| ray(self) -> Result<DoubleArray<T>, std::i | conditional_block |
doctype.py | # Copyright (c) 2012 Web Notes Technologies Pvt Ltd (http://erpnext.com)
#
# MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
# OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# TODO:
# Patch: Remove DocFormat
# imports
import webnotes
import webnotes.model
import webnotes.model.doc
from webnotes.utils.cache import CacheItem
class _DocType:
"""
The _DocType object is created internally using the module's `get` method.
"""
def __init__(self, name):
self.name = name
def make_doclist(self, form=1):
"""
"""
# do not load from cache if auto cache clear is enabled
import conf
from_cache = True
if hasattr(conf, 'auto_cache_clear'):
from_cache = not conf.auto_cache_clear
if form and from_cache:
cached_doclist = self.load_from_cache()
if cached_doclist: return cached_doclist
# Get parent doc and its fields
doclist = webnotes.model.doc.get('DocType', self.name, 1)
doclist += self.get_custom_fields(self.name)
if form:
table_fields = [t[0] for t in self.get_table_fields(doclist)]
# for each unique table
for t in list(set(table_fields)):
# Get child doc and its fields
table_doclist = webnotes.model.doc.get('DocType', t, 1)
table_doclist += self.get_custom_fields(t)
doclist += table_doclist
self.apply_property_setters(doclist)
if form:
self.load_select_options(doclist)
self.add_code(doclist[0])
self.load_print_formats(doclist)
self.insert_into_cache(doclist)
return doclist
def get_custom_fields(self, doc_type):
"""
Gets a list of custom field docs masked as type DocField
"""
custom_doclist = []
res = webnotes.conn.sql("""SELECT * FROM `tabCustom Field` | for r in res:
# Cheat! Mask Custom Field as DocField
custom_field = webnotes.model.doc.Document(fielddata=r)
self.mask_custom_field(custom_field, doc_type)
custom_doclist.append(custom_field)
return custom_doclist
def mask_custom_field(self, custom_field, doc_type):
"""
Masks doctype and parent related properties of Custom Field as that
of DocField
"""
custom_field.fields.update({
'doctype': 'DocField',
'parent': doc_type,
'parentfield': 'fields',
'parenttype': 'DocType',
})
def get_table_fields(self, doclist):
"""
Returns [[options, fieldname]] of fields of type 'Table'
"""
table_fields = []
for d in doclist:
if d.doctype=='DocField' and d.fieldtype == 'Table':
table_fields.append([d.options, d.fieldname])
return table_fields
def apply_property_setters(self, doclist):
"""
"""
property_dict, doc_type_list = self.get_property_setters(doclist)
for d in doclist:
self.update_field_properties(d, property_dict)
self.apply_previous_field_properties(doclist, property_dict,
doc_type_list)
def get_property_setters(self, doclist):
"""
Returns a dict of property setter lists and doc_type_list
"""
from webnotes.utils import cstr
property_dict = {}
# final property dict will be
# {
# doc_type: {
# fieldname: [list of property setter dicts]
# }
# }
doc_type_list = list(set(
d.doctype=='DocType' and d.name or d.parent
for d in doclist))
in_string = '", "'.join(doc_type_list)
for ps in webnotes.conn.sql("""\
SELECT doc_type, field_name, property, property_type, value
FROM `tabProperty Setter`
WHERE doc_type IN ("%s")""" % in_string, as_dict=1):
property_dict.setdefault(ps.get('doc_type'),
{}).setdefault(cstr(ps.get('field_name')), []).append(ps)
return property_dict, doc_type_list
def update_field_properties(self, d, property_dict):
"""
apply properties except previous_field ones
"""
from webnotes.utils import cstr
# get property setters for a given doctype's fields
doctype_property_dict = (d.doctype=='DocField' and property_dict.get(d.parent) or
property_dict.get(d.name))
if not (doctype_property_dict and doctype_property_dict.get(cstr(d.fieldname))): return
from webnotes.utils import cint
prop_updates = []
for prop in doctype_property_dict.get(cstr(d.fieldname)):
if prop.get('property')=='previous_field': continue
if prop.get('property_type') == 'Check' or \
prop.get('value') in ['0', '1']:
prop_updates.append([prop.get('property'), cint(prop.get('value'))])
else:
prop_updates.append([prop.get('property'), prop.get('value')])
prop_updates and d.fields.update(dict(prop_updates))
def apply_previous_field_properties(self, doclist, property_dict,
doc_type_list):
"""
"""
prev_field_dict = self.get_previous_field_properties(property_dict)
if not prev_field_dict: return
for doc_type in doc_type_list:
docfields = self.get_sorted_docfields(doclist, doc_type)
docfields = self.sort_docfields(doc_type, docfields, prev_field_dict)
if docfields: self.change_idx(doclist, docfields, doc_type)
def get_previous_field_properties(self, property_dict):
"""
setup prev_field_dict
"""
from webnotes.utils import cstr
doctype_prev_field_list = []
for doc_type in property_dict:
prev_field_list = []
for prop_list in property_dict.get(doc_type).values():
for prop in prop_list:
if prop.get('property') == 'previous_field':
prev_field_list.append([prop.get('value'),
prop.get('field_name')])
break
if not prev_field_list: continue
doctype_prev_field_list.append([doc_type, dict(prev_field_list)])
if not doctype_prev_field_list: return
return dict(doctype_prev_field_list)
def get_sorted_docfields(self, doclist, doc_type):
"""
get a sorted list of docfield names
"""
sorted_list = sorted([
d for d in doclist
if d.doctype == 'DocField'
and d.parent == doc_type
], key=lambda df: df.idx)
return [d.fieldname for d in sorted_list]
def sort_docfields(self, doc_type, docfields, prev_field_dict):
"""
"""
temp_dict = prev_field_dict.get(doc_type)
if not temp_dict: return
prev_field = 'None' in temp_dict and 'None' or docfields[0]
i = 0
while temp_dict:
get_next_docfield = True
cur_field = temp_dict.get(prev_field)
if cur_field and cur_field in docfields:
try:
del temp_dict[prev_field]
if prev_field in docfields:
docfields.remove(cur_field)
docfields.insert(docfields.index(prev_field) + 1,
cur_field)
elif prev_field == 'None':
docfields.remove(cur_field)
docfields.insert(0, cur_field)
except ValueError:
pass
if cur_field in temp_dict:
prev_field = cur_field
get_next_docfield = False
if get_next_docfield:
i += 1
if i>=len(docfields): break
prev_field = docfields[i]
keys, vals = temp_dict.keys(), temp_dict.values()
if prev_field in vals:
i -= 1
prev_field = keys[vals.index(prev_field)]
return docfields
def change_idx(self, doclist, docfields, doc_type):
for d in doclist:
if d.fieldname and d.fieldname in docfields and d.parent == doc_type:
d.idx = docfields.index(d.fieldname) + 1
def add_code(self, doc):
"""add js, css code"""
import os
from webnotes.modules import scrub, get_module_path
import conf
modules_path = get_module_path(doc.module)
path = os.path.join(modules_path, 'doctype', scrub(doc.name))
def _add_code(fname, fieldname):
fpath = os.path.join(path, fname)
if os.path.exists(fpath):
with open(fpath, 'r') as f:
doc.fields[fieldname] = f.read()
_add_code(scrub(doc.name) + '.js', '__js')
_add_code(scrub(doc.name) + '.css', '__css')
_add_code('%s_list.js' % scrub(doc.name), '__listjs')
_add_code('help.md', 'description')
# embed all require files
import re
def _sub(match):
fpath = os.path.join(os.path.dirname(conf.modules_path), \
re.search('["\'][^"\']*["\']', match.group(0)).group(0)[1:-1])
if os.path.exists(fpath):
with open(fpath, 'r') as f:
return '\n' + f.read() + '\n'
else:
return '\n// no file "%s" found \n' % fpath
if doc.fields.get('__js'):
doc.fields['__js'] = re.sub('(wn.require\([^\)]*.)', _sub, doc.fields['__js'])
# custom script
from webnotes.model.code import get_custom_script
custom = get_custom_script(doc.name, 'Client') or ''
doc.fields['__js'] = doc.fields.setdefault('__js', '') + '\n' + custom
def load_select_options(self, doclist):
"""
Loads Select options for 'Select' fields
with link: as start of options
"""
for d in doclist:
if (d.doctype == 'DocField' and d.fieldtype == 'Select' and
d.options and d.options[:5].lower() == 'link:'):
# Get various options
opt_list = self._get_select_options(d)
opt_list = [''] + [o[0] or '' for o in opt_list]
d.options = "\n".join(opt_list)
def _get_select_options(self, d):
"""
Queries and returns select options
(called by load_select_options)
"""
op = d.options.split('\n')
if len(op) > 1 and op[1][:4].lower() == 'sql:':
# Execute the sql query
query = op[1][4:].replace('__user',
webnotes.session.get('user'))
else:
# Extract DocType and Conditions
# and execute the resulting query
dt = op[0][5:].strip()
cond_list = [cond.replace('__user',
webnotes.session.get('user')) for cond in op[1:]]
query = """\
SELECT name FROM `tab%s`
WHERE %s docstatus!=2
ORDER BY name ASC""" % (dt,
cond_list and (" AND ".join(cond_list) + " AND ") or "")
try:
opt_list = webnotes.conn.sql(query)
except:
# WARNING: Exception suppressed
opt_list = []
return opt_list
def load_print_formats(self, doclist):
"""
Load Print Formats in doclist
"""
# TODO: Process Print Formats for $import
# to deprecate code in print_format.py
# if this is implemented, clear CacheItem on saving print format
print_formats = webnotes.conn.sql("""\
SELECT * FROM `tabPrint Format`
WHERE doc_type=%s AND docstatus<2""", doclist[0].fields.get('name'),
as_dict=1)
for pf in print_formats:
if not pf: continue
print_format_doc = webnotes.model.doc.Document('Print Format', fielddata=pf)
doclist.append(print_format_doc)
def load_from_cache(self):
import json
json_doclist = CacheItem(self.name).get()
if json_doclist:
return [webnotes.model.doc.Document(fielddata=d)
for d in json.loads(json_doclist)]
def insert_into_cache(self, doclist):
import json
json_doclist = json.dumps([d.fields for d in doclist])
CacheItem(self.name).set(json_doclist)
def get(dt, form=1):
"""
Load "DocType" - called by form builder, report buider and from code.py (when there is no cache)
"""
if not dt: return []
doclist = _DocType(dt).make_doclist(form)
return doclist
# Deprecate after import_docs rewrite
def get_field_property(dt, fieldname, property):
"""
get a field property, override it from property setter if specified
"""
field = webnotes.conn.sql("""
select name, `%s`
from tabDocField
where parent=%s and fieldname=%s""" % (property, '%s', '%s'), (dt, fieldname))
prop = webnotes.conn.sql("""
select value
from `tabProperty Setter`
where doc_type=%s and field_name=%s and property=%s""", (dt, fieldname, property))
if prop:
return prop[0][0]
else:
return field[0][1]
def get_property(dt, property, fieldname=None):
"""
get a doctype property, override it from property setter if specified
"""
if fieldname:
prop = webnotes.conn.sql("""
select value
from `tabProperty Setter`
where doc_type=%s and field_name=%s
and property=%s""", (dt, fieldname, property))
if prop:
return prop[0][0]
else:
val = webnotes.conn.sql("""\
SELECT %s FROM `tabDocField`
WHERE parent = %s AND fieldname = %s""" % \
(property, '%s', '%s'), (dt, fieldname))
if val and val[0][0]: return val[0][0] or ''
else:
prop = webnotes.conn.sql("""
select value
from `tabProperty Setter`
where doc_type=%s and doctype_or_field='DocType'
and property=%s""", (dt, property))
if prop:
return prop[0][0]
else:
return webnotes.conn.get_value('DocType', dt, property)
# Test Cases
import unittest
class DocTypeTest(unittest.TestCase):
def setUp(self):
self.name = 'Sales Order'
self.dt = _DocType(self.name)
def tearDown(self):
webnotes.conn.rollback()
def test_make_doclist(self):
doclist = self.dt.make_doclist()
for d in doclist:
print d.idx, d.doctype, d.name, d.parent
if not d.doctype: print d.fields
#print "--", d.name, "--"
#print d.doctype
self.assertTrue(doclist)
def test_get_custom_fields(self):
return
doclist = self.dt.get_custom_fields(self.name)
for d in doclist:
print "--", d.name, "--"
print d.fields
self.assertTrue(doclist) | WHERE dt = %s AND docstatus < 2""", doc_type, as_dict=1) | random_line_split |
doctype.py | # Copyright (c) 2012 Web Notes Technologies Pvt Ltd (http://erpnext.com)
#
# MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
# OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# TODO:
# Patch: Remove DocFormat
# imports
import webnotes
import webnotes.model
import webnotes.model.doc
from webnotes.utils.cache import CacheItem
class _DocType:
"""
The _DocType object is created internally using the module's `get` method.
"""
def __init__(self, name):
self.name = name
def make_doclist(self, form=1):
"""
"""
# do not load from cache if auto cache clear is enabled
import conf
from_cache = True
if hasattr(conf, 'auto_cache_clear'):
from_cache = not conf.auto_cache_clear
if form and from_cache:
cached_doclist = self.load_from_cache()
if cached_doclist: return cached_doclist
# Get parent doc and its fields
doclist = webnotes.model.doc.get('DocType', self.name, 1)
doclist += self.get_custom_fields(self.name)
if form:
table_fields = [t[0] for t in self.get_table_fields(doclist)]
# for each unique table
for t in list(set(table_fields)):
# Get child doc and its fields
table_doclist = webnotes.model.doc.get('DocType', t, 1)
table_doclist += self.get_custom_fields(t)
doclist += table_doclist
self.apply_property_setters(doclist)
if form:
self.load_select_options(doclist)
self.add_code(doclist[0])
self.load_print_formats(doclist)
self.insert_into_cache(doclist)
return doclist
def get_custom_fields(self, doc_type):
"""
Gets a list of custom field docs masked as type DocField
"""
custom_doclist = []
res = webnotes.conn.sql("""SELECT * FROM `tabCustom Field`
WHERE dt = %s AND docstatus < 2""", doc_type, as_dict=1)
for r in res:
# Cheat! Mask Custom Field as DocField
custom_field = webnotes.model.doc.Document(fielddata=r)
self.mask_custom_field(custom_field, doc_type)
custom_doclist.append(custom_field)
return custom_doclist
def mask_custom_field(self, custom_field, doc_type):
"""
Masks doctype and parent related properties of Custom Field as that
of DocField
"""
custom_field.fields.update({
'doctype': 'DocField',
'parent': doc_type,
'parentfield': 'fields',
'parenttype': 'DocType',
})
def get_table_fields(self, doclist):
"""
Returns [[options, fieldname]] of fields of type 'Table'
"""
table_fields = []
for d in doclist:
if d.doctype=='DocField' and d.fieldtype == 'Table':
table_fields.append([d.options, d.fieldname])
return table_fields
def apply_property_setters(self, doclist):
"""
"""
property_dict, doc_type_list = self.get_property_setters(doclist)
for d in doclist:
self.update_field_properties(d, property_dict)
self.apply_previous_field_properties(doclist, property_dict,
doc_type_list)
def get_property_setters(self, doclist):
"""
Returns a dict of property setter lists and doc_type_list
"""
from webnotes.utils import cstr
property_dict = {}
# final property dict will be
# {
# doc_type: {
# fieldname: [list of property setter dicts]
# }
# }
doc_type_list = list(set(
d.doctype=='DocType' and d.name or d.parent
for d in doclist))
in_string = '", "'.join(doc_type_list)
for ps in webnotes.conn.sql("""\
SELECT doc_type, field_name, property, property_type, value
FROM `tabProperty Setter`
WHERE doc_type IN ("%s")""" % in_string, as_dict=1):
property_dict.setdefault(ps.get('doc_type'),
{}).setdefault(cstr(ps.get('field_name')), []).append(ps)
return property_dict, doc_type_list
def update_field_properties(self, d, property_dict):
"""
apply properties except previous_field ones
"""
from webnotes.utils import cstr
# get property setters for a given doctype's fields
doctype_property_dict = (d.doctype=='DocField' and property_dict.get(d.parent) or
property_dict.get(d.name))
if not (doctype_property_dict and doctype_property_dict.get(cstr(d.fieldname))): return
from webnotes.utils import cint
prop_updates = []
for prop in doctype_property_dict.get(cstr(d.fieldname)):
if prop.get('property')=='previous_field': continue
if prop.get('property_type') == 'Check' or \
prop.get('value') in ['0', '1']:
prop_updates.append([prop.get('property'), cint(prop.get('value'))])
else:
prop_updates.append([prop.get('property'), prop.get('value')])
prop_updates and d.fields.update(dict(prop_updates))
def apply_previous_field_properties(self, doclist, property_dict,
doc_type_list):
"""
"""
prev_field_dict = self.get_previous_field_properties(property_dict)
if not prev_field_dict: return
for doc_type in doc_type_list:
docfields = self.get_sorted_docfields(doclist, doc_type)
docfields = self.sort_docfields(doc_type, docfields, prev_field_dict)
if docfields: self.change_idx(doclist, docfields, doc_type)
def get_previous_field_properties(self, property_dict):
"""
setup prev_field_dict
"""
from webnotes.utils import cstr
doctype_prev_field_list = []
for doc_type in property_dict:
prev_field_list = []
for prop_list in property_dict.get(doc_type).values():
for prop in prop_list:
if prop.get('property') == 'previous_field':
prev_field_list.append([prop.get('value'),
prop.get('field_name')])
break
if not prev_field_list: continue
doctype_prev_field_list.append([doc_type, dict(prev_field_list)])
if not doctype_prev_field_list: return
return dict(doctype_prev_field_list)
def get_sorted_docfields(self, doclist, doc_type):
"""
get a sorted list of docfield names
"""
sorted_list = sorted([
d for d in doclist
if d.doctype == 'DocField'
and d.parent == doc_type
], key=lambda df: df.idx)
return [d.fieldname for d in sorted_list]
def sort_docfields(self, doc_type, docfields, prev_field_dict):
|
def change_idx(self, doclist, docfields, doc_type):
for d in doclist:
if d.fieldname and d.fieldname in docfields and d.parent == doc_type:
d.idx = docfields.index(d.fieldname) + 1
def add_code(self, doc):
"""add js, css code"""
import os
from webnotes.modules import scrub, get_module_path
import conf
modules_path = get_module_path(doc.module)
path = os.path.join(modules_path, 'doctype', scrub(doc.name))
def _add_code(fname, fieldname):
fpath = os.path.join(path, fname)
if os.path.exists(fpath):
with open(fpath, 'r') as f:
doc.fields[fieldname] = f.read()
_add_code(scrub(doc.name) + '.js', '__js')
_add_code(scrub(doc.name) + '.css', '__css')
_add_code('%s_list.js' % scrub(doc.name), '__listjs')
_add_code('help.md', 'description')
# embed all require files
import re
def _sub(match):
fpath = os.path.join(os.path.dirname(conf.modules_path), \
re.search('["\'][^"\']*["\']', match.group(0)).group(0)[1:-1])
if os.path.exists(fpath):
with open(fpath, 'r') as f:
return '\n' + f.read() + '\n'
else:
return '\n// no file "%s" found \n' % fpath
if doc.fields.get('__js'):
doc.fields['__js'] = re.sub('(wn.require\([^\)]*.)', _sub, doc.fields['__js'])
# custom script
from webnotes.model.code import get_custom_script
custom = get_custom_script(doc.name, 'Client') or ''
doc.fields['__js'] = doc.fields.setdefault('__js', '') + '\n' + custom
def load_select_options(self, doclist):
"""
Loads Select options for 'Select' fields
with link: as start of options
"""
for d in doclist:
if (d.doctype == 'DocField' and d.fieldtype == 'Select' and
d.options and d.options[:5].lower() == 'link:'):
# Get various options
opt_list = self._get_select_options(d)
opt_list = [''] + [o[0] or '' for o in opt_list]
d.options = "\n".join(opt_list)
def _get_select_options(self, d):
"""
Queries and returns select options
(called by load_select_options)
"""
op = d.options.split('\n')
if len(op) > 1 and op[1][:4].lower() == 'sql:':
# Execute the sql query
query = op[1][4:].replace('__user',
webnotes.session.get('user'))
else:
# Extract DocType and Conditions
# and execute the resulting query
dt = op[0][5:].strip()
cond_list = [cond.replace('__user',
webnotes.session.get('user')) for cond in op[1:]]
query = """\
SELECT name FROM `tab%s`
WHERE %s docstatus!=2
ORDER BY name ASC""" % (dt,
cond_list and (" AND ".join(cond_list) + " AND ") or "")
try:
opt_list = webnotes.conn.sql(query)
except:
# WARNING: Exception suppressed
opt_list = []
return opt_list
def load_print_formats(self, doclist):
"""
Load Print Formats in doclist
"""
# TODO: Process Print Formats for $import
# to deprecate code in print_format.py
# if this is implemented, clear CacheItem on saving print format
print_formats = webnotes.conn.sql("""\
SELECT * FROM `tabPrint Format`
WHERE doc_type=%s AND docstatus<2""", doclist[0].fields.get('name'),
as_dict=1)
for pf in print_formats:
if not pf: continue
print_format_doc = webnotes.model.doc.Document('Print Format', fielddata=pf)
doclist.append(print_format_doc)
def load_from_cache(self):
import json
json_doclist = CacheItem(self.name).get()
if json_doclist:
return [webnotes.model.doc.Document(fielddata=d)
for d in json.loads(json_doclist)]
def insert_into_cache(self, doclist):
import json
json_doclist = json.dumps([d.fields for d in doclist])
CacheItem(self.name).set(json_doclist)
def get(dt, form=1):
"""
Load "DocType" - called by form builder, report buider and from code.py (when there is no cache)
"""
if not dt: return []
doclist = _DocType(dt).make_doclist(form)
return doclist
# Deprecate after import_docs rewrite
def get_field_property(dt, fieldname, property):
"""
get a field property, override it from property setter if specified
"""
field = webnotes.conn.sql("""
select name, `%s`
from tabDocField
where parent=%s and fieldname=%s""" % (property, '%s', '%s'), (dt, fieldname))
prop = webnotes.conn.sql("""
select value
from `tabProperty Setter`
where doc_type=%s and field_name=%s and property=%s""", (dt, fieldname, property))
if prop:
return prop[0][0]
else:
return field[0][1]
def get_property(dt, property, fieldname=None):
"""
get a doctype property, override it from property setter if specified
"""
if fieldname:
prop = webnotes.conn.sql("""
select value
from `tabProperty Setter`
where doc_type=%s and field_name=%s
and property=%s""", (dt, fieldname, property))
if prop:
return prop[0][0]
else:
val = webnotes.conn.sql("""\
SELECT %s FROM `tabDocField`
WHERE parent = %s AND fieldname = %s""" % \
(property, '%s', '%s'), (dt, fieldname))
if val and val[0][0]: return val[0][0] or ''
else:
prop = webnotes.conn.sql("""
select value
from `tabProperty Setter`
where doc_type=%s and doctype_or_field='DocType'
and property=%s""", (dt, property))
if prop:
return prop[0][0]
else:
return webnotes.conn.get_value('DocType', dt, property)
# Test Cases
import unittest
class DocTypeTest(unittest.TestCase):
def setUp(self):
self.name = 'Sales Order'
self.dt = _DocType(self.name)
def tearDown(self):
webnotes.conn.rollback()
def test_make_doclist(self):
doclist = self.dt.make_doclist()
for d in doclist:
print d.idx, d.doctype, d.name, d.parent
if not d.doctype: print d.fields
#print "--", d.name, "--"
#print d.doctype
self.assertTrue(doclist)
def test_get_custom_fields(self):
return
doclist = self.dt.get_custom_fields(self.name)
for d in doclist:
print "--", d.name, "--"
print d.fields
self.assertTrue(doclist)
| """
"""
temp_dict = prev_field_dict.get(doc_type)
if not temp_dict: return
prev_field = 'None' in temp_dict and 'None' or docfields[0]
i = 0
while temp_dict:
get_next_docfield = True
cur_field = temp_dict.get(prev_field)
if cur_field and cur_field in docfields:
try:
del temp_dict[prev_field]
if prev_field in docfields:
docfields.remove(cur_field)
docfields.insert(docfields.index(prev_field) + 1,
cur_field)
elif prev_field == 'None':
docfields.remove(cur_field)
docfields.insert(0, cur_field)
except ValueError:
pass
if cur_field in temp_dict:
prev_field = cur_field
get_next_docfield = False
if get_next_docfield:
i += 1
if i>=len(docfields): break
prev_field = docfields[i]
keys, vals = temp_dict.keys(), temp_dict.values()
if prev_field in vals:
i -= 1
prev_field = keys[vals.index(prev_field)]
return docfields | identifier_body |
doctype.py | # Copyright (c) 2012 Web Notes Technologies Pvt Ltd (http://erpnext.com)
#
# MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
# OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# TODO:
# Patch: Remove DocFormat
# imports
import webnotes
import webnotes.model
import webnotes.model.doc
from webnotes.utils.cache import CacheItem
class _DocType:
"""
The _DocType object is created internally using the module's `get` method.
"""
def __init__(self, name):
self.name = name
def make_doclist(self, form=1):
"""
"""
# do not load from cache if auto cache clear is enabled
import conf
from_cache = True
if hasattr(conf, 'auto_cache_clear'):
from_cache = not conf.auto_cache_clear
if form and from_cache:
cached_doclist = self.load_from_cache()
if cached_doclist: return cached_doclist
# Get parent doc and its fields
doclist = webnotes.model.doc.get('DocType', self.name, 1)
doclist += self.get_custom_fields(self.name)
if form:
table_fields = [t[0] for t in self.get_table_fields(doclist)]
# for each unique table
for t in list(set(table_fields)):
# Get child doc and its fields
table_doclist = webnotes.model.doc.get('DocType', t, 1)
table_doclist += self.get_custom_fields(t)
doclist += table_doclist
self.apply_property_setters(doclist)
if form:
self.load_select_options(doclist)
self.add_code(doclist[0])
self.load_print_formats(doclist)
self.insert_into_cache(doclist)
return doclist
def get_custom_fields(self, doc_type):
"""
Gets a list of custom field docs masked as type DocField
"""
custom_doclist = []
res = webnotes.conn.sql("""SELECT * FROM `tabCustom Field`
WHERE dt = %s AND docstatus < 2""", doc_type, as_dict=1)
for r in res:
# Cheat! Mask Custom Field as DocField
custom_field = webnotes.model.doc.Document(fielddata=r)
self.mask_custom_field(custom_field, doc_type)
custom_doclist.append(custom_field)
return custom_doclist
def mask_custom_field(self, custom_field, doc_type):
"""
Masks doctype and parent related properties of Custom Field as that
of DocField
"""
custom_field.fields.update({
'doctype': 'DocField',
'parent': doc_type,
'parentfield': 'fields',
'parenttype': 'DocType',
})
def get_table_fields(self, doclist):
"""
Returns [[options, fieldname]] of fields of type 'Table'
"""
table_fields = []
for d in doclist:
if d.doctype=='DocField' and d.fieldtype == 'Table':
table_fields.append([d.options, d.fieldname])
return table_fields
def apply_property_setters(self, doclist):
"""
"""
property_dict, doc_type_list = self.get_property_setters(doclist)
for d in doclist:
self.update_field_properties(d, property_dict)
self.apply_previous_field_properties(doclist, property_dict,
doc_type_list)
def get_property_setters(self, doclist):
"""
Returns a dict of property setter lists and doc_type_list
"""
from webnotes.utils import cstr
property_dict = {}
# final property dict will be
# {
# doc_type: {
# fieldname: [list of property setter dicts]
# }
# }
doc_type_list = list(set(
d.doctype=='DocType' and d.name or d.parent
for d in doclist))
in_string = '", "'.join(doc_type_list)
for ps in webnotes.conn.sql("""\
SELECT doc_type, field_name, property, property_type, value
FROM `tabProperty Setter`
WHERE doc_type IN ("%s")""" % in_string, as_dict=1):
property_dict.setdefault(ps.get('doc_type'),
{}).setdefault(cstr(ps.get('field_name')), []).append(ps)
return property_dict, doc_type_list
def update_field_properties(self, d, property_dict):
"""
apply properties except previous_field ones
"""
from webnotes.utils import cstr
# get property setters for a given doctype's fields
doctype_property_dict = (d.doctype=='DocField' and property_dict.get(d.parent) or
property_dict.get(d.name))
if not (doctype_property_dict and doctype_property_dict.get(cstr(d.fieldname))): |
from webnotes.utils import cint
prop_updates = []
for prop in doctype_property_dict.get(cstr(d.fieldname)):
if prop.get('property')=='previous_field': continue
if prop.get('property_type') == 'Check' or \
prop.get('value') in ['0', '1']:
prop_updates.append([prop.get('property'), cint(prop.get('value'))])
else:
prop_updates.append([prop.get('property'), prop.get('value')])
prop_updates and d.fields.update(dict(prop_updates))
def apply_previous_field_properties(self, doclist, property_dict,
doc_type_list):
"""
"""
prev_field_dict = self.get_previous_field_properties(property_dict)
if not prev_field_dict: return
for doc_type in doc_type_list:
docfields = self.get_sorted_docfields(doclist, doc_type)
docfields = self.sort_docfields(doc_type, docfields, prev_field_dict)
if docfields: self.change_idx(doclist, docfields, doc_type)
def get_previous_field_properties(self, property_dict):
"""
setup prev_field_dict
"""
from webnotes.utils import cstr
doctype_prev_field_list = []
for doc_type in property_dict:
prev_field_list = []
for prop_list in property_dict.get(doc_type).values():
for prop in prop_list:
if prop.get('property') == 'previous_field':
prev_field_list.append([prop.get('value'),
prop.get('field_name')])
break
if not prev_field_list: continue
doctype_prev_field_list.append([doc_type, dict(prev_field_list)])
if not doctype_prev_field_list: return
return dict(doctype_prev_field_list)
def get_sorted_docfields(self, doclist, doc_type):
"""
get a sorted list of docfield names
"""
sorted_list = sorted([
d for d in doclist
if d.doctype == 'DocField'
and d.parent == doc_type
], key=lambda df: df.idx)
return [d.fieldname for d in sorted_list]
def sort_docfields(self, doc_type, docfields, prev_field_dict):
"""
"""
temp_dict = prev_field_dict.get(doc_type)
if not temp_dict: return
prev_field = 'None' in temp_dict and 'None' or docfields[0]
i = 0
while temp_dict:
get_next_docfield = True
cur_field = temp_dict.get(prev_field)
if cur_field and cur_field in docfields:
try:
del temp_dict[prev_field]
if prev_field in docfields:
docfields.remove(cur_field)
docfields.insert(docfields.index(prev_field) + 1,
cur_field)
elif prev_field == 'None':
docfields.remove(cur_field)
docfields.insert(0, cur_field)
except ValueError:
pass
if cur_field in temp_dict:
prev_field = cur_field
get_next_docfield = False
if get_next_docfield:
i += 1
if i>=len(docfields): break
prev_field = docfields[i]
keys, vals = temp_dict.keys(), temp_dict.values()
if prev_field in vals:
i -= 1
prev_field = keys[vals.index(prev_field)]
return docfields
def change_idx(self, doclist, docfields, doc_type):
for d in doclist:
if d.fieldname and d.fieldname in docfields and d.parent == doc_type:
d.idx = docfields.index(d.fieldname) + 1
def add_code(self, doc):
"""add js, css code"""
import os
from webnotes.modules import scrub, get_module_path
import conf
modules_path = get_module_path(doc.module)
path = os.path.join(modules_path, 'doctype', scrub(doc.name))
def _add_code(fname, fieldname):
fpath = os.path.join(path, fname)
if os.path.exists(fpath):
with open(fpath, 'r') as f:
doc.fields[fieldname] = f.read()
_add_code(scrub(doc.name) + '.js', '__js')
_add_code(scrub(doc.name) + '.css', '__css')
_add_code('%s_list.js' % scrub(doc.name), '__listjs')
_add_code('help.md', 'description')
# embed all require files
import re
def _sub(match):
fpath = os.path.join(os.path.dirname(conf.modules_path), \
re.search('["\'][^"\']*["\']', match.group(0)).group(0)[1:-1])
if os.path.exists(fpath):
with open(fpath, 'r') as f:
return '\n' + f.read() + '\n'
else:
return '\n// no file "%s" found \n' % fpath
if doc.fields.get('__js'):
doc.fields['__js'] = re.sub('(wn.require\([^\)]*.)', _sub, doc.fields['__js'])
# custom script
from webnotes.model.code import get_custom_script
custom = get_custom_script(doc.name, 'Client') or ''
doc.fields['__js'] = doc.fields.setdefault('__js', '') + '\n' + custom
def load_select_options(self, doclist):
"""
Loads Select options for 'Select' fields
with link: as start of options
"""
for d in doclist:
if (d.doctype == 'DocField' and d.fieldtype == 'Select' and
d.options and d.options[:5].lower() == 'link:'):
# Get various options
opt_list = self._get_select_options(d)
opt_list = [''] + [o[0] or '' for o in opt_list]
d.options = "\n".join(opt_list)
def _get_select_options(self, d):
"""
Queries and returns select options
(called by load_select_options)
"""
op = d.options.split('\n')
if len(op) > 1 and op[1][:4].lower() == 'sql:':
# Execute the sql query
query = op[1][4:].replace('__user',
webnotes.session.get('user'))
else:
# Extract DocType and Conditions
# and execute the resulting query
dt = op[0][5:].strip()
cond_list = [cond.replace('__user',
webnotes.session.get('user')) for cond in op[1:]]
query = """\
SELECT name FROM `tab%s`
WHERE %s docstatus!=2
ORDER BY name ASC""" % (dt,
cond_list and (" AND ".join(cond_list) + " AND ") or "")
try:
opt_list = webnotes.conn.sql(query)
except:
# WARNING: Exception suppressed
opt_list = []
return opt_list
def load_print_formats(self, doclist):
"""
Load Print Formats in doclist
"""
# TODO: Process Print Formats for $import
# to deprecate code in print_format.py
# if this is implemented, clear CacheItem on saving print format
print_formats = webnotes.conn.sql("""\
SELECT * FROM `tabPrint Format`
WHERE doc_type=%s AND docstatus<2""", doclist[0].fields.get('name'),
as_dict=1)
for pf in print_formats:
if not pf: continue
print_format_doc = webnotes.model.doc.Document('Print Format', fielddata=pf)
doclist.append(print_format_doc)
def load_from_cache(self):
import json
json_doclist = CacheItem(self.name).get()
if json_doclist:
return [webnotes.model.doc.Document(fielddata=d)
for d in json.loads(json_doclist)]
def insert_into_cache(self, doclist):
import json
json_doclist = json.dumps([d.fields for d in doclist])
CacheItem(self.name).set(json_doclist)
def get(dt, form=1):
"""
Load "DocType" - called by form builder, report buider and from code.py (when there is no cache)
"""
if not dt: return []
doclist = _DocType(dt).make_doclist(form)
return doclist
# Deprecate after import_docs rewrite
def get_field_property(dt, fieldname, property):
"""
get a field property, override it from property setter if specified
"""
field = webnotes.conn.sql("""
select name, `%s`
from tabDocField
where parent=%s and fieldname=%s""" % (property, '%s', '%s'), (dt, fieldname))
prop = webnotes.conn.sql("""
select value
from `tabProperty Setter`
where doc_type=%s and field_name=%s and property=%s""", (dt, fieldname, property))
if prop:
return prop[0][0]
else:
return field[0][1]
def get_property(dt, property, fieldname=None):
"""
get a doctype property, override it from property setter if specified
"""
if fieldname:
prop = webnotes.conn.sql("""
select value
from `tabProperty Setter`
where doc_type=%s and field_name=%s
and property=%s""", (dt, fieldname, property))
if prop:
return prop[0][0]
else:
val = webnotes.conn.sql("""\
SELECT %s FROM `tabDocField`
WHERE parent = %s AND fieldname = %s""" % \
(property, '%s', '%s'), (dt, fieldname))
if val and val[0][0]: return val[0][0] or ''
else:
prop = webnotes.conn.sql("""
select value
from `tabProperty Setter`
where doc_type=%s and doctype_or_field='DocType'
and property=%s""", (dt, property))
if prop:
return prop[0][0]
else:
return webnotes.conn.get_value('DocType', dt, property)
# Test Cases
import unittest
class DocTypeTest(unittest.TestCase):
def setUp(self):
self.name = 'Sales Order'
self.dt = _DocType(self.name)
def tearDown(self):
webnotes.conn.rollback()
def test_make_doclist(self):
doclist = self.dt.make_doclist()
for d in doclist:
print d.idx, d.doctype, d.name, d.parent
if not d.doctype: print d.fields
#print "--", d.name, "--"
#print d.doctype
self.assertTrue(doclist)
def test_get_custom_fields(self):
return
doclist = self.dt.get_custom_fields(self.name)
for d in doclist:
print "--", d.name, "--"
print d.fields
self.assertTrue(doclist)
| return | conditional_block |
doctype.py | # Copyright (c) 2012 Web Notes Technologies Pvt Ltd (http://erpnext.com)
#
# MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
# OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# TODO:
# Patch: Remove DocFormat
# imports
import webnotes
import webnotes.model
import webnotes.model.doc
from webnotes.utils.cache import CacheItem
class _DocType:
"""
The _DocType object is created internally using the module's `get` method.
"""
def __init__(self, name):
self.name = name
def make_doclist(self, form=1):
"""
"""
# do not load from cache if auto cache clear is enabled
import conf
from_cache = True
if hasattr(conf, 'auto_cache_clear'):
from_cache = not conf.auto_cache_clear
if form and from_cache:
cached_doclist = self.load_from_cache()
if cached_doclist: return cached_doclist
# Get parent doc and its fields
doclist = webnotes.model.doc.get('DocType', self.name, 1)
doclist += self.get_custom_fields(self.name)
if form:
table_fields = [t[0] for t in self.get_table_fields(doclist)]
# for each unique table
for t in list(set(table_fields)):
# Get child doc and its fields
table_doclist = webnotes.model.doc.get('DocType', t, 1)
table_doclist += self.get_custom_fields(t)
doclist += table_doclist
self.apply_property_setters(doclist)
if form:
self.load_select_options(doclist)
self.add_code(doclist[0])
self.load_print_formats(doclist)
self.insert_into_cache(doclist)
return doclist
def | (self, doc_type):
"""
Gets a list of custom field docs masked as type DocField
"""
custom_doclist = []
res = webnotes.conn.sql("""SELECT * FROM `tabCustom Field`
WHERE dt = %s AND docstatus < 2""", doc_type, as_dict=1)
for r in res:
# Cheat! Mask Custom Field as DocField
custom_field = webnotes.model.doc.Document(fielddata=r)
self.mask_custom_field(custom_field, doc_type)
custom_doclist.append(custom_field)
return custom_doclist
def mask_custom_field(self, custom_field, doc_type):
"""
Masks doctype and parent related properties of Custom Field as that
of DocField
"""
custom_field.fields.update({
'doctype': 'DocField',
'parent': doc_type,
'parentfield': 'fields',
'parenttype': 'DocType',
})
def get_table_fields(self, doclist):
"""
Returns [[options, fieldname]] of fields of type 'Table'
"""
table_fields = []
for d in doclist:
if d.doctype=='DocField' and d.fieldtype == 'Table':
table_fields.append([d.options, d.fieldname])
return table_fields
def apply_property_setters(self, doclist):
"""
"""
property_dict, doc_type_list = self.get_property_setters(doclist)
for d in doclist:
self.update_field_properties(d, property_dict)
self.apply_previous_field_properties(doclist, property_dict,
doc_type_list)
def get_property_setters(self, doclist):
"""
Returns a dict of property setter lists and doc_type_list
"""
from webnotes.utils import cstr
property_dict = {}
# final property dict will be
# {
# doc_type: {
# fieldname: [list of property setter dicts]
# }
# }
doc_type_list = list(set(
d.doctype=='DocType' and d.name or d.parent
for d in doclist))
in_string = '", "'.join(doc_type_list)
for ps in webnotes.conn.sql("""\
SELECT doc_type, field_name, property, property_type, value
FROM `tabProperty Setter`
WHERE doc_type IN ("%s")""" % in_string, as_dict=1):
property_dict.setdefault(ps.get('doc_type'),
{}).setdefault(cstr(ps.get('field_name')), []).append(ps)
return property_dict, doc_type_list
def update_field_properties(self, d, property_dict):
"""
apply properties except previous_field ones
"""
from webnotes.utils import cstr
# get property setters for a given doctype's fields
doctype_property_dict = (d.doctype=='DocField' and property_dict.get(d.parent) or
property_dict.get(d.name))
if not (doctype_property_dict and doctype_property_dict.get(cstr(d.fieldname))): return
from webnotes.utils import cint
prop_updates = []
for prop in doctype_property_dict.get(cstr(d.fieldname)):
if prop.get('property')=='previous_field': continue
if prop.get('property_type') == 'Check' or \
prop.get('value') in ['0', '1']:
prop_updates.append([prop.get('property'), cint(prop.get('value'))])
else:
prop_updates.append([prop.get('property'), prop.get('value')])
prop_updates and d.fields.update(dict(prop_updates))
def apply_previous_field_properties(self, doclist, property_dict,
doc_type_list):
"""
"""
prev_field_dict = self.get_previous_field_properties(property_dict)
if not prev_field_dict: return
for doc_type in doc_type_list:
docfields = self.get_sorted_docfields(doclist, doc_type)
docfields = self.sort_docfields(doc_type, docfields, prev_field_dict)
if docfields: self.change_idx(doclist, docfields, doc_type)
def get_previous_field_properties(self, property_dict):
"""
setup prev_field_dict
"""
from webnotes.utils import cstr
doctype_prev_field_list = []
for doc_type in property_dict:
prev_field_list = []
for prop_list in property_dict.get(doc_type).values():
for prop in prop_list:
if prop.get('property') == 'previous_field':
prev_field_list.append([prop.get('value'),
prop.get('field_name')])
break
if not prev_field_list: continue
doctype_prev_field_list.append([doc_type, dict(prev_field_list)])
if not doctype_prev_field_list: return
return dict(doctype_prev_field_list)
def get_sorted_docfields(self, doclist, doc_type):
"""
get a sorted list of docfield names
"""
sorted_list = sorted([
d for d in doclist
if d.doctype == 'DocField'
and d.parent == doc_type
], key=lambda df: df.idx)
return [d.fieldname for d in sorted_list]
def sort_docfields(self, doc_type, docfields, prev_field_dict):
"""
"""
temp_dict = prev_field_dict.get(doc_type)
if not temp_dict: return
prev_field = 'None' in temp_dict and 'None' or docfields[0]
i = 0
while temp_dict:
get_next_docfield = True
cur_field = temp_dict.get(prev_field)
if cur_field and cur_field in docfields:
try:
del temp_dict[prev_field]
if prev_field in docfields:
docfields.remove(cur_field)
docfields.insert(docfields.index(prev_field) + 1,
cur_field)
elif prev_field == 'None':
docfields.remove(cur_field)
docfields.insert(0, cur_field)
except ValueError:
pass
if cur_field in temp_dict:
prev_field = cur_field
get_next_docfield = False
if get_next_docfield:
i += 1
if i>=len(docfields): break
prev_field = docfields[i]
keys, vals = temp_dict.keys(), temp_dict.values()
if prev_field in vals:
i -= 1
prev_field = keys[vals.index(prev_field)]
return docfields
def change_idx(self, doclist, docfields, doc_type):
for d in doclist:
if d.fieldname and d.fieldname in docfields and d.parent == doc_type:
d.idx = docfields.index(d.fieldname) + 1
def add_code(self, doc):
"""add js, css code"""
import os
from webnotes.modules import scrub, get_module_path
import conf
modules_path = get_module_path(doc.module)
path = os.path.join(modules_path, 'doctype', scrub(doc.name))
def _add_code(fname, fieldname):
fpath = os.path.join(path, fname)
if os.path.exists(fpath):
with open(fpath, 'r') as f:
doc.fields[fieldname] = f.read()
_add_code(scrub(doc.name) + '.js', '__js')
_add_code(scrub(doc.name) + '.css', '__css')
_add_code('%s_list.js' % scrub(doc.name), '__listjs')
_add_code('help.md', 'description')
# embed all require files
import re
def _sub(match):
fpath = os.path.join(os.path.dirname(conf.modules_path), \
re.search('["\'][^"\']*["\']', match.group(0)).group(0)[1:-1])
if os.path.exists(fpath):
with open(fpath, 'r') as f:
return '\n' + f.read() + '\n'
else:
return '\n// no file "%s" found \n' % fpath
if doc.fields.get('__js'):
doc.fields['__js'] = re.sub('(wn.require\([^\)]*.)', _sub, doc.fields['__js'])
# custom script
from webnotes.model.code import get_custom_script
custom = get_custom_script(doc.name, 'Client') or ''
doc.fields['__js'] = doc.fields.setdefault('__js', '') + '\n' + custom
def load_select_options(self, doclist):
"""
Loads Select options for 'Select' fields
with link: as start of options
"""
for d in doclist:
if (d.doctype == 'DocField' and d.fieldtype == 'Select' and
d.options and d.options[:5].lower() == 'link:'):
# Get various options
opt_list = self._get_select_options(d)
opt_list = [''] + [o[0] or '' for o in opt_list]
d.options = "\n".join(opt_list)
def _get_select_options(self, d):
"""
Queries and returns select options
(called by load_select_options)
"""
op = d.options.split('\n')
if len(op) > 1 and op[1][:4].lower() == 'sql:':
# Execute the sql query
query = op[1][4:].replace('__user',
webnotes.session.get('user'))
else:
# Extract DocType and Conditions
# and execute the resulting query
dt = op[0][5:].strip()
cond_list = [cond.replace('__user',
webnotes.session.get('user')) for cond in op[1:]]
query = """\
SELECT name FROM `tab%s`
WHERE %s docstatus!=2
ORDER BY name ASC""" % (dt,
cond_list and (" AND ".join(cond_list) + " AND ") or "")
try:
opt_list = webnotes.conn.sql(query)
except:
# WARNING: Exception suppressed
opt_list = []
return opt_list
def load_print_formats(self, doclist):
"""
Load Print Formats in doclist
"""
# TODO: Process Print Formats for $import
# to deprecate code in print_format.py
# if this is implemented, clear CacheItem on saving print format
print_formats = webnotes.conn.sql("""\
SELECT * FROM `tabPrint Format`
WHERE doc_type=%s AND docstatus<2""", doclist[0].fields.get('name'),
as_dict=1)
for pf in print_formats:
if not pf: continue
print_format_doc = webnotes.model.doc.Document('Print Format', fielddata=pf)
doclist.append(print_format_doc)
def load_from_cache(self):
import json
json_doclist = CacheItem(self.name).get()
if json_doclist:
return [webnotes.model.doc.Document(fielddata=d)
for d in json.loads(json_doclist)]
def insert_into_cache(self, doclist):
import json
json_doclist = json.dumps([d.fields for d in doclist])
CacheItem(self.name).set(json_doclist)
def get(dt, form=1):
"""
Load "DocType" - called by form builder, report buider and from code.py (when there is no cache)
"""
if not dt: return []
doclist = _DocType(dt).make_doclist(form)
return doclist
# Deprecate after import_docs rewrite
def get_field_property(dt, fieldname, property):
"""
get a field property, override it from property setter if specified
"""
field = webnotes.conn.sql("""
select name, `%s`
from tabDocField
where parent=%s and fieldname=%s""" % (property, '%s', '%s'), (dt, fieldname))
prop = webnotes.conn.sql("""
select value
from `tabProperty Setter`
where doc_type=%s and field_name=%s and property=%s""", (dt, fieldname, property))
if prop:
return prop[0][0]
else:
return field[0][1]
def get_property(dt, property, fieldname=None):
"""
get a doctype property, override it from property setter if specified
"""
if fieldname:
prop = webnotes.conn.sql("""
select value
from `tabProperty Setter`
where doc_type=%s and field_name=%s
and property=%s""", (dt, fieldname, property))
if prop:
return prop[0][0]
else:
val = webnotes.conn.sql("""\
SELECT %s FROM `tabDocField`
WHERE parent = %s AND fieldname = %s""" % \
(property, '%s', '%s'), (dt, fieldname))
if val and val[0][0]: return val[0][0] or ''
else:
prop = webnotes.conn.sql("""
select value
from `tabProperty Setter`
where doc_type=%s and doctype_or_field='DocType'
and property=%s""", (dt, property))
if prop:
return prop[0][0]
else:
return webnotes.conn.get_value('DocType', dt, property)
# Test Cases
import unittest
class DocTypeTest(unittest.TestCase):
def setUp(self):
self.name = 'Sales Order'
self.dt = _DocType(self.name)
def tearDown(self):
webnotes.conn.rollback()
def test_make_doclist(self):
doclist = self.dt.make_doclist()
for d in doclist:
print d.idx, d.doctype, d.name, d.parent
if not d.doctype: print d.fields
#print "--", d.name, "--"
#print d.doctype
self.assertTrue(doclist)
def test_get_custom_fields(self):
return
doclist = self.dt.get_custom_fields(self.name)
for d in doclist:
print "--", d.name, "--"
print d.fields
self.assertTrue(doclist)
| get_custom_fields | identifier_name |
MotionTrackingLK.py | import tensorflow as tf
import cv2 as cv
import numpy as np
import math
def gaussian(x, sigma):
return 1/(sigma*math.sqrt(2*math.pi))*math.e**(-1/2*(x/sigma)**2)
def | (w, h, lx, ly):
devx = lx * w/2 + w/2
devy = ly * h/2 + h/2
return int(devx), int(devy)
def device_to_logical(w, h, devx, devy):
lx = (devx - w/2)/(w/2)
ly = (devy - h/2)/(h/2)
return lx, ly
def display_tracks(imgs, batch_tracks):
imgs = np.copy(imgs)
_,_,h,w,c = imgs.shape
imgs_with_tracks = []
for img_seq,tracks in zip(imgs, batch_tracks):
img = img_seq[-1]
for track in tracks:
for t_seq in range(1, len(track)):
lx_p, ly_p = track[t_seq-1]
x_p, y_p = logical_to_device(w, h, lx_p, ly_p)
lx, ly = track[t_seq]
x, y = logical_to_device(w, h, lx, ly)
img = cv.arrowedLine(img, (x_p, y_p), (x, y), (255, 0, 0))
imgs_with_tracks.append(img)
return np.asfarray(imgs_with_tracks)
class MotionTrackingLK(tf.keras.layers.Layer):
def __init__(self, num_tracks, window_pixel_wh=21, sigma=2, iterations=5, **kwargs):
self.sigma = sigma
assert(num_tracks > 1)
assert(window_pixel_wh >= 3)
self.num_tracks = num_tracks
self.win_pixel_wh = window_pixel_wh
self.iterations = iterations
super(MotionTrackingLK, self).__init__(**kwargs)
def build(self, input_shape):
# grab the dimensions of the image here so we can use them later. also will throw errors early for users
self.seq_len = input_shape[1][1]
self.h = input_shape[1][2]
self.w = input_shape[1][3]
self.c = input_shape[1][4]
self.center_relative = tf.constant(
[self.w/self.win_pixel_wh, self.h/self.win_pixel_wh],
shape=[1,1,2,1]
)
# we scale to the smaller axis and then apply transforms to that resulting square
# originally was [0.0, 1.0], but this resulted in the model being unable to learn. not sure why. possibly because tanh learns better than sigmoid
x_t, y_t = tf.meshgrid(
tf.linspace(-1.0, 1.0, self.win_pixel_wh),
tf.linspace(-1.0, 1.0, self.win_pixel_wh),
)
self.sampling_grid = tf.stack([
tf.reshape(x_t, [self.win_pixel_wh*self.win_pixel_wh]),
tf.reshape(y_t, [self.win_pixel_wh*self.win_pixel_wh]),
])
self.sobel_x = tf.constant([
[-1., 0., 1.],
[-2., 0., 2.],
[-1., 0., 1.],
],
shape=[3, 3, 1, 1]
)
self.sobel_y = tf.constant([
[-1., -2., -1.],
[ 0., 0., 0.],
[ 1., 2., 1.],
],
shape=[3, 3, 1, 1]
)
self.scharr_x = tf.constant([
[-3., 0., 3.],
[-10., 0., 10.],
[-3., 0., 3.],
],
shape=[3, 3, 1, 1]
)
self.scharr_y = tf.constant([
[-3., -10., -3.],
[ 0., 0., 0.],
[ 3., 10., 3.],
],
shape=[3, 3, 1, 1]
)
weights = np.empty([self.win_pixel_wh, self.win_pixel_wh])
center = self.win_pixel_wh//2
for y in range(self.win_pixel_wh):
for x in range(self.win_pixel_wh):
weights[y, x] = (x-center)**2 + (y-center)**2
weights = gaussian(np.sqrt(weights), self.sigma)
self.win_weights = tf.constant(weights, shape=[1, 1, self.win_pixel_wh*self.win_pixel_wh, 1], dtype=tf.float32)
# print(weights)
# tf.print(weights)
# tf.print(tf.reduce_max(weights))
super(MotionTrackingLK, self).build(input_shape)
def sample_ntracks_from_2frames(self, sampler, frames):
x = ((sampler[:, :, 0]) * self.win_pixel_wh) * 0.5
y = ((sampler[:, :, 1]) * self.win_pixel_wh) * 0.5
x = tf.reshape(tf.tile(tf.expand_dims(x, axis=1), [1, 2, 1, 1]), [-1, self.num_tracks*self.win_pixel_wh**2])
y = tf.reshape(tf.tile(tf.expand_dims(y, axis=1), [1, 2, 1, 1]), [-1, self.num_tracks*self.win_pixel_wh**2])
x0 = tf.floor(x)
x1 = x0 + 1
y0 = tf.floor(y)
y1 = y0 + 1
x0 = tf.clip_by_value(x0, 0, self.w-1)
x1 = tf.clip_by_value(x1, 0, self.w-1)
y0 = tf.clip_by_value(y0, 0, self.h-1)
y1 = tf.clip_by_value(y1, 0, self.h-1)
wa = tf.expand_dims((y1-y) * (x1-x), axis=-1)
wb = tf.expand_dims((y1-y) * (x-x0), axis=-1)
wc = tf.expand_dims((y-y0) * (x1-x), axis=-1)
wd = tf.expand_dims((y-y0) * (x-x0), axis=-1)
x0 = tf.cast(x0, tf.int32)
x1 = tf.cast(x1, tf.int32)
y0 = tf.cast(y0, tf.int32)
y1 = tf.cast(y1, tf.int32)
# necessary so that first dimension is equal. makes it so that we are repeatedly sampling for each image
tiled_imgs = tf.reshape(frames, [-1, self.h, self.w, self.c])
# batch dimension in this case goes through first frame for each batch, then second frame
Ia = tf.gather_nd(tiled_imgs, tf.stack([y0, x0], axis=-1), batch_dims=1)
Ib = tf.gather_nd(tiled_imgs, tf.stack([y0, x1], axis=-1), batch_dims=1)
Ic = tf.gather_nd(tiled_imgs, tf.stack([y1, x0], axis=-1), batch_dims=1)
Id = tf.gather_nd(tiled_imgs, tf.stack([y1, x1], axis=-1), batch_dims=1)
return tf.reshape(wa*Ia + wb*Ib + wc*Ic + wd*Id, [-1, 2, self.num_tracks, self.win_pixel_wh, self.win_pixel_wh, self.c])
def calc_velocity_2frames_ntracks_LK(self, first_frame, second_frame):
ff_comb = tf.reshape(first_frame, [-1, self.win_pixel_wh, self.win_pixel_wh, self.c])
Ix = tf.reshape(
tf.nn.convolution(ff_comb, self.sobel_x, padding="SAME"),
[-1, self.num_tracks, self.win_pixel_wh*self.win_pixel_wh, 1]
)
Iy = tf.reshape(
tf.nn.convolution(ff_comb, self.sobel_y, padding="SAME"),
[-1, self.num_tracks, self.win_pixel_wh*self.win_pixel_wh, 1]
)
A = tf.concat([Ix,Iy], axis=3)
ATA = tf.matmul(A, A*self.win_weights, transpose_a=True)
# ATA_1 = tf.linalg.inv(ATA)
# tf.linalg.inv gives me a cusolver error, so i generate inverse manually
a = ATA[:,:, 0,0]
b = ATA[:,:, 0,1]
c = ATA[:,:, 1,0]
d = ATA[:,:, 1,1]
ATA_1 = tf.reshape(1/(a*d - b*c + 1e-07), [-1, self.num_tracks, 1, 1])*tf.stack([tf.stack([d, -b], axis=-1), tf.stack([-c, a], axis=-1)], axis=3)
b = -1*tf.reshape(
second_frame-first_frame,
[-1, self.num_tracks, self.win_pixel_wh*self.win_pixel_wh, 1]
)*self.win_weights
ATb = tf.matmul(A, b, transpose_a=True)
VxVy = tf.matmul(ATA_1, ATb)
return VxVy
def iterative_LK(self, sampler, frames, iterations):
out = self.sample_ntracks_from_2frames(sampler, frames)
first_frame = out[:, 0]
factor = 1.0
VxVy = self.calc_velocity_2frames_ntracks_LK(first_frame, out[:, 1])*factor
sampler += VxVy
sum_VxVy = VxVy
i = tf.constant(1)
cond = lambda i, s, f, sf, svv: tf.less(i, iterations)
def iterate(i, sampler, frames, first_frame, sum_VxVy):
out = self.sample_ntracks_from_2frames(sampler, frames)
VxVy = self.calc_velocity_2frames_ntracks_LK(first_frame, out[:, 1])*factor
sampler += VxVy
i += 1
sum_VxVy += VxVy
return i, sampler, frames, first_frame, sum_VxVy
_, sampler, _, _, sum_VxVy = tf.while_loop(cond, iterate, [i, sampler, frames, first_frame, sum_VxVy])
return sampler, tf.reshape(sum_VxVy, [-1, self.num_tracks, 2])
def call(self, inputs):
init_track_locs = tf.reshape(inputs[0], [-1, self.num_tracks, 2, 1]) * self.center_relative + self.center_relative
imgs = inputs[1]
sampler = tf.reshape(self.sampling_grid, [1, 1, 2, -1]) + init_track_locs
init_track_locs = tf.reshape(init_track_locs, [-1, self.num_tracks, 1, 2])
sampler, tot_VxVy = self.iterative_LK(sampler, imgs[:, 0:2], self.iterations)
tot_VxVy = tf.reshape(tot_VxVy, [-1, self.num_tracks, 1, 2])
tot_VxVy = tf.concat([init_track_locs, tot_VxVy + init_track_locs], axis=2)
i = tf.constant(1)
cond = lambda i, s, imgs, tot_VxVy: tf.less(i, self.seq_len-1)
def iterate(i, sampler, imgs, tot_VxVy):
sampler, sum_VxVy = self.iterative_LK(sampler, imgs[:, i:i+2], self.iterations)
sum_VxVy = tf.reshape(sum_VxVy, [-1, self.num_tracks, 1, 2])
prev = tf.reshape(tot_VxVy[:, :, i], [-1, self.num_tracks, 1, 2])
tot_VxVy = tf.concat([tot_VxVy, sum_VxVy+prev], axis=2)
i += 1
return i, sampler, imgs, tot_VxVy
_, sampler, _, tot_VxVy = tf.while_loop(
cond, iterate, [i, sampler, imgs, tot_VxVy],
shape_invariants=[i.get_shape(), sampler.get_shape(), imgs.get_shape(), tf.TensorShape([None, self.num_tracks, None, 2])]
)
tot_VxVy = tf.reshape(tot_VxVy, [-1, self.num_tracks, self.seq_len, 2])
cr = tf.reshape(self.center_relative, [1,1,1,2])
tot_VxVy = (tot_VxVy - cr)/cr
tot_VxVy.set_shape([None, self.num_tracks, self.seq_len, 2])
# tf.print(tot_VxVy)
return tot_VxVy
def compute_output_shape(self, input_shape):
self.seq_len = input_shape[1][1]
return [None, self.num_tracks, self.seq_len, 2]
def get_config(self):
base_config = super(MotionTrackingLK, self).get_config()
return base_config
@classmethod
def from_config(cls, config):
return cls(**config)
if __name__ == "__main__":
import numpy as np
import math
window_pixel_wh = 21
num_tracks = 3
sigma = 2
batches = 1
iterations = 5
# some test points to be tracked
transforms = np.asarray(
[[
0.15, 0.09,
-0.28, 0.15,
0.39, -0.69,
]]*batches,
dtype=np.float32
)
imgs = np.asarray([
[
np.expand_dims(cv.imread("car_dashcam0.png", cv.IMREAD_GRAYSCALE) / 255, axis=-1),
np.expand_dims(cv.imread("car_dashcam1.png", cv.IMREAD_GRAYSCALE) / 255, axis=-1),
np.expand_dims(cv.imread("car_dashcam2.png", cv.IMREAD_GRAYSCALE) / 255, axis=-1),
np.expand_dims(cv.imread("car_dashcam3.png", cv.IMREAD_GRAYSCALE) / 255, axis=-1),
np.expand_dims(cv.imread("car_dashcam4.png", cv.IMREAD_GRAYSCALE) / 255, axis=-1),
]
]*batches).astype(np.float32)
_, _, h, w, c = imgs.shape
print(w, h ,c)
out = np.float32(MotionTrackingLK(
num_tracks=num_tracks, window_pixel_wh=window_pixel_wh, sigma=sigma, iterations=iterations)([transforms, imgs])
)
print(out.shape)
imgs_with_tracks = display_tracks(imgs, out)
for img in imgs_with_tracks:
cv.imshow("asdf", img)
cv.waitKey()
# for i in range(2):
# for j in range(num_tracks):
# cv.imshow(f"frame {i}, track {j}", out[0, i,j])
# cv.waitKey()
with open("OUT", "w") as f:
with np.printoptions(threshold=np.inf):
f.write(str(out)) | logical_to_device | identifier_name |
MotionTrackingLK.py | import tensorflow as tf
import cv2 as cv
import numpy as np
import math
def gaussian(x, sigma):
return 1/(sigma*math.sqrt(2*math.pi))*math.e**(-1/2*(x/sigma)**2)
def logical_to_device(w, h, lx, ly):
devx = lx * w/2 + w/2
devy = ly * h/2 + h/2
return int(devx), int(devy)
def device_to_logical(w, h, devx, devy):
lx = (devx - w/2)/(w/2)
ly = (devy - h/2)/(h/2)
return lx, ly
def display_tracks(imgs, batch_tracks):
imgs = np.copy(imgs)
_,_,h,w,c = imgs.shape
imgs_with_tracks = []
for img_seq,tracks in zip(imgs, batch_tracks):
img = img_seq[-1]
for track in tracks:
for t_seq in range(1, len(track)):
lx_p, ly_p = track[t_seq-1]
x_p, y_p = logical_to_device(w, h, lx_p, ly_p)
lx, ly = track[t_seq]
x, y = logical_to_device(w, h, lx, ly)
img = cv.arrowedLine(img, (x_p, y_p), (x, y), (255, 0, 0))
imgs_with_tracks.append(img)
return np.asfarray(imgs_with_tracks)
class MotionTrackingLK(tf.keras.layers.Layer):
def __init__(self, num_tracks, window_pixel_wh=21, sigma=2, iterations=5, **kwargs):
self.sigma = sigma
assert(num_tracks > 1)
assert(window_pixel_wh >= 3)
self.num_tracks = num_tracks
self.win_pixel_wh = window_pixel_wh
self.iterations = iterations
super(MotionTrackingLK, self).__init__(**kwargs)
def build(self, input_shape):
# grab the dimensions of the image here so we can use them later. also will throw errors early for users
self.seq_len = input_shape[1][1]
self.h = input_shape[1][2]
self.w = input_shape[1][3]
self.c = input_shape[1][4]
self.center_relative = tf.constant(
[self.w/self.win_pixel_wh, self.h/self.win_pixel_wh],
shape=[1,1,2,1]
)
# we scale to the smaller axis and then apply transforms to that resulting square
# originally was [0.0, 1.0], but this resulted in the model being unable to learn. not sure why. possibly because tanh learns better than sigmoid
x_t, y_t = tf.meshgrid(
tf.linspace(-1.0, 1.0, self.win_pixel_wh),
tf.linspace(-1.0, 1.0, self.win_pixel_wh),
)
self.sampling_grid = tf.stack([
tf.reshape(x_t, [self.win_pixel_wh*self.win_pixel_wh]),
tf.reshape(y_t, [self.win_pixel_wh*self.win_pixel_wh]),
])
self.sobel_x = tf.constant([
[-1., 0., 1.],
[-2., 0., 2.],
[-1., 0., 1.],
],
shape=[3, 3, 1, 1]
)
self.sobel_y = tf.constant([
[-1., -2., -1.],
[ 0., 0., 0.],
[ 1., 2., 1.],
],
shape=[3, 3, 1, 1]
)
self.scharr_x = tf.constant([
[-3., 0., 3.],
[-10., 0., 10.],
[-3., 0., 3.],
],
shape=[3, 3, 1, 1]
)
self.scharr_y = tf.constant([
[-3., -10., -3.],
[ 0., 0., 0.],
[ 3., 10., 3.],
],
shape=[3, 3, 1, 1]
)
weights = np.empty([self.win_pixel_wh, self.win_pixel_wh])
center = self.win_pixel_wh//2
for y in range(self.win_pixel_wh):
for x in range(self.win_pixel_wh):
weights[y, x] = (x-center)**2 + (y-center)**2
weights = gaussian(np.sqrt(weights), self.sigma)
self.win_weights = tf.constant(weights, shape=[1, 1, self.win_pixel_wh*self.win_pixel_wh, 1], dtype=tf.float32)
# print(weights)
# tf.print(weights)
# tf.print(tf.reduce_max(weights))
super(MotionTrackingLK, self).build(input_shape)
def sample_ntracks_from_2frames(self, sampler, frames):
x = ((sampler[:, :, 0]) * self.win_pixel_wh) * 0.5
y = ((sampler[:, :, 1]) * self.win_pixel_wh) * 0.5
x = tf.reshape(tf.tile(tf.expand_dims(x, axis=1), [1, 2, 1, 1]), [-1, self.num_tracks*self.win_pixel_wh**2])
y = tf.reshape(tf.tile(tf.expand_dims(y, axis=1), [1, 2, 1, 1]), [-1, self.num_tracks*self.win_pixel_wh**2])
x0 = tf.floor(x)
x1 = x0 + 1
y0 = tf.floor(y)
y1 = y0 + 1
x0 = tf.clip_by_value(x0, 0, self.w-1)
x1 = tf.clip_by_value(x1, 0, self.w-1)
y0 = tf.clip_by_value(y0, 0, self.h-1)
y1 = tf.clip_by_value(y1, 0, self.h-1)
wa = tf.expand_dims((y1-y) * (x1-x), axis=-1)
wb = tf.expand_dims((y1-y) * (x-x0), axis=-1)
wc = tf.expand_dims((y-y0) * (x1-x), axis=-1)
wd = tf.expand_dims((y-y0) * (x-x0), axis=-1)
x0 = tf.cast(x0, tf.int32)
x1 = tf.cast(x1, tf.int32)
y0 = tf.cast(y0, tf.int32)
y1 = tf.cast(y1, tf.int32)
# necessary so that first dimension is equal. makes it so that we are repeatedly sampling for each image
tiled_imgs = tf.reshape(frames, [-1, self.h, self.w, self.c])
# batch dimension in this case goes through first frame for each batch, then second frame
Ia = tf.gather_nd(tiled_imgs, tf.stack([y0, x0], axis=-1), batch_dims=1)
Ib = tf.gather_nd(tiled_imgs, tf.stack([y0, x1], axis=-1), batch_dims=1)
Ic = tf.gather_nd(tiled_imgs, tf.stack([y1, x0], axis=-1), batch_dims=1)
Id = tf.gather_nd(tiled_imgs, tf.stack([y1, x1], axis=-1), batch_dims=1)
return tf.reshape(wa*Ia + wb*Ib + wc*Ic + wd*Id, [-1, 2, self.num_tracks, self.win_pixel_wh, self.win_pixel_wh, self.c])
def calc_velocity_2frames_ntracks_LK(self, first_frame, second_frame):
ff_comb = tf.reshape(first_frame, [-1, self.win_pixel_wh, self.win_pixel_wh, self.c])
Ix = tf.reshape(
tf.nn.convolution(ff_comb, self.sobel_x, padding="SAME"),
[-1, self.num_tracks, self.win_pixel_wh*self.win_pixel_wh, 1]
)
Iy = tf.reshape(
tf.nn.convolution(ff_comb, self.sobel_y, padding="SAME"),
[-1, self.num_tracks, self.win_pixel_wh*self.win_pixel_wh, 1]
)
A = tf.concat([Ix,Iy], axis=3)
ATA = tf.matmul(A, A*self.win_weights, transpose_a=True)
# ATA_1 = tf.linalg.inv(ATA)
# tf.linalg.inv gives me a cusolver error, so i generate inverse manually
a = ATA[:,:, 0,0]
b = ATA[:,:, 0,1]
c = ATA[:,:, 1,0]
d = ATA[:,:, 1,1]
ATA_1 = tf.reshape(1/(a*d - b*c + 1e-07), [-1, self.num_tracks, 1, 1])*tf.stack([tf.stack([d, -b], axis=-1), tf.stack([-c, a], axis=-1)], axis=3)
b = -1*tf.reshape(
second_frame-first_frame,
[-1, self.num_tracks, self.win_pixel_wh*self.win_pixel_wh, 1]
)*self.win_weights |
VxVy = tf.matmul(ATA_1, ATb)
return VxVy
def iterative_LK(self, sampler, frames, iterations):
out = self.sample_ntracks_from_2frames(sampler, frames)
first_frame = out[:, 0]
factor = 1.0
VxVy = self.calc_velocity_2frames_ntracks_LK(first_frame, out[:, 1])*factor
sampler += VxVy
sum_VxVy = VxVy
i = tf.constant(1)
cond = lambda i, s, f, sf, svv: tf.less(i, iterations)
def iterate(i, sampler, frames, first_frame, sum_VxVy):
out = self.sample_ntracks_from_2frames(sampler, frames)
VxVy = self.calc_velocity_2frames_ntracks_LK(first_frame, out[:, 1])*factor
sampler += VxVy
i += 1
sum_VxVy += VxVy
return i, sampler, frames, first_frame, sum_VxVy
_, sampler, _, _, sum_VxVy = tf.while_loop(cond, iterate, [i, sampler, frames, first_frame, sum_VxVy])
return sampler, tf.reshape(sum_VxVy, [-1, self.num_tracks, 2])
def call(self, inputs):
init_track_locs = tf.reshape(inputs[0], [-1, self.num_tracks, 2, 1]) * self.center_relative + self.center_relative
imgs = inputs[1]
sampler = tf.reshape(self.sampling_grid, [1, 1, 2, -1]) + init_track_locs
init_track_locs = tf.reshape(init_track_locs, [-1, self.num_tracks, 1, 2])
sampler, tot_VxVy = self.iterative_LK(sampler, imgs[:, 0:2], self.iterations)
tot_VxVy = tf.reshape(tot_VxVy, [-1, self.num_tracks, 1, 2])
tot_VxVy = tf.concat([init_track_locs, tot_VxVy + init_track_locs], axis=2)
i = tf.constant(1)
cond = lambda i, s, imgs, tot_VxVy: tf.less(i, self.seq_len-1)
def iterate(i, sampler, imgs, tot_VxVy):
sampler, sum_VxVy = self.iterative_LK(sampler, imgs[:, i:i+2], self.iterations)
sum_VxVy = tf.reshape(sum_VxVy, [-1, self.num_tracks, 1, 2])
prev = tf.reshape(tot_VxVy[:, :, i], [-1, self.num_tracks, 1, 2])
tot_VxVy = tf.concat([tot_VxVy, sum_VxVy+prev], axis=2)
i += 1
return i, sampler, imgs, tot_VxVy
_, sampler, _, tot_VxVy = tf.while_loop(
cond, iterate, [i, sampler, imgs, tot_VxVy],
shape_invariants=[i.get_shape(), sampler.get_shape(), imgs.get_shape(), tf.TensorShape([None, self.num_tracks, None, 2])]
)
tot_VxVy = tf.reshape(tot_VxVy, [-1, self.num_tracks, self.seq_len, 2])
cr = tf.reshape(self.center_relative, [1,1,1,2])
tot_VxVy = (tot_VxVy - cr)/cr
tot_VxVy.set_shape([None, self.num_tracks, self.seq_len, 2])
# tf.print(tot_VxVy)
return tot_VxVy
def compute_output_shape(self, input_shape):
self.seq_len = input_shape[1][1]
return [None, self.num_tracks, self.seq_len, 2]
def get_config(self):
base_config = super(MotionTrackingLK, self).get_config()
return base_config
@classmethod
def from_config(cls, config):
return cls(**config)
if __name__ == "__main__":
import numpy as np
import math
window_pixel_wh = 21
num_tracks = 3
sigma = 2
batches = 1
iterations = 5
# some test points to be tracked
transforms = np.asarray(
[[
0.15, 0.09,
-0.28, 0.15,
0.39, -0.69,
]]*batches,
dtype=np.float32
)
imgs = np.asarray([
[
np.expand_dims(cv.imread("car_dashcam0.png", cv.IMREAD_GRAYSCALE) / 255, axis=-1),
np.expand_dims(cv.imread("car_dashcam1.png", cv.IMREAD_GRAYSCALE) / 255, axis=-1),
np.expand_dims(cv.imread("car_dashcam2.png", cv.IMREAD_GRAYSCALE) / 255, axis=-1),
np.expand_dims(cv.imread("car_dashcam3.png", cv.IMREAD_GRAYSCALE) / 255, axis=-1),
np.expand_dims(cv.imread("car_dashcam4.png", cv.IMREAD_GRAYSCALE) / 255, axis=-1),
]
]*batches).astype(np.float32)
_, _, h, w, c = imgs.shape
print(w, h ,c)
out = np.float32(MotionTrackingLK(
num_tracks=num_tracks, window_pixel_wh=window_pixel_wh, sigma=sigma, iterations=iterations)([transforms, imgs])
)
print(out.shape)
imgs_with_tracks = display_tracks(imgs, out)
for img in imgs_with_tracks:
cv.imshow("asdf", img)
cv.waitKey()
# for i in range(2):
# for j in range(num_tracks):
# cv.imshow(f"frame {i}, track {j}", out[0, i,j])
# cv.waitKey()
with open("OUT", "w") as f:
with np.printoptions(threshold=np.inf):
f.write(str(out)) | ATb = tf.matmul(A, b, transpose_a=True) | random_line_split |
MotionTrackingLK.py | import tensorflow as tf
import cv2 as cv
import numpy as np
import math
def gaussian(x, sigma):
return 1/(sigma*math.sqrt(2*math.pi))*math.e**(-1/2*(x/sigma)**2)
def logical_to_device(w, h, lx, ly):
devx = lx * w/2 + w/2
devy = ly * h/2 + h/2
return int(devx), int(devy)
def device_to_logical(w, h, devx, devy):
lx = (devx - w/2)/(w/2)
ly = (devy - h/2)/(h/2)
return lx, ly
def display_tracks(imgs, batch_tracks):
imgs = np.copy(imgs)
_,_,h,w,c = imgs.shape
imgs_with_tracks = []
for img_seq,tracks in zip(imgs, batch_tracks):
img = img_seq[-1]
for track in tracks:
for t_seq in range(1, len(track)):
lx_p, ly_p = track[t_seq-1]
x_p, y_p = logical_to_device(w, h, lx_p, ly_p)
lx, ly = track[t_seq]
x, y = logical_to_device(w, h, lx, ly)
img = cv.arrowedLine(img, (x_p, y_p), (x, y), (255, 0, 0))
imgs_with_tracks.append(img)
return np.asfarray(imgs_with_tracks)
class MotionTrackingLK(tf.keras.layers.Layer):
def __init__(self, num_tracks, window_pixel_wh=21, sigma=2, iterations=5, **kwargs):
self.sigma = sigma
assert(num_tracks > 1)
assert(window_pixel_wh >= 3)
self.num_tracks = num_tracks
self.win_pixel_wh = window_pixel_wh
self.iterations = iterations
super(MotionTrackingLK, self).__init__(**kwargs)
def build(self, input_shape):
# grab the dimensions of the image here so we can use them later. also will throw errors early for users
self.seq_len = input_shape[1][1]
self.h = input_shape[1][2]
self.w = input_shape[1][3]
self.c = input_shape[1][4]
self.center_relative = tf.constant(
[self.w/self.win_pixel_wh, self.h/self.win_pixel_wh],
shape=[1,1,2,1]
)
# we scale to the smaller axis and then apply transforms to that resulting square
# originally was [0.0, 1.0], but this resulted in the model being unable to learn. not sure why. possibly because tanh learns better than sigmoid
x_t, y_t = tf.meshgrid(
tf.linspace(-1.0, 1.0, self.win_pixel_wh),
tf.linspace(-1.0, 1.0, self.win_pixel_wh),
)
self.sampling_grid = tf.stack([
tf.reshape(x_t, [self.win_pixel_wh*self.win_pixel_wh]),
tf.reshape(y_t, [self.win_pixel_wh*self.win_pixel_wh]),
])
self.sobel_x = tf.constant([
[-1., 0., 1.],
[-2., 0., 2.],
[-1., 0., 1.],
],
shape=[3, 3, 1, 1]
)
self.sobel_y = tf.constant([
[-1., -2., -1.],
[ 0., 0., 0.],
[ 1., 2., 1.],
],
shape=[3, 3, 1, 1]
)
self.scharr_x = tf.constant([
[-3., 0., 3.],
[-10., 0., 10.],
[-3., 0., 3.],
],
shape=[3, 3, 1, 1]
)
self.scharr_y = tf.constant([
[-3., -10., -3.],
[ 0., 0., 0.],
[ 3., 10., 3.],
],
shape=[3, 3, 1, 1]
)
weights = np.empty([self.win_pixel_wh, self.win_pixel_wh])
center = self.win_pixel_wh//2
for y in range(self.win_pixel_wh):
for x in range(self.win_pixel_wh):
weights[y, x] = (x-center)**2 + (y-center)**2
weights = gaussian(np.sqrt(weights), self.sigma)
self.win_weights = tf.constant(weights, shape=[1, 1, self.win_pixel_wh*self.win_pixel_wh, 1], dtype=tf.float32)
# print(weights)
# tf.print(weights)
# tf.print(tf.reduce_max(weights))
super(MotionTrackingLK, self).build(input_shape)
def sample_ntracks_from_2frames(self, sampler, frames):
x = ((sampler[:, :, 0]) * self.win_pixel_wh) * 0.5
y = ((sampler[:, :, 1]) * self.win_pixel_wh) * 0.5
x = tf.reshape(tf.tile(tf.expand_dims(x, axis=1), [1, 2, 1, 1]), [-1, self.num_tracks*self.win_pixel_wh**2])
y = tf.reshape(tf.tile(tf.expand_dims(y, axis=1), [1, 2, 1, 1]), [-1, self.num_tracks*self.win_pixel_wh**2])
x0 = tf.floor(x)
x1 = x0 + 1
y0 = tf.floor(y)
y1 = y0 + 1
x0 = tf.clip_by_value(x0, 0, self.w-1)
x1 = tf.clip_by_value(x1, 0, self.w-1)
y0 = tf.clip_by_value(y0, 0, self.h-1)
y1 = tf.clip_by_value(y1, 0, self.h-1)
wa = tf.expand_dims((y1-y) * (x1-x), axis=-1)
wb = tf.expand_dims((y1-y) * (x-x0), axis=-1)
wc = tf.expand_dims((y-y0) * (x1-x), axis=-1)
wd = tf.expand_dims((y-y0) * (x-x0), axis=-1)
x0 = tf.cast(x0, tf.int32)
x1 = tf.cast(x1, tf.int32)
y0 = tf.cast(y0, tf.int32)
y1 = tf.cast(y1, tf.int32)
# necessary so that first dimension is equal. makes it so that we are repeatedly sampling for each image
tiled_imgs = tf.reshape(frames, [-1, self.h, self.w, self.c])
# batch dimension in this case goes through first frame for each batch, then second frame
Ia = tf.gather_nd(tiled_imgs, tf.stack([y0, x0], axis=-1), batch_dims=1)
Ib = tf.gather_nd(tiled_imgs, tf.stack([y0, x1], axis=-1), batch_dims=1)
Ic = tf.gather_nd(tiled_imgs, tf.stack([y1, x0], axis=-1), batch_dims=1)
Id = tf.gather_nd(tiled_imgs, tf.stack([y1, x1], axis=-1), batch_dims=1)
return tf.reshape(wa*Ia + wb*Ib + wc*Ic + wd*Id, [-1, 2, self.num_tracks, self.win_pixel_wh, self.win_pixel_wh, self.c])
def calc_velocity_2frames_ntracks_LK(self, first_frame, second_frame):
ff_comb = tf.reshape(first_frame, [-1, self.win_pixel_wh, self.win_pixel_wh, self.c])
Ix = tf.reshape(
tf.nn.convolution(ff_comb, self.sobel_x, padding="SAME"),
[-1, self.num_tracks, self.win_pixel_wh*self.win_pixel_wh, 1]
)
Iy = tf.reshape(
tf.nn.convolution(ff_comb, self.sobel_y, padding="SAME"),
[-1, self.num_tracks, self.win_pixel_wh*self.win_pixel_wh, 1]
)
A = tf.concat([Ix,Iy], axis=3)
ATA = tf.matmul(A, A*self.win_weights, transpose_a=True)
# ATA_1 = tf.linalg.inv(ATA)
# tf.linalg.inv gives me a cusolver error, so i generate inverse manually
a = ATA[:,:, 0,0]
b = ATA[:,:, 0,1]
c = ATA[:,:, 1,0]
d = ATA[:,:, 1,1]
ATA_1 = tf.reshape(1/(a*d - b*c + 1e-07), [-1, self.num_tracks, 1, 1])*tf.stack([tf.stack([d, -b], axis=-1), tf.stack([-c, a], axis=-1)], axis=3)
b = -1*tf.reshape(
second_frame-first_frame,
[-1, self.num_tracks, self.win_pixel_wh*self.win_pixel_wh, 1]
)*self.win_weights
ATb = tf.matmul(A, b, transpose_a=True)
VxVy = tf.matmul(ATA_1, ATb)
return VxVy
def iterative_LK(self, sampler, frames, iterations):
out = self.sample_ntracks_from_2frames(sampler, frames)
first_frame = out[:, 0]
factor = 1.0
VxVy = self.calc_velocity_2frames_ntracks_LK(first_frame, out[:, 1])*factor
sampler += VxVy
sum_VxVy = VxVy
i = tf.constant(1)
cond = lambda i, s, f, sf, svv: tf.less(i, iterations)
def iterate(i, sampler, frames, first_frame, sum_VxVy):
out = self.sample_ntracks_from_2frames(sampler, frames)
VxVy = self.calc_velocity_2frames_ntracks_LK(first_frame, out[:, 1])*factor
sampler += VxVy
i += 1
sum_VxVy += VxVy
return i, sampler, frames, first_frame, sum_VxVy
_, sampler, _, _, sum_VxVy = tf.while_loop(cond, iterate, [i, sampler, frames, first_frame, sum_VxVy])
return sampler, tf.reshape(sum_VxVy, [-1, self.num_tracks, 2])
def call(self, inputs):
init_track_locs = tf.reshape(inputs[0], [-1, self.num_tracks, 2, 1]) * self.center_relative + self.center_relative
imgs = inputs[1]
sampler = tf.reshape(self.sampling_grid, [1, 1, 2, -1]) + init_track_locs
init_track_locs = tf.reshape(init_track_locs, [-1, self.num_tracks, 1, 2])
sampler, tot_VxVy = self.iterative_LK(sampler, imgs[:, 0:2], self.iterations)
tot_VxVy = tf.reshape(tot_VxVy, [-1, self.num_tracks, 1, 2])
tot_VxVy = tf.concat([init_track_locs, tot_VxVy + init_track_locs], axis=2)
i = tf.constant(1)
cond = lambda i, s, imgs, tot_VxVy: tf.less(i, self.seq_len-1)
def iterate(i, sampler, imgs, tot_VxVy):
|
_, sampler, _, tot_VxVy = tf.while_loop(
cond, iterate, [i, sampler, imgs, tot_VxVy],
shape_invariants=[i.get_shape(), sampler.get_shape(), imgs.get_shape(), tf.TensorShape([None, self.num_tracks, None, 2])]
)
tot_VxVy = tf.reshape(tot_VxVy, [-1, self.num_tracks, self.seq_len, 2])
cr = tf.reshape(self.center_relative, [1,1,1,2])
tot_VxVy = (tot_VxVy - cr)/cr
tot_VxVy.set_shape([None, self.num_tracks, self.seq_len, 2])
# tf.print(tot_VxVy)
return tot_VxVy
def compute_output_shape(self, input_shape):
self.seq_len = input_shape[1][1]
return [None, self.num_tracks, self.seq_len, 2]
def get_config(self):
base_config = super(MotionTrackingLK, self).get_config()
return base_config
@classmethod
def from_config(cls, config):
return cls(**config)
if __name__ == "__main__":
import numpy as np
import math
window_pixel_wh = 21
num_tracks = 3
sigma = 2
batches = 1
iterations = 5
# some test points to be tracked
transforms = np.asarray(
[[
0.15, 0.09,
-0.28, 0.15,
0.39, -0.69,
]]*batches,
dtype=np.float32
)
imgs = np.asarray([
[
np.expand_dims(cv.imread("car_dashcam0.png", cv.IMREAD_GRAYSCALE) / 255, axis=-1),
np.expand_dims(cv.imread("car_dashcam1.png", cv.IMREAD_GRAYSCALE) / 255, axis=-1),
np.expand_dims(cv.imread("car_dashcam2.png", cv.IMREAD_GRAYSCALE) / 255, axis=-1),
np.expand_dims(cv.imread("car_dashcam3.png", cv.IMREAD_GRAYSCALE) / 255, axis=-1),
np.expand_dims(cv.imread("car_dashcam4.png", cv.IMREAD_GRAYSCALE) / 255, axis=-1),
]
]*batches).astype(np.float32)
_, _, h, w, c = imgs.shape
print(w, h ,c)
out = np.float32(MotionTrackingLK(
num_tracks=num_tracks, window_pixel_wh=window_pixel_wh, sigma=sigma, iterations=iterations)([transforms, imgs])
)
print(out.shape)
imgs_with_tracks = display_tracks(imgs, out)
for img in imgs_with_tracks:
cv.imshow("asdf", img)
cv.waitKey()
# for i in range(2):
# for j in range(num_tracks):
# cv.imshow(f"frame {i}, track {j}", out[0, i,j])
# cv.waitKey()
with open("OUT", "w") as f:
with np.printoptions(threshold=np.inf):
f.write(str(out)) | sampler, sum_VxVy = self.iterative_LK(sampler, imgs[:, i:i+2], self.iterations)
sum_VxVy = tf.reshape(sum_VxVy, [-1, self.num_tracks, 1, 2])
prev = tf.reshape(tot_VxVy[:, :, i], [-1, self.num_tracks, 1, 2])
tot_VxVy = tf.concat([tot_VxVy, sum_VxVy+prev], axis=2)
i += 1
return i, sampler, imgs, tot_VxVy | identifier_body |
MotionTrackingLK.py | import tensorflow as tf
import cv2 as cv
import numpy as np
import math
def gaussian(x, sigma):
return 1/(sigma*math.sqrt(2*math.pi))*math.e**(-1/2*(x/sigma)**2)
def logical_to_device(w, h, lx, ly):
devx = lx * w/2 + w/2
devy = ly * h/2 + h/2
return int(devx), int(devy)
def device_to_logical(w, h, devx, devy):
lx = (devx - w/2)/(w/2)
ly = (devy - h/2)/(h/2)
return lx, ly
def display_tracks(imgs, batch_tracks):
imgs = np.copy(imgs)
_,_,h,w,c = imgs.shape
imgs_with_tracks = []
for img_seq,tracks in zip(imgs, batch_tracks):
img = img_seq[-1]
for track in tracks:
|
imgs_with_tracks.append(img)
return np.asfarray(imgs_with_tracks)
class MotionTrackingLK(tf.keras.layers.Layer):
def __init__(self, num_tracks, window_pixel_wh=21, sigma=2, iterations=5, **kwargs):
self.sigma = sigma
assert(num_tracks > 1)
assert(window_pixel_wh >= 3)
self.num_tracks = num_tracks
self.win_pixel_wh = window_pixel_wh
self.iterations = iterations
super(MotionTrackingLK, self).__init__(**kwargs)
def build(self, input_shape):
# grab the dimensions of the image here so we can use them later. also will throw errors early for users
self.seq_len = input_shape[1][1]
self.h = input_shape[1][2]
self.w = input_shape[1][3]
self.c = input_shape[1][4]
self.center_relative = tf.constant(
[self.w/self.win_pixel_wh, self.h/self.win_pixel_wh],
shape=[1,1,2,1]
)
# we scale to the smaller axis and then apply transforms to that resulting square
# originally was [0.0, 1.0], but this resulted in the model being unable to learn. not sure why. possibly because tanh learns better than sigmoid
x_t, y_t = tf.meshgrid(
tf.linspace(-1.0, 1.0, self.win_pixel_wh),
tf.linspace(-1.0, 1.0, self.win_pixel_wh),
)
self.sampling_grid = tf.stack([
tf.reshape(x_t, [self.win_pixel_wh*self.win_pixel_wh]),
tf.reshape(y_t, [self.win_pixel_wh*self.win_pixel_wh]),
])
self.sobel_x = tf.constant([
[-1., 0., 1.],
[-2., 0., 2.],
[-1., 0., 1.],
],
shape=[3, 3, 1, 1]
)
self.sobel_y = tf.constant([
[-1., -2., -1.],
[ 0., 0., 0.],
[ 1., 2., 1.],
],
shape=[3, 3, 1, 1]
)
self.scharr_x = tf.constant([
[-3., 0., 3.],
[-10., 0., 10.],
[-3., 0., 3.],
],
shape=[3, 3, 1, 1]
)
self.scharr_y = tf.constant([
[-3., -10., -3.],
[ 0., 0., 0.],
[ 3., 10., 3.],
],
shape=[3, 3, 1, 1]
)
weights = np.empty([self.win_pixel_wh, self.win_pixel_wh])
center = self.win_pixel_wh//2
for y in range(self.win_pixel_wh):
for x in range(self.win_pixel_wh):
weights[y, x] = (x-center)**2 + (y-center)**2
weights = gaussian(np.sqrt(weights), self.sigma)
self.win_weights = tf.constant(weights, shape=[1, 1, self.win_pixel_wh*self.win_pixel_wh, 1], dtype=tf.float32)
# print(weights)
# tf.print(weights)
# tf.print(tf.reduce_max(weights))
super(MotionTrackingLK, self).build(input_shape)
def sample_ntracks_from_2frames(self, sampler, frames):
x = ((sampler[:, :, 0]) * self.win_pixel_wh) * 0.5
y = ((sampler[:, :, 1]) * self.win_pixel_wh) * 0.5
x = tf.reshape(tf.tile(tf.expand_dims(x, axis=1), [1, 2, 1, 1]), [-1, self.num_tracks*self.win_pixel_wh**2])
y = tf.reshape(tf.tile(tf.expand_dims(y, axis=1), [1, 2, 1, 1]), [-1, self.num_tracks*self.win_pixel_wh**2])
x0 = tf.floor(x)
x1 = x0 + 1
y0 = tf.floor(y)
y1 = y0 + 1
x0 = tf.clip_by_value(x0, 0, self.w-1)
x1 = tf.clip_by_value(x1, 0, self.w-1)
y0 = tf.clip_by_value(y0, 0, self.h-1)
y1 = tf.clip_by_value(y1, 0, self.h-1)
wa = tf.expand_dims((y1-y) * (x1-x), axis=-1)
wb = tf.expand_dims((y1-y) * (x-x0), axis=-1)
wc = tf.expand_dims((y-y0) * (x1-x), axis=-1)
wd = tf.expand_dims((y-y0) * (x-x0), axis=-1)
x0 = tf.cast(x0, tf.int32)
x1 = tf.cast(x1, tf.int32)
y0 = tf.cast(y0, tf.int32)
y1 = tf.cast(y1, tf.int32)
# necessary so that first dimension is equal. makes it so that we are repeatedly sampling for each image
tiled_imgs = tf.reshape(frames, [-1, self.h, self.w, self.c])
# batch dimension in this case goes through first frame for each batch, then second frame
Ia = tf.gather_nd(tiled_imgs, tf.stack([y0, x0], axis=-1), batch_dims=1)
Ib = tf.gather_nd(tiled_imgs, tf.stack([y0, x1], axis=-1), batch_dims=1)
Ic = tf.gather_nd(tiled_imgs, tf.stack([y1, x0], axis=-1), batch_dims=1)
Id = tf.gather_nd(tiled_imgs, tf.stack([y1, x1], axis=-1), batch_dims=1)
return tf.reshape(wa*Ia + wb*Ib + wc*Ic + wd*Id, [-1, 2, self.num_tracks, self.win_pixel_wh, self.win_pixel_wh, self.c])
def calc_velocity_2frames_ntracks_LK(self, first_frame, second_frame):
ff_comb = tf.reshape(first_frame, [-1, self.win_pixel_wh, self.win_pixel_wh, self.c])
Ix = tf.reshape(
tf.nn.convolution(ff_comb, self.sobel_x, padding="SAME"),
[-1, self.num_tracks, self.win_pixel_wh*self.win_pixel_wh, 1]
)
Iy = tf.reshape(
tf.nn.convolution(ff_comb, self.sobel_y, padding="SAME"),
[-1, self.num_tracks, self.win_pixel_wh*self.win_pixel_wh, 1]
)
A = tf.concat([Ix,Iy], axis=3)
ATA = tf.matmul(A, A*self.win_weights, transpose_a=True)
# ATA_1 = tf.linalg.inv(ATA)
# tf.linalg.inv gives me a cusolver error, so i generate inverse manually
a = ATA[:,:, 0,0]
b = ATA[:,:, 0,1]
c = ATA[:,:, 1,0]
d = ATA[:,:, 1,1]
ATA_1 = tf.reshape(1/(a*d - b*c + 1e-07), [-1, self.num_tracks, 1, 1])*tf.stack([tf.stack([d, -b], axis=-1), tf.stack([-c, a], axis=-1)], axis=3)
b = -1*tf.reshape(
second_frame-first_frame,
[-1, self.num_tracks, self.win_pixel_wh*self.win_pixel_wh, 1]
)*self.win_weights
ATb = tf.matmul(A, b, transpose_a=True)
VxVy = tf.matmul(ATA_1, ATb)
return VxVy
def iterative_LK(self, sampler, frames, iterations):
out = self.sample_ntracks_from_2frames(sampler, frames)
first_frame = out[:, 0]
factor = 1.0
VxVy = self.calc_velocity_2frames_ntracks_LK(first_frame, out[:, 1])*factor
sampler += VxVy
sum_VxVy = VxVy
i = tf.constant(1)
cond = lambda i, s, f, sf, svv: tf.less(i, iterations)
def iterate(i, sampler, frames, first_frame, sum_VxVy):
out = self.sample_ntracks_from_2frames(sampler, frames)
VxVy = self.calc_velocity_2frames_ntracks_LK(first_frame, out[:, 1])*factor
sampler += VxVy
i += 1
sum_VxVy += VxVy
return i, sampler, frames, first_frame, sum_VxVy
_, sampler, _, _, sum_VxVy = tf.while_loop(cond, iterate, [i, sampler, frames, first_frame, sum_VxVy])
return sampler, tf.reshape(sum_VxVy, [-1, self.num_tracks, 2])
def call(self, inputs):
init_track_locs = tf.reshape(inputs[0], [-1, self.num_tracks, 2, 1]) * self.center_relative + self.center_relative
imgs = inputs[1]
sampler = tf.reshape(self.sampling_grid, [1, 1, 2, -1]) + init_track_locs
init_track_locs = tf.reshape(init_track_locs, [-1, self.num_tracks, 1, 2])
sampler, tot_VxVy = self.iterative_LK(sampler, imgs[:, 0:2], self.iterations)
tot_VxVy = tf.reshape(tot_VxVy, [-1, self.num_tracks, 1, 2])
tot_VxVy = tf.concat([init_track_locs, tot_VxVy + init_track_locs], axis=2)
i = tf.constant(1)
cond = lambda i, s, imgs, tot_VxVy: tf.less(i, self.seq_len-1)
def iterate(i, sampler, imgs, tot_VxVy):
sampler, sum_VxVy = self.iterative_LK(sampler, imgs[:, i:i+2], self.iterations)
sum_VxVy = tf.reshape(sum_VxVy, [-1, self.num_tracks, 1, 2])
prev = tf.reshape(tot_VxVy[:, :, i], [-1, self.num_tracks, 1, 2])
tot_VxVy = tf.concat([tot_VxVy, sum_VxVy+prev], axis=2)
i += 1
return i, sampler, imgs, tot_VxVy
_, sampler, _, tot_VxVy = tf.while_loop(
cond, iterate, [i, sampler, imgs, tot_VxVy],
shape_invariants=[i.get_shape(), sampler.get_shape(), imgs.get_shape(), tf.TensorShape([None, self.num_tracks, None, 2])]
)
tot_VxVy = tf.reshape(tot_VxVy, [-1, self.num_tracks, self.seq_len, 2])
cr = tf.reshape(self.center_relative, [1,1,1,2])
tot_VxVy = (tot_VxVy - cr)/cr
tot_VxVy.set_shape([None, self.num_tracks, self.seq_len, 2])
# tf.print(tot_VxVy)
return tot_VxVy
def compute_output_shape(self, input_shape):
self.seq_len = input_shape[1][1]
return [None, self.num_tracks, self.seq_len, 2]
def get_config(self):
base_config = super(MotionTrackingLK, self).get_config()
return base_config
@classmethod
def from_config(cls, config):
return cls(**config)
if __name__ == "__main__":
import numpy as np
import math
window_pixel_wh = 21
num_tracks = 3
sigma = 2
batches = 1
iterations = 5
# some test points to be tracked
transforms = np.asarray(
[[
0.15, 0.09,
-0.28, 0.15,
0.39, -0.69,
]]*batches,
dtype=np.float32
)
imgs = np.asarray([
[
np.expand_dims(cv.imread("car_dashcam0.png", cv.IMREAD_GRAYSCALE) / 255, axis=-1),
np.expand_dims(cv.imread("car_dashcam1.png", cv.IMREAD_GRAYSCALE) / 255, axis=-1),
np.expand_dims(cv.imread("car_dashcam2.png", cv.IMREAD_GRAYSCALE) / 255, axis=-1),
np.expand_dims(cv.imread("car_dashcam3.png", cv.IMREAD_GRAYSCALE) / 255, axis=-1),
np.expand_dims(cv.imread("car_dashcam4.png", cv.IMREAD_GRAYSCALE) / 255, axis=-1),
]
]*batches).astype(np.float32)
_, _, h, w, c = imgs.shape
print(w, h ,c)
out = np.float32(MotionTrackingLK(
num_tracks=num_tracks, window_pixel_wh=window_pixel_wh, sigma=sigma, iterations=iterations)([transforms, imgs])
)
print(out.shape)
imgs_with_tracks = display_tracks(imgs, out)
for img in imgs_with_tracks:
cv.imshow("asdf", img)
cv.waitKey()
# for i in range(2):
# for j in range(num_tracks):
# cv.imshow(f"frame {i}, track {j}", out[0, i,j])
# cv.waitKey()
with open("OUT", "w") as f:
with np.printoptions(threshold=np.inf):
f.write(str(out)) | for t_seq in range(1, len(track)):
lx_p, ly_p = track[t_seq-1]
x_p, y_p = logical_to_device(w, h, lx_p, ly_p)
lx, ly = track[t_seq]
x, y = logical_to_device(w, h, lx, ly)
img = cv.arrowedLine(img, (x_p, y_p), (x, y), (255, 0, 0)) | conditional_block |
main.rs | extern crate jemallocator;
extern crate num_cpus;
extern crate quick_protobuf;
mod osm_pbf;
use crossbeam_channel::{bounded, unbounded};
use crossbeam_utils::thread;
use memmap::MmapOptions;
use osm_pbf::{Blob, BlobHeader, DenseNodes, Info, Node, PrimitiveBlock, Relation, Way};
use quick_protobuf::{BytesReader, MessageRead};
use std::cmp::{max, min};
use std::fs::File;
use std::panic;
use std::process;
#[global_allocator]
static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc;
const WORK_BOUND: usize = 4000;
const MAX_COMPRESSED_BLOB_SIZE: i32 = 64 * 1024;
const MAX_DECOMPRESSED_BLOB_SIZE: i32 = 32 * 1024 * 1024;
#[derive(Debug)]
struct | {
timestamp_min: i64,
timestamp_max: i64,
nodes: u64,
ways: u64,
relations: u64,
lon_min: f64,
lon_max: f64,
lat_min: f64,
lat_max: f64,
}
fn main() {
let args: Vec<_> = std::env::args_os().collect();
let filename = &args[1];
let orig_handler = panic::take_hook();
panic::set_hook(Box::new(move |panic_info| {
let handler = &orig_handler;
handler(panic_info);
process::exit(1);
}));
match do_processing(filename, num_cpus::get()) {
Ok(result) => println!("{}", result),
Err(err) => println!("{}", err),
}
}
fn do_processing(filename: &std::ffi::OsStr, thread_count: usize) -> Result<String, String> {
let file_handle = File::open(filename).or(Err("unable to open file"))?;
let mmap = unsafe {
MmapOptions::new()
.map(&file_handle)
.or(Err("unable to mmap"))?
};
let bytes = &mmap[..];
let mut reader = BytesReader::from_bytes(&bytes);
let mut sent_messages = 0;
let (sender, receiver) = bounded::<Blob>(WORK_BOUND);
let (return_sender, return_received) = unbounded::<OsmStats>();
thread::scope(|s| {
for _ in 0..thread_count {
let cloned_receiver = receiver.clone();
let cloned_return_sender = return_sender.clone();
s.spawn(move |_| {
let mut buffer = Vec::with_capacity(MAX_DECOMPRESSED_BLOB_SIZE as usize);
let mut stats = empty_osm_stats();
loop {
match cloned_receiver.recv() {
Ok(blob) => {
handle_block(&mut stats, &blob, &mut buffer);
buffer.clear();
}
Err(_e) => break,
}
}
cloned_return_sender
.send(stats)
.expect("failed to send size result");
});
}
loop {
let header_size = match reader.read_sfixed32(bytes).map(|value| value.swap_bytes()) {
Ok(size) if size > MAX_COMPRESSED_BLOB_SIZE => {
return Err("invalid data, compressed blob too large".to_string())
}
Ok(size) => size,
Err(_e) => break,
} as usize;
let blob_header = reader
.read_message_by_len::<BlobHeader>(&bytes, header_size)
.expect("failed to read blob header");
let blob = reader
.read_message_by_len::<Blob>(bytes, blob_header.datasize as usize)
.expect("failed to read blob");
if blob.raw_size.unwrap_or(0) > MAX_DECOMPRESSED_BLOB_SIZE {
return Err("invalid data, uncompressed blob too large".to_string());
}
if blob_header.type_pb == "OSMData" {
sent_messages += 1;
sender.send(blob).expect("failed to send blob");
}
}
drop(sender);
let mut received_messages = 0;
let mut osm_stats = empty_osm_stats();
while received_messages < thread_count {
let worker_stats = return_received.recv().unwrap();
osm_stats.nodes += worker_stats.nodes;
osm_stats.ways += worker_stats.ways;
osm_stats.relations += worker_stats.relations;
osm_stats.timestamp_max = max(osm_stats.timestamp_max, worker_stats.timestamp_max);
osm_stats.timestamp_min = min(osm_stats.timestamp_min, worker_stats.timestamp_min);
if worker_stats.lat_max > osm_stats.lat_max {
osm_stats.lat_max = worker_stats.lat_max
}
if worker_stats.lat_min < osm_stats.lat_min {
osm_stats.lat_min = worker_stats.lat_min
}
if worker_stats.lon_max > osm_stats.lon_max {
osm_stats.lon_max = worker_stats.lon_max
}
if worker_stats.lon_min < osm_stats.lon_min {
osm_stats.lon_min = worker_stats.lon_min
}
received_messages += 1;
}
Ok(format!("{:#?}", osm_stats))
})
.unwrap()
}
fn handle_block(mut osm_stats: &mut OsmStats, blob: &Blob, buffer: &mut Vec<u8>) {
let zlib_data_ref = blob.zlib_data.as_ref();
let tried_block = if blob.raw.is_some() {
let bytes = blob.raw.as_ref().unwrap();
let mut reader = BytesReader::from_bytes(&bytes);
Some(
PrimitiveBlock::from_reader(&mut reader, &bytes)
.expect("failed to read primitive block"),
)
} else if zlib_data_ref.is_some() {
use flate2::{Decompress, FlushDecompress};
let mut decompress = Decompress::new(true);
decompress
.decompress_vec(&zlib_data_ref.unwrap(), buffer, FlushDecompress::Finish)
.expect("error decompressing");
let mut reader = BytesReader::from_bytes(&buffer);
Some(
PrimitiveBlock::from_reader(&mut reader, &buffer)
.expect("failed to read gzipped primitive block"),
)
} else {
None
};
let block = tried_block.unwrap();
handle_primitive_block(&mut osm_stats, &block);
}
fn handle_primitive_block(mut osm_stats: &mut OsmStats, block: &PrimitiveBlock) {
for primitive in &block.primitivegroup {
if let Some(dense_nodes) = &primitive.dense {
handle_dense_nodes(&mut osm_stats, &dense_nodes, &block);
}
for node in &primitive.nodes {
handle_node(&mut osm_stats, &node, &block);
}
for way in &primitive.ways {
handle_way(&mut osm_stats, &way, &block);
}
for relation in &primitive.relations {
handle_relation(&mut osm_stats, &relation, &block);
}
}
}
fn handle_dense_nodes(
mut osm_stats: &mut OsmStats,
dense_nodes: &DenseNodes,
primitive: &PrimitiveBlock,
) {
osm_stats.nodes += dense_nodes.id.len() as u64;
if let Some(dense_info) = &dense_nodes.denseinfo {
let mut last_timestamp = 0;
for delta_timestamp in &dense_info.timestamp {
let timestamp = last_timestamp + delta_timestamp;
handle_timestamp(&mut osm_stats, timestamp, primitive.date_granularity);
last_timestamp = timestamp;
}
}
let mut last_latitude = 0;
for delta_latitude in &dense_nodes.lat {
let latitude = last_latitude + delta_latitude;
handle_latitude(&mut osm_stats, latitude, &primitive);
last_latitude = latitude;
}
let mut last_longitude = 0;
for delta_longitude in &dense_nodes.lon {
let longitude = last_longitude + delta_longitude;
handle_longitude(&mut osm_stats, longitude, &primitive);
last_longitude = longitude;
}
}
fn handle_node(mut osm_stats: &mut OsmStats, node: &Node, primitive: &PrimitiveBlock) {
osm_stats.nodes += 1;
if let Some(info) = &node.info {
handle_info(&mut osm_stats, &info, primitive.date_granularity)
}
handle_latitude(&mut osm_stats, node.lat, &primitive);
handle_longitude(&mut osm_stats, node.lon, &primitive);
}
fn handle_way(mut osm_stats: &mut OsmStats, way: &Way, primitive: &PrimitiveBlock) {
osm_stats.ways += 1;
if let Some(info) = &way.info {
handle_info(&mut osm_stats, &info, primitive.date_granularity)
}
}
fn handle_relation(mut osm_stats: &mut OsmStats, relation: &Relation, primitive: &PrimitiveBlock) {
osm_stats.relations += 1;
if let Some(info) = &relation.info {
handle_info(&mut osm_stats, &info, primitive.date_granularity)
}
}
fn handle_info(mut osm_stats: &mut OsmStats, info: &Info, date_granularity: i32) {
if let Some(timestamp) = info.timestamp {
handle_timestamp(&mut osm_stats, timestamp, date_granularity);
}
}
fn handle_timestamp(osm_stats: &mut OsmStats, timestamp: i64, date_granularity: i32) {
let millisec_stamp = timestamp * (date_granularity as i64);
if millisec_stamp < osm_stats.timestamp_min {
osm_stats.timestamp_min = millisec_stamp
}
if millisec_stamp > osm_stats.timestamp_max {
osm_stats.timestamp_max = millisec_stamp
}
}
fn handle_latitude(osm_stats: &mut OsmStats, latitude: i64, primitive: &PrimitiveBlock) {
let latitude_f =
0.000000001 * ((primitive.lat_offset + ((primitive.granularity as i64) * latitude)) as f64);
if latitude_f < osm_stats.lat_min {
osm_stats.lat_min = latitude_f
}
if latitude_f > osm_stats.lat_max {
osm_stats.lat_max = latitude_f
}
}
fn handle_longitude(osm_stats: &mut OsmStats, longitude: i64, primitive: &PrimitiveBlock) {
let longitude_f = 0.000000001
* ((primitive.lon_offset + ((primitive.granularity as i64) * longitude)) as f64);
if longitude_f < osm_stats.lon_min {
osm_stats.lon_min = longitude_f
}
if longitude_f > osm_stats.lon_max {
osm_stats.lon_max = longitude_f
}
}
fn empty_osm_stats() -> OsmStats {
OsmStats {
nodes: 0,
relations: 0,
timestamp_max: std::i64::MIN,
timestamp_min: std::i64::MAX,
ways: 0,
lat_min: 100.0,
lat_max: -100.0,
lon_max: -200.0,
lon_min: 200.0,
}
}
| OsmStats | identifier_name |
main.rs | extern crate jemallocator;
extern crate num_cpus;
extern crate quick_protobuf;
mod osm_pbf;
use crossbeam_channel::{bounded, unbounded};
use crossbeam_utils::thread;
use memmap::MmapOptions;
use osm_pbf::{Blob, BlobHeader, DenseNodes, Info, Node, PrimitiveBlock, Relation, Way};
use quick_protobuf::{BytesReader, MessageRead};
use std::cmp::{max, min};
use std::fs::File;
use std::panic;
use std::process;
#[global_allocator]
static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc;
const WORK_BOUND: usize = 4000;
const MAX_COMPRESSED_BLOB_SIZE: i32 = 64 * 1024;
const MAX_DECOMPRESSED_BLOB_SIZE: i32 = 32 * 1024 * 1024;
#[derive(Debug)]
struct OsmStats {
timestamp_min: i64,
timestamp_max: i64,
nodes: u64,
ways: u64,
relations: u64,
lon_min: f64,
lon_max: f64,
lat_min: f64,
lat_max: f64,
}
fn main() {
let args: Vec<_> = std::env::args_os().collect();
let filename = &args[1];
let orig_handler = panic::take_hook();
panic::set_hook(Box::new(move |panic_info| {
let handler = &orig_handler;
handler(panic_info);
process::exit(1);
}));
match do_processing(filename, num_cpus::get()) {
Ok(result) => println!("{}", result),
Err(err) => println!("{}", err),
}
}
fn do_processing(filename: &std::ffi::OsStr, thread_count: usize) -> Result<String, String> {
let file_handle = File::open(filename).or(Err("unable to open file"))?;
let mmap = unsafe {
MmapOptions::new()
.map(&file_handle)
.or(Err("unable to mmap"))?
};
let bytes = &mmap[..];
let mut reader = BytesReader::from_bytes(&bytes);
let mut sent_messages = 0; | let (sender, receiver) = bounded::<Blob>(WORK_BOUND);
let (return_sender, return_received) = unbounded::<OsmStats>();
thread::scope(|s| {
for _ in 0..thread_count {
let cloned_receiver = receiver.clone();
let cloned_return_sender = return_sender.clone();
s.spawn(move |_| {
let mut buffer = Vec::with_capacity(MAX_DECOMPRESSED_BLOB_SIZE as usize);
let mut stats = empty_osm_stats();
loop {
match cloned_receiver.recv() {
Ok(blob) => {
handle_block(&mut stats, &blob, &mut buffer);
buffer.clear();
}
Err(_e) => break,
}
}
cloned_return_sender
.send(stats)
.expect("failed to send size result");
});
}
loop {
let header_size = match reader.read_sfixed32(bytes).map(|value| value.swap_bytes()) {
Ok(size) if size > MAX_COMPRESSED_BLOB_SIZE => {
return Err("invalid data, compressed blob too large".to_string())
}
Ok(size) => size,
Err(_e) => break,
} as usize;
let blob_header = reader
.read_message_by_len::<BlobHeader>(&bytes, header_size)
.expect("failed to read blob header");
let blob = reader
.read_message_by_len::<Blob>(bytes, blob_header.datasize as usize)
.expect("failed to read blob");
if blob.raw_size.unwrap_or(0) > MAX_DECOMPRESSED_BLOB_SIZE {
return Err("invalid data, uncompressed blob too large".to_string());
}
if blob_header.type_pb == "OSMData" {
sent_messages += 1;
sender.send(blob).expect("failed to send blob");
}
}
drop(sender);
let mut received_messages = 0;
let mut osm_stats = empty_osm_stats();
while received_messages < thread_count {
let worker_stats = return_received.recv().unwrap();
osm_stats.nodes += worker_stats.nodes;
osm_stats.ways += worker_stats.ways;
osm_stats.relations += worker_stats.relations;
osm_stats.timestamp_max = max(osm_stats.timestamp_max, worker_stats.timestamp_max);
osm_stats.timestamp_min = min(osm_stats.timestamp_min, worker_stats.timestamp_min);
if worker_stats.lat_max > osm_stats.lat_max {
osm_stats.lat_max = worker_stats.lat_max
}
if worker_stats.lat_min < osm_stats.lat_min {
osm_stats.lat_min = worker_stats.lat_min
}
if worker_stats.lon_max > osm_stats.lon_max {
osm_stats.lon_max = worker_stats.lon_max
}
if worker_stats.lon_min < osm_stats.lon_min {
osm_stats.lon_min = worker_stats.lon_min
}
received_messages += 1;
}
Ok(format!("{:#?}", osm_stats))
})
.unwrap()
}
fn handle_block(mut osm_stats: &mut OsmStats, blob: &Blob, buffer: &mut Vec<u8>) {
let zlib_data_ref = blob.zlib_data.as_ref();
let tried_block = if blob.raw.is_some() {
let bytes = blob.raw.as_ref().unwrap();
let mut reader = BytesReader::from_bytes(&bytes);
Some(
PrimitiveBlock::from_reader(&mut reader, &bytes)
.expect("failed to read primitive block"),
)
} else if zlib_data_ref.is_some() {
use flate2::{Decompress, FlushDecompress};
let mut decompress = Decompress::new(true);
decompress
.decompress_vec(&zlib_data_ref.unwrap(), buffer, FlushDecompress::Finish)
.expect("error decompressing");
let mut reader = BytesReader::from_bytes(&buffer);
Some(
PrimitiveBlock::from_reader(&mut reader, &buffer)
.expect("failed to read gzipped primitive block"),
)
} else {
None
};
let block = tried_block.unwrap();
handle_primitive_block(&mut osm_stats, &block);
}
fn handle_primitive_block(mut osm_stats: &mut OsmStats, block: &PrimitiveBlock) {
for primitive in &block.primitivegroup {
if let Some(dense_nodes) = &primitive.dense {
handle_dense_nodes(&mut osm_stats, &dense_nodes, &block);
}
for node in &primitive.nodes {
handle_node(&mut osm_stats, &node, &block);
}
for way in &primitive.ways {
handle_way(&mut osm_stats, &way, &block);
}
for relation in &primitive.relations {
handle_relation(&mut osm_stats, &relation, &block);
}
}
}
fn handle_dense_nodes(
mut osm_stats: &mut OsmStats,
dense_nodes: &DenseNodes,
primitive: &PrimitiveBlock,
) {
osm_stats.nodes += dense_nodes.id.len() as u64;
if let Some(dense_info) = &dense_nodes.denseinfo {
let mut last_timestamp = 0;
for delta_timestamp in &dense_info.timestamp {
let timestamp = last_timestamp + delta_timestamp;
handle_timestamp(&mut osm_stats, timestamp, primitive.date_granularity);
last_timestamp = timestamp;
}
}
let mut last_latitude = 0;
for delta_latitude in &dense_nodes.lat {
let latitude = last_latitude + delta_latitude;
handle_latitude(&mut osm_stats, latitude, &primitive);
last_latitude = latitude;
}
let mut last_longitude = 0;
for delta_longitude in &dense_nodes.lon {
let longitude = last_longitude + delta_longitude;
handle_longitude(&mut osm_stats, longitude, &primitive);
last_longitude = longitude;
}
}
fn handle_node(mut osm_stats: &mut OsmStats, node: &Node, primitive: &PrimitiveBlock) {
osm_stats.nodes += 1;
if let Some(info) = &node.info {
handle_info(&mut osm_stats, &info, primitive.date_granularity)
}
handle_latitude(&mut osm_stats, node.lat, &primitive);
handle_longitude(&mut osm_stats, node.lon, &primitive);
}
fn handle_way(mut osm_stats: &mut OsmStats, way: &Way, primitive: &PrimitiveBlock) {
osm_stats.ways += 1;
if let Some(info) = &way.info {
handle_info(&mut osm_stats, &info, primitive.date_granularity)
}
}
fn handle_relation(mut osm_stats: &mut OsmStats, relation: &Relation, primitive: &PrimitiveBlock) {
osm_stats.relations += 1;
if let Some(info) = &relation.info {
handle_info(&mut osm_stats, &info, primitive.date_granularity)
}
}
fn handle_info(mut osm_stats: &mut OsmStats, info: &Info, date_granularity: i32) {
if let Some(timestamp) = info.timestamp {
handle_timestamp(&mut osm_stats, timestamp, date_granularity);
}
}
fn handle_timestamp(osm_stats: &mut OsmStats, timestamp: i64, date_granularity: i32) {
let millisec_stamp = timestamp * (date_granularity as i64);
if millisec_stamp < osm_stats.timestamp_min {
osm_stats.timestamp_min = millisec_stamp
}
if millisec_stamp > osm_stats.timestamp_max {
osm_stats.timestamp_max = millisec_stamp
}
}
fn handle_latitude(osm_stats: &mut OsmStats, latitude: i64, primitive: &PrimitiveBlock) {
let latitude_f =
0.000000001 * ((primitive.lat_offset + ((primitive.granularity as i64) * latitude)) as f64);
if latitude_f < osm_stats.lat_min {
osm_stats.lat_min = latitude_f
}
if latitude_f > osm_stats.lat_max {
osm_stats.lat_max = latitude_f
}
}
fn handle_longitude(osm_stats: &mut OsmStats, longitude: i64, primitive: &PrimitiveBlock) {
let longitude_f = 0.000000001
* ((primitive.lon_offset + ((primitive.granularity as i64) * longitude)) as f64);
if longitude_f < osm_stats.lon_min {
osm_stats.lon_min = longitude_f
}
if longitude_f > osm_stats.lon_max {
osm_stats.lon_max = longitude_f
}
}
fn empty_osm_stats() -> OsmStats {
OsmStats {
nodes: 0,
relations: 0,
timestamp_max: std::i64::MIN,
timestamp_min: std::i64::MAX,
ways: 0,
lat_min: 100.0,
lat_max: -100.0,
lon_max: -200.0,
lon_min: 200.0,
}
} | random_line_split | |
main.rs | extern crate jemallocator;
extern crate num_cpus;
extern crate quick_protobuf;
mod osm_pbf;
use crossbeam_channel::{bounded, unbounded};
use crossbeam_utils::thread;
use memmap::MmapOptions;
use osm_pbf::{Blob, BlobHeader, DenseNodes, Info, Node, PrimitiveBlock, Relation, Way};
use quick_protobuf::{BytesReader, MessageRead};
use std::cmp::{max, min};
use std::fs::File;
use std::panic;
use std::process;
#[global_allocator]
static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc;
const WORK_BOUND: usize = 4000;
const MAX_COMPRESSED_BLOB_SIZE: i32 = 64 * 1024;
const MAX_DECOMPRESSED_BLOB_SIZE: i32 = 32 * 1024 * 1024;
#[derive(Debug)]
struct OsmStats {
timestamp_min: i64,
timestamp_max: i64,
nodes: u64,
ways: u64,
relations: u64,
lon_min: f64,
lon_max: f64,
lat_min: f64,
lat_max: f64,
}
fn main() {
let args: Vec<_> = std::env::args_os().collect();
let filename = &args[1];
let orig_handler = panic::take_hook();
panic::set_hook(Box::new(move |panic_info| {
let handler = &orig_handler;
handler(panic_info);
process::exit(1);
}));
match do_processing(filename, num_cpus::get()) {
Ok(result) => println!("{}", result),
Err(err) => println!("{}", err),
}
}
fn do_processing(filename: &std::ffi::OsStr, thread_count: usize) -> Result<String, String> {
let file_handle = File::open(filename).or(Err("unable to open file"))?;
let mmap = unsafe {
MmapOptions::new()
.map(&file_handle)
.or(Err("unable to mmap"))?
};
let bytes = &mmap[..];
let mut reader = BytesReader::from_bytes(&bytes);
let mut sent_messages = 0;
let (sender, receiver) = bounded::<Blob>(WORK_BOUND);
let (return_sender, return_received) = unbounded::<OsmStats>();
thread::scope(|s| {
for _ in 0..thread_count {
let cloned_receiver = receiver.clone();
let cloned_return_sender = return_sender.clone();
s.spawn(move |_| {
let mut buffer = Vec::with_capacity(MAX_DECOMPRESSED_BLOB_SIZE as usize);
let mut stats = empty_osm_stats();
loop {
match cloned_receiver.recv() {
Ok(blob) => {
handle_block(&mut stats, &blob, &mut buffer);
buffer.clear();
}
Err(_e) => break,
}
}
cloned_return_sender
.send(stats)
.expect("failed to send size result");
});
}
loop {
let header_size = match reader.read_sfixed32(bytes).map(|value| value.swap_bytes()) {
Ok(size) if size > MAX_COMPRESSED_BLOB_SIZE => {
return Err("invalid data, compressed blob too large".to_string())
}
Ok(size) => size,
Err(_e) => break,
} as usize;
let blob_header = reader
.read_message_by_len::<BlobHeader>(&bytes, header_size)
.expect("failed to read blob header");
let blob = reader
.read_message_by_len::<Blob>(bytes, blob_header.datasize as usize)
.expect("failed to read blob");
if blob.raw_size.unwrap_or(0) > MAX_DECOMPRESSED_BLOB_SIZE {
return Err("invalid data, uncompressed blob too large".to_string());
}
if blob_header.type_pb == "OSMData" {
sent_messages += 1;
sender.send(blob).expect("failed to send blob");
}
}
drop(sender);
let mut received_messages = 0;
let mut osm_stats = empty_osm_stats();
while received_messages < thread_count {
let worker_stats = return_received.recv().unwrap();
osm_stats.nodes += worker_stats.nodes;
osm_stats.ways += worker_stats.ways;
osm_stats.relations += worker_stats.relations;
osm_stats.timestamp_max = max(osm_stats.timestamp_max, worker_stats.timestamp_max);
osm_stats.timestamp_min = min(osm_stats.timestamp_min, worker_stats.timestamp_min);
if worker_stats.lat_max > osm_stats.lat_max {
osm_stats.lat_max = worker_stats.lat_max
}
if worker_stats.lat_min < osm_stats.lat_min {
osm_stats.lat_min = worker_stats.lat_min
}
if worker_stats.lon_max > osm_stats.lon_max {
osm_stats.lon_max = worker_stats.lon_max
}
if worker_stats.lon_min < osm_stats.lon_min |
received_messages += 1;
}
Ok(format!("{:#?}", osm_stats))
})
.unwrap()
}
fn handle_block(mut osm_stats: &mut OsmStats, blob: &Blob, buffer: &mut Vec<u8>) {
let zlib_data_ref = blob.zlib_data.as_ref();
let tried_block = if blob.raw.is_some() {
let bytes = blob.raw.as_ref().unwrap();
let mut reader = BytesReader::from_bytes(&bytes);
Some(
PrimitiveBlock::from_reader(&mut reader, &bytes)
.expect("failed to read primitive block"),
)
} else if zlib_data_ref.is_some() {
use flate2::{Decompress, FlushDecompress};
let mut decompress = Decompress::new(true);
decompress
.decompress_vec(&zlib_data_ref.unwrap(), buffer, FlushDecompress::Finish)
.expect("error decompressing");
let mut reader = BytesReader::from_bytes(&buffer);
Some(
PrimitiveBlock::from_reader(&mut reader, &buffer)
.expect("failed to read gzipped primitive block"),
)
} else {
None
};
let block = tried_block.unwrap();
handle_primitive_block(&mut osm_stats, &block);
}
fn handle_primitive_block(mut osm_stats: &mut OsmStats, block: &PrimitiveBlock) {
for primitive in &block.primitivegroup {
if let Some(dense_nodes) = &primitive.dense {
handle_dense_nodes(&mut osm_stats, &dense_nodes, &block);
}
for node in &primitive.nodes {
handle_node(&mut osm_stats, &node, &block);
}
for way in &primitive.ways {
handle_way(&mut osm_stats, &way, &block);
}
for relation in &primitive.relations {
handle_relation(&mut osm_stats, &relation, &block);
}
}
}
fn handle_dense_nodes(
mut osm_stats: &mut OsmStats,
dense_nodes: &DenseNodes,
primitive: &PrimitiveBlock,
) {
osm_stats.nodes += dense_nodes.id.len() as u64;
if let Some(dense_info) = &dense_nodes.denseinfo {
let mut last_timestamp = 0;
for delta_timestamp in &dense_info.timestamp {
let timestamp = last_timestamp + delta_timestamp;
handle_timestamp(&mut osm_stats, timestamp, primitive.date_granularity);
last_timestamp = timestamp;
}
}
let mut last_latitude = 0;
for delta_latitude in &dense_nodes.lat {
let latitude = last_latitude + delta_latitude;
handle_latitude(&mut osm_stats, latitude, &primitive);
last_latitude = latitude;
}
let mut last_longitude = 0;
for delta_longitude in &dense_nodes.lon {
let longitude = last_longitude + delta_longitude;
handle_longitude(&mut osm_stats, longitude, &primitive);
last_longitude = longitude;
}
}
fn handle_node(mut osm_stats: &mut OsmStats, node: &Node, primitive: &PrimitiveBlock) {
osm_stats.nodes += 1;
if let Some(info) = &node.info {
handle_info(&mut osm_stats, &info, primitive.date_granularity)
}
handle_latitude(&mut osm_stats, node.lat, &primitive);
handle_longitude(&mut osm_stats, node.lon, &primitive);
}
fn handle_way(mut osm_stats: &mut OsmStats, way: &Way, primitive: &PrimitiveBlock) {
osm_stats.ways += 1;
if let Some(info) = &way.info {
handle_info(&mut osm_stats, &info, primitive.date_granularity)
}
}
fn handle_relation(mut osm_stats: &mut OsmStats, relation: &Relation, primitive: &PrimitiveBlock) {
osm_stats.relations += 1;
if let Some(info) = &relation.info {
handle_info(&mut osm_stats, &info, primitive.date_granularity)
}
}
fn handle_info(mut osm_stats: &mut OsmStats, info: &Info, date_granularity: i32) {
if let Some(timestamp) = info.timestamp {
handle_timestamp(&mut osm_stats, timestamp, date_granularity);
}
}
fn handle_timestamp(osm_stats: &mut OsmStats, timestamp: i64, date_granularity: i32) {
let millisec_stamp = timestamp * (date_granularity as i64);
if millisec_stamp < osm_stats.timestamp_min {
osm_stats.timestamp_min = millisec_stamp
}
if millisec_stamp > osm_stats.timestamp_max {
osm_stats.timestamp_max = millisec_stamp
}
}
fn handle_latitude(osm_stats: &mut OsmStats, latitude: i64, primitive: &PrimitiveBlock) {
let latitude_f =
0.000000001 * ((primitive.lat_offset + ((primitive.granularity as i64) * latitude)) as f64);
if latitude_f < osm_stats.lat_min {
osm_stats.lat_min = latitude_f
}
if latitude_f > osm_stats.lat_max {
osm_stats.lat_max = latitude_f
}
}
fn handle_longitude(osm_stats: &mut OsmStats, longitude: i64, primitive: &PrimitiveBlock) {
let longitude_f = 0.000000001
* ((primitive.lon_offset + ((primitive.granularity as i64) * longitude)) as f64);
if longitude_f < osm_stats.lon_min {
osm_stats.lon_min = longitude_f
}
if longitude_f > osm_stats.lon_max {
osm_stats.lon_max = longitude_f
}
}
fn empty_osm_stats() -> OsmStats {
OsmStats {
nodes: 0,
relations: 0,
timestamp_max: std::i64::MIN,
timestamp_min: std::i64::MAX,
ways: 0,
lat_min: 100.0,
lat_max: -100.0,
lon_max: -200.0,
lon_min: 200.0,
}
}
| {
osm_stats.lon_min = worker_stats.lon_min
} | conditional_block |
main.rs | extern crate jemallocator;
extern crate num_cpus;
extern crate quick_protobuf;
mod osm_pbf;
use crossbeam_channel::{bounded, unbounded};
use crossbeam_utils::thread;
use memmap::MmapOptions;
use osm_pbf::{Blob, BlobHeader, DenseNodes, Info, Node, PrimitiveBlock, Relation, Way};
use quick_protobuf::{BytesReader, MessageRead};
use std::cmp::{max, min};
use std::fs::File;
use std::panic;
use std::process;
#[global_allocator]
static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc;
const WORK_BOUND: usize = 4000;
const MAX_COMPRESSED_BLOB_SIZE: i32 = 64 * 1024;
const MAX_DECOMPRESSED_BLOB_SIZE: i32 = 32 * 1024 * 1024;
#[derive(Debug)]
struct OsmStats {
timestamp_min: i64,
timestamp_max: i64,
nodes: u64,
ways: u64,
relations: u64,
lon_min: f64,
lon_max: f64,
lat_min: f64,
lat_max: f64,
}
fn main() {
let args: Vec<_> = std::env::args_os().collect();
let filename = &args[1];
let orig_handler = panic::take_hook();
panic::set_hook(Box::new(move |panic_info| {
let handler = &orig_handler;
handler(panic_info);
process::exit(1);
}));
match do_processing(filename, num_cpus::get()) {
Ok(result) => println!("{}", result),
Err(err) => println!("{}", err),
}
}
fn do_processing(filename: &std::ffi::OsStr, thread_count: usize) -> Result<String, String> {
let file_handle = File::open(filename).or(Err("unable to open file"))?;
let mmap = unsafe {
MmapOptions::new()
.map(&file_handle)
.or(Err("unable to mmap"))?
};
let bytes = &mmap[..];
let mut reader = BytesReader::from_bytes(&bytes);
let mut sent_messages = 0;
let (sender, receiver) = bounded::<Blob>(WORK_BOUND);
let (return_sender, return_received) = unbounded::<OsmStats>();
thread::scope(|s| {
for _ in 0..thread_count {
let cloned_receiver = receiver.clone();
let cloned_return_sender = return_sender.clone();
s.spawn(move |_| {
let mut buffer = Vec::with_capacity(MAX_DECOMPRESSED_BLOB_SIZE as usize);
let mut stats = empty_osm_stats();
loop {
match cloned_receiver.recv() {
Ok(blob) => {
handle_block(&mut stats, &blob, &mut buffer);
buffer.clear();
}
Err(_e) => break,
}
}
cloned_return_sender
.send(stats)
.expect("failed to send size result");
});
}
loop {
let header_size = match reader.read_sfixed32(bytes).map(|value| value.swap_bytes()) {
Ok(size) if size > MAX_COMPRESSED_BLOB_SIZE => {
return Err("invalid data, compressed blob too large".to_string())
}
Ok(size) => size,
Err(_e) => break,
} as usize;
let blob_header = reader
.read_message_by_len::<BlobHeader>(&bytes, header_size)
.expect("failed to read blob header");
let blob = reader
.read_message_by_len::<Blob>(bytes, blob_header.datasize as usize)
.expect("failed to read blob");
if blob.raw_size.unwrap_or(0) > MAX_DECOMPRESSED_BLOB_SIZE {
return Err("invalid data, uncompressed blob too large".to_string());
}
if blob_header.type_pb == "OSMData" {
sent_messages += 1;
sender.send(blob).expect("failed to send blob");
}
}
drop(sender);
let mut received_messages = 0;
let mut osm_stats = empty_osm_stats();
while received_messages < thread_count {
let worker_stats = return_received.recv().unwrap();
osm_stats.nodes += worker_stats.nodes;
osm_stats.ways += worker_stats.ways;
osm_stats.relations += worker_stats.relations;
osm_stats.timestamp_max = max(osm_stats.timestamp_max, worker_stats.timestamp_max);
osm_stats.timestamp_min = min(osm_stats.timestamp_min, worker_stats.timestamp_min);
if worker_stats.lat_max > osm_stats.lat_max {
osm_stats.lat_max = worker_stats.lat_max
}
if worker_stats.lat_min < osm_stats.lat_min {
osm_stats.lat_min = worker_stats.lat_min
}
if worker_stats.lon_max > osm_stats.lon_max {
osm_stats.lon_max = worker_stats.lon_max
}
if worker_stats.lon_min < osm_stats.lon_min {
osm_stats.lon_min = worker_stats.lon_min
}
received_messages += 1;
}
Ok(format!("{:#?}", osm_stats))
})
.unwrap()
}
fn handle_block(mut osm_stats: &mut OsmStats, blob: &Blob, buffer: &mut Vec<u8>) {
let zlib_data_ref = blob.zlib_data.as_ref();
let tried_block = if blob.raw.is_some() {
let bytes = blob.raw.as_ref().unwrap();
let mut reader = BytesReader::from_bytes(&bytes);
Some(
PrimitiveBlock::from_reader(&mut reader, &bytes)
.expect("failed to read primitive block"),
)
} else if zlib_data_ref.is_some() {
use flate2::{Decompress, FlushDecompress};
let mut decompress = Decompress::new(true);
decompress
.decompress_vec(&zlib_data_ref.unwrap(), buffer, FlushDecompress::Finish)
.expect("error decompressing");
let mut reader = BytesReader::from_bytes(&buffer);
Some(
PrimitiveBlock::from_reader(&mut reader, &buffer)
.expect("failed to read gzipped primitive block"),
)
} else {
None
};
let block = tried_block.unwrap();
handle_primitive_block(&mut osm_stats, &block);
}
fn handle_primitive_block(mut osm_stats: &mut OsmStats, block: &PrimitiveBlock) {
for primitive in &block.primitivegroup {
if let Some(dense_nodes) = &primitive.dense {
handle_dense_nodes(&mut osm_stats, &dense_nodes, &block);
}
for node in &primitive.nodes {
handle_node(&mut osm_stats, &node, &block);
}
for way in &primitive.ways {
handle_way(&mut osm_stats, &way, &block);
}
for relation in &primitive.relations {
handle_relation(&mut osm_stats, &relation, &block);
}
}
}
fn handle_dense_nodes(
mut osm_stats: &mut OsmStats,
dense_nodes: &DenseNodes,
primitive: &PrimitiveBlock,
) {
osm_stats.nodes += dense_nodes.id.len() as u64;
if let Some(dense_info) = &dense_nodes.denseinfo {
let mut last_timestamp = 0;
for delta_timestamp in &dense_info.timestamp {
let timestamp = last_timestamp + delta_timestamp;
handle_timestamp(&mut osm_stats, timestamp, primitive.date_granularity);
last_timestamp = timestamp;
}
}
let mut last_latitude = 0;
for delta_latitude in &dense_nodes.lat {
let latitude = last_latitude + delta_latitude;
handle_latitude(&mut osm_stats, latitude, &primitive);
last_latitude = latitude;
}
let mut last_longitude = 0;
for delta_longitude in &dense_nodes.lon {
let longitude = last_longitude + delta_longitude;
handle_longitude(&mut osm_stats, longitude, &primitive);
last_longitude = longitude;
}
}
fn handle_node(mut osm_stats: &mut OsmStats, node: &Node, primitive: &PrimitiveBlock) {
osm_stats.nodes += 1;
if let Some(info) = &node.info {
handle_info(&mut osm_stats, &info, primitive.date_granularity)
}
handle_latitude(&mut osm_stats, node.lat, &primitive);
handle_longitude(&mut osm_stats, node.lon, &primitive);
}
fn handle_way(mut osm_stats: &mut OsmStats, way: &Way, primitive: &PrimitiveBlock) {
osm_stats.ways += 1;
if let Some(info) = &way.info {
handle_info(&mut osm_stats, &info, primitive.date_granularity)
}
}
fn handle_relation(mut osm_stats: &mut OsmStats, relation: &Relation, primitive: &PrimitiveBlock) {
osm_stats.relations += 1;
if let Some(info) = &relation.info {
handle_info(&mut osm_stats, &info, primitive.date_granularity)
}
}
fn handle_info(mut osm_stats: &mut OsmStats, info: &Info, date_granularity: i32) {
if let Some(timestamp) = info.timestamp {
handle_timestamp(&mut osm_stats, timestamp, date_granularity);
}
}
fn handle_timestamp(osm_stats: &mut OsmStats, timestamp: i64, date_granularity: i32) |
fn handle_latitude(osm_stats: &mut OsmStats, latitude: i64, primitive: &PrimitiveBlock) {
let latitude_f =
0.000000001 * ((primitive.lat_offset + ((primitive.granularity as i64) * latitude)) as f64);
if latitude_f < osm_stats.lat_min {
osm_stats.lat_min = latitude_f
}
if latitude_f > osm_stats.lat_max {
osm_stats.lat_max = latitude_f
}
}
fn handle_longitude(osm_stats: &mut OsmStats, longitude: i64, primitive: &PrimitiveBlock) {
let longitude_f = 0.000000001
* ((primitive.lon_offset + ((primitive.granularity as i64) * longitude)) as f64);
if longitude_f < osm_stats.lon_min {
osm_stats.lon_min = longitude_f
}
if longitude_f > osm_stats.lon_max {
osm_stats.lon_max = longitude_f
}
}
fn empty_osm_stats() -> OsmStats {
OsmStats {
nodes: 0,
relations: 0,
timestamp_max: std::i64::MIN,
timestamp_min: std::i64::MAX,
ways: 0,
lat_min: 100.0,
lat_max: -100.0,
lon_max: -200.0,
lon_min: 200.0,
}
}
| {
let millisec_stamp = timestamp * (date_granularity as i64);
if millisec_stamp < osm_stats.timestamp_min {
osm_stats.timestamp_min = millisec_stamp
}
if millisec_stamp > osm_stats.timestamp_max {
osm_stats.timestamp_max = millisec_stamp
}
} | identifier_body |
provider.go | // Copyright 2016-2021, Pulumi Corporation.
package provider
import (
"bytes"
"compress/gzip"
"context"
"encoding/json"
"fmt"
"log"
"net/http"
"os"
"runtime/debug"
"strings"
"time"
"github.com/golang/protobuf/ptypes/empty"
structpb "github.com/golang/protobuf/ptypes/struct"
"github.com/jpillora/backoff"
"github.com/pkg/errors"
"github.com/pulumi/pulumi-google-native/provider/pkg/googleclient"
"github.com/pulumi/pulumi-google-native/provider/pkg/resources"
"github.com/pulumi/pulumi-google-native/provider/pkg/version"
"github.com/pulumi/pulumi/pkg/v3/resource/provider"
"github.com/pulumi/pulumi/sdk/v3/go/common/diag"
"github.com/pulumi/pulumi/sdk/v3/go/common/resource"
"github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin"
"github.com/pulumi/pulumi/sdk/v3/go/common/util/logging"
"github.com/pulumi/pulumi/sdk/v3/go/common/util/rpcutil/rpcerror"
rpc "github.com/pulumi/pulumi/sdk/v3/proto/go"
"google.golang.org/api/googleapi"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
type googleCloudProvider struct {
host *provider.HostClient
name string
version string
config map[string]string
schemaBytes []byte
client *googleclient.GoogleClient
resourceMap *resources.CloudAPIMetadata
converter *resources.SdkShapeConverter
}
func makeProvider(host *provider.HostClient, name, version string, schemaBytes []byte,
cloudAPIResourcesBytes []byte) (rpc.ResourceProviderServer, error) {
resourceMap, err := loadMetadata(cloudAPIResourcesBytes)
if err != nil {
return nil, err
}
// Return the new provider
return &googleCloudProvider{
host: host,
name: name,
version: version,
config: map[string]string{},
schemaBytes: schemaBytes,
resourceMap: resourceMap,
converter: &resources.SdkShapeConverter{Types: resourceMap.Types},
}, nil
}
// loadMetadata decompresses the gzip-packed JSON byte slice and deserializes it
// into a CloudAPIMetadata value.
func loadMetadata(metadataBytes []byte) (*resources.CloudAPIMetadata, error) {
	reader, err := gzip.NewReader(bytes.NewReader(metadataBytes))
	if err != nil {
		return nil, errors.Wrap(err, "expand compressed metadata")
	}
	var metadata resources.CloudAPIMetadata
	if decodeErr := json.NewDecoder(reader).Decode(&metadata); decodeErr != nil {
		return nil, errors.Wrap(decodeErr, "unmarshalling resource map")
	}
	// Close verifies the gzip checksum; its error is significant.
	if closeErr := reader.Close(); closeErr != nil {
		return nil, errors.Wrap(closeErr, "closing uncompress stream for metadata")
	}
	return &metadata, nil
}
// Configure configures the resource provider with "globals" that control its behavior.
func (p *googleCloudProvider) Configure(ctx context.Context,
	req *rpc.ConfigureRequest) (*rpc.ConfigureResponse, error) {
	// Strip the provider prefix so config keys can be looked up by bare name.
	for key, val := range req.GetVariables() {
		p.config[strings.TrimPrefix(key, "google-native:config:")] = val
	}
	p.setLoggingContext(ctx)
	// The delegates list is passed as a JSON-encoded array of strings.
	impersonateServiceAccountDelegatesString := p.getConfig("impersonateServiceAccountDelegates", "")
	var impersonateServiceAccountDelegates []string
	if impersonateServiceAccountDelegatesString != "" {
		err := json.Unmarshal([]byte(impersonateServiceAccountDelegatesString), &impersonateServiceAccountDelegates)
		if err != nil {
			return nil, errors.Wrapf(err, "failed to unmarshal %q as Impersonate Service Account Delegates", impersonateServiceAccountDelegatesString)
		}
	}
	// OAuth scopes are likewise a JSON-encoded array of strings.
	scopesString := p.getConfig("scopes", "")
	var scopes []string
	if scopesString != "" {
		err := json.Unmarshal([]byte(scopesString), &scopes)
		if err != nil {
			return nil, errors.Wrapf(err, "failed to unmarshal %q as Scopes", scopesString)
		}
	}
	appendUserAgent := p.getConfig("appendUserAgent", "GOOGLE_APPEND_USER_AGENT")
	// Assemble the HTTP client configuration from provider config and env vars.
	config := googleclient.Config{
		Credentials:                        p.getConfig("credentials", "GOOGLE_CREDENTIALS"),
		AccessToken:                        p.getConfig("accessToken", "GOOGLE_OAUTH_ACCESS_TOKEN"),
		ImpersonateServiceAccount:          p.getConfig("impersonateServiceAccount", "GOOGLE_IMPERSONATE_SERVICE_ACCOUNT"),
		ImpersonateServiceAccountDelegates: impersonateServiceAccountDelegates,
		Scopes:                             scopes,
		PulumiVersion:                      getPulumiVersion(),
		ProviderVersion:                    version.Version,
		PartnerName:                        p.getPartnerName(),
		AppendUserAgent:                    appendUserAgent,
	}
	client, err := googleclient.New(ctx, config)
	if err != nil {
		return nil, err
	}
	p.client = client
	return &rpc.ConfigureResponse{
		// Accept secret values from the engine without requiring encryption.
		AcceptSecrets: true,
	}, nil
}
// Invoke dynamically executes a built-in function (data source) in the provider.
func (p *googleCloudProvider) Invoke(_ context.Context, req *rpc.InvokeRequest) (*rpc.InvokeResponse, error) {
	label := fmt.Sprintf("%s.Invoke(%s)", p.name, req.Tok)
	inv, ok := p.resourceMap.Functions[req.Tok]
	if !ok {
		return nil, errors.Errorf("invoke %q not found", req.Tok)
	}
	args, err := plugin.UnmarshalProperties(req.GetArgs(), plugin.MarshalOptions{
		Label: fmt.Sprintf("%s.args", label), SkipNulls: true,
	})
	if err != nil {
		return nil, err
	}
	// Apply default config values. Only "project" is defaulted for invokes.
	for _, param := range inv.Params {
		sdkName := param.Name
		if param.SdkName != "" {
			sdkName = param.SdkName
		}
		switch sdkName {
		case "project":
			key := resource.PropertyKey(sdkName)
			if value, ok := p.getDefaultValue(key, sdkName, args); ok {
				args[key] = *value
			}
		}
	}
	uri, err := buildFunctionUrl(inv, args)
	if err != nil {
		return nil, err
	}
	// Timeout 0 falls back to the client's default timeout.
	resp, err := p.client.RequestWithTimeout(inv.Verb, uri, nil, 0)
	if err != nil {
		return nil, fmt.Errorf("error sending request: %s", err)
	}
	// Serialize and return outputs.
	result, err := plugin.MarshalProperties(
		resource.NewPropertyMapFromMap(resp),
		plugin.MarshalOptions{Label: fmt.Sprintf("%s.response", label), SkipNulls: true},
	)
	if err != nil {
		return nil, err
	}
	return &rpc.InvokeResponse{Return: result}, nil
}
// StreamInvoke dynamically executes a built-in function in the provider. The result is streamed
// back as a series of messages.
// This provider does not support streaming invokes.
func (p *googleCloudProvider) StreamInvoke(_ *rpc.InvokeRequest, _ rpc.ResourceProvider_StreamInvokeServer) error {
	return status.Error(codes.Unimplemented, "StreamInvoke is not yet implemented")
}
// Check validates that the given property bag is valid for a resource of the given type and returns
// the inputs that should be passed to successive calls to Diff, Create, or Update for this
// resource. As a rule, the provider inputs returned by a call to Check should preserve the original
// representation of the properties as present in the program inputs. Though this rule is not
// required for correctness, violations thereof can negatively impact the end-user experience, as
// the provider inputs are using for detecting and rendering diffs.
func (p *googleCloudProvider) Check(_ context.Context, req *rpc.CheckRequest) (*rpc.CheckResponse, error) {
	urn := resource.URN(req.GetUrn())
	label := fmt.Sprintf("%s.Check(%s)", p.name, urn)
	logging.V(9).Infof("%s executing", label)
	// Deserialize RPC inputs.
	olds, err := plugin.UnmarshalProperties(req.GetOlds(), plugin.MarshalOptions{
		Label: fmt.Sprintf("%s.olds", label), SkipNulls: true,
	})
	if err != nil {
		return nil, err
	}
	news, err := plugin.UnmarshalProperties(req.GetNews(), plugin.MarshalOptions{
		Label: fmt.Sprintf("%s.news", label), KeepUnknowns: true, SkipNulls: true,
	})
	if err != nil {
		return nil, err
	}
	resourceKey := string(urn.Type())
	res, ok := p.resourceMap.Resources[resourceKey]
	if !ok {
		return nil, errors.Errorf("resource %q not found", resourceKey)
	}
	// Apply default config values for project/location/zone: fall back to the
	// old inputs and then to provider config; report a check failure when no
	// default can be found for a required create parameter.
	var failures []*rpc.CheckFailure
	for _, param := range res.CreateParams {
		sdkName := param.Name
		if param.SdkName != "" {
			sdkName = param.SdkName
		}
		switch sdkName {
		case "project", "location", "zone":
			key := resource.PropertyKey(sdkName)
			configName := sdkName
			// The "location" parameter is configured via the "region" config key.
			if sdkName == "location" {
				configName = "region"
			}
			if _, has := news[key]; has {
				continue
			}
			if value, ok := p.getDefaultValue(key, configName, olds); ok {
				news[key] = *value
			} else {
				reason := fmt.Sprintf("missing required property '%s'. Either set it explicitly or configure it with 'pulumi config set google-native:%s <value>'.", sdkName, configName)
				failures = append(failures, &rpc.CheckFailure{
					Reason: reason,
				})
			}
		}
	}
	// Auto-naming: derive a default "name" from the URN when the resource defines a pattern.
	nameKey := resource.PropertyKey("name")
	if res.AutoNamePattern != "" && !news.HasValue(nameKey) {
		news[nameKey] = getDefaultName(urn, res.AutoNamePattern, olds, news)
	}
	// Apply property patterns.
	for name, prop := range res.CreateProperties {
		key := resource.PropertyKey(name)
		if prop.SdkName != "" {
			key = resource.PropertyKey(prop.SdkName)
		}
		if value, ok := applyPropertyPattern(key, prop, news); ok {
			news[key] = *value
		}
	}
	resInputs, err := plugin.MarshalProperties(news, plugin.MarshalOptions{
		Label: fmt.Sprintf("%s.resInputs", label), KeepUnknowns: true})
	if err != nil {
		return nil, err
	}
	return &rpc.CheckResponse{Inputs: resInputs, Failures: failures}, nil
}
// getDefaultValue resolves a default for the given property: first from the
// previous inputs, then from the provider configuration under configName.
// The boolean reports whether a value was found.
func (p *googleCloudProvider) getDefaultValue(key resource.PropertyKey, configName string, olds resource.PropertyMap) (*resource.PropertyValue, bool) {
	// Prefer whatever the previous deployment used.
	if old, has := olds[key]; has {
		return &old, true
	}
	// Otherwise fall back to provider config, if set.
	if configured, has := p.config[configName]; has {
		value := resource.NewStringProperty(configured)
		return &value, true
	}
	return nil, false
}
// GetSchema returns the pre-generated Pulumi schema for this provider.
// Only schema version 0 is supported.
func (p *googleCloudProvider) GetSchema(_ context.Context, req *rpc.GetSchemaRequest) (*rpc.GetSchemaResponse, error) {
	requested := req.GetVersion()
	if requested != 0 {
		return nil, fmt.Errorf("unsupported schema version %d", requested)
	}
	schema := string(p.schemaBytes)
	return &rpc.GetSchemaResponse{Schema: schema}, nil
}
// CheckConfig validates the configuration for this provider.
// All inputs are accepted as-is; actual validation happens in Configure.
func (p *googleCloudProvider) CheckConfig(_ context.Context, req *rpc.CheckRequest) (*rpc.CheckResponse, error) {
	return &rpc.CheckResponse{Inputs: req.GetNews()}, nil
}
// DiffConfig diffs the configuration for this provider.
// Config changes never force replacement of the provider instance.
func (p *googleCloudProvider) DiffConfig(context.Context, *rpc.DiffRequest) (*rpc.DiffResponse, error) {
	return &rpc.DiffResponse{
		Changes:             0,
		Replaces:            []string{},
		Stables:             []string{},
		DeleteBeforeReplace: false,
	}, nil
}
// Diff checks what impacts a hypothetical update will have on the resource's properties.
// It reports all create-only parameters and properties as replacement triggers and
// leaves the detailed property diff to the engine (DIFF_UNKNOWN).
func (p *googleCloudProvider) Diff(_ context.Context, req *rpc.DiffRequest) (*rpc.DiffResponse, error) {
	urn := resource.URN(req.GetUrn())
	label := fmt.Sprintf("%s.Diff(%s)", p.name, urn)
	logging.V(9).Infof("%s executing", label)
	resourceKey := string(urn.Type())
	res, ok := p.resourceMap.Resources[resourceKey]
	if !ok {
		return nil, errors.Errorf("resource %q not found", resourceKey)
	}
	oldState, err := plugin.UnmarshalProperties(req.GetOlds(), plugin.MarshalOptions{
		Label:        fmt.Sprintf("%s.oldState", label),
		KeepUnknowns: true,
		KeepSecrets:  true,
	})
	if err != nil {
		return nil, errors.Wrapf(err, "diff failed because malformed resource inputs")
	}
	// Extract old inputs from the `__inputs` field of the old state.
	oldInputs := parseCheckpointObject(oldState)
	newInputs, err := plugin.UnmarshalProperties(req.GetNews(), plugin.MarshalOptions{
		Label:        fmt.Sprintf("%s.newInputs", label),
		KeepUnknowns: true,
		KeepSecrets:  true,
	})
	if err != nil {
		return nil, errors.Wrapf(err, "diff failed because malformed resource inputs")
	}
	// Short-circuit: identical inputs mean no diff at all.
	diff := oldInputs.Diff(newInputs)
	if diff == nil {
		return &rpc.DiffResponse{Changes: rpc.DiffResponse_DIFF_NONE}, nil
	}
	// Every create-time URL parameter forces a replacement when changed.
	var replaces []string
	for _, p := range res.CreateParams {
		sdkName := p.Name
		if p.SdkName != "" {
			sdkName = p.SdkName
		}
		replaces = append(replaces, sdkName)
	}
	// Properties settable at create time but absent from the update schema also force replacement.
	for name, prop := range res.CreateProperties {
		if _, has := res.UpdateProperties[name]; !has {
			sdkName := name
			if prop.SdkName != "" {
				sdkName = prop.SdkName
			}
			replaces = append(replaces, sdkName)
		}
	}
	// Uploads are only supported for create methods, not updates.
	if res.AssetUpload {
		if _, ok := diff.Updates[resource.PropertyKey("source")]; ok {
			replaces = append(replaces, "source")
		}
	}
	return &rpc.DiffResponse{Changes: rpc.DiffResponse_DIFF_UNKNOWN, Replaces: replaces, DeleteBeforeReplace: true}, nil
}
// Create allocates a new instance of the provided resource and returns its unique ID afterwards.
// It builds the create URL from metadata, issues the request (with an asset upload when the
// resource requires one), waits for the long-running operation, and checkpoints both inputs
// and outputs into the returned state.
func (p *googleCloudProvider) Create(ctx context.Context, req *rpc.CreateRequest) (*rpc.CreateResponse, error) {
	urn := resource.URN(req.GetUrn())
	label := fmt.Sprintf("%s.Create(%s)", p.name, urn)
	logging.V(9).Infof("%s executing", label)

	// Deserialize RPC inputs.
	inputs, err := plugin.UnmarshalProperties(req.GetProperties(), plugin.MarshalOptions{
		Label: fmt.Sprintf("%s.properties", label), SkipNulls: true,
	})
	if err != nil {
		return nil, err
	}
	inputsMap := inputs.Mappable()

	resourceKey := string(urn.Type())
	res, ok := p.resourceMap.Resources[resourceKey]
	if !ok {
		return nil, errors.Errorf("resource %q not found", resourceKey)
	}
	uri, err := buildCreateUrl(res, inputs)
	if err != nil {
		return nil, err
	}

	body := p.prepareAPIInputs(inputs, nil, res.CreateProperties)

	var op map[string]interface{}
	if res.AssetUpload {
		// Asset-backed resources send their payload alongside the JSON body.
		var content []byte
		source := inputs["source"]
		if source.IsAsset() {
			content, err = source.AssetValue().Bytes()
		} else if source.IsArchive() {
			content, err = source.ArchiveValue().Bytes(resource.ZIPArchive)
		}
		if err != nil {
			return nil, err
		}
		op, err = p.client.UploadWithTimeout(res.CreateVerb, uri, body, content, 0)
		if err != nil {
			return nil, fmt.Errorf("error sending upload request: %s: %q %+v %d", err, uri, inputs.Mappable(), len(content))
		}
	} else {
		op, err = p.client.RequestWithTimeout(res.CreateVerb, uri, body, 0)
		if err != nil {
			return nil, fmt.Errorf("error sending request: %s: %q %+v", err, uri, inputs.Mappable())
		}
	}

	resp, err := p.waitForResourceOpCompletion(res.BaseUrl, op)
	if err != nil {
		if resp == nil {
			return nil, errors.Wrapf(err, "waiting for completion")
		}
		// A partial failure may have occurred because we got an error and a response.
		// Try reading the resource state and return a partial error if there is some.
		id, idErr := calculateResourceId(res, inputsMap, resp)
		if idErr != nil {
			return nil, errors.Wrapf(err, "waiting for completion / calculate ID %s", idErr)
		}
		readResp, getErr := p.client.RequestWithTimeout("GET", id, nil, 0)
		if getErr != nil {
			return nil, errors.Wrapf(err, "waiting for completion / read state %s", getErr)
		}
		checkpoint, cpErr := plugin.MarshalProperties(
			checkpointObject(inputs, readResp),
			plugin.MarshalOptions{Label: fmt.Sprintf("%s.partialCheckpoint", label), KeepSecrets: true, SkipNulls: true},
		)
		if cpErr != nil {
			return nil, errors.Wrapf(err, "waiting for completion / checkpoint %s", cpErr)
		}
		return nil, partialError(id, err, checkpoint, req.GetProperties())
	}

	// Store both outputs and inputs into the state.
	checkpoint, err := plugin.MarshalProperties(
		checkpointObject(inputs, resp),
		plugin.MarshalOptions{Label: fmt.Sprintf("%s.checkpoint", label), KeepSecrets: true, SkipNulls: true},
	)
	// Bug fix: this marshalling error was previously ignored and then silently
	// overwritten by the calculateResourceId assignment below.
	if err != nil {
		return nil, errors.Wrapf(err, "marshalling checkpoint")
	}

	id, err := calculateResourceId(res, inputsMap, resp)
	if err != nil {
		return nil, errors.Wrapf(err, "calculating resource ID")
	}

	return &rpc.CreateResponse{
		Id:         id,
		Properties: checkpoint,
	}, nil
}
// prepareAPIInputs flattens the SDK-shaped inputs and previous state to plain
// Go maps and converts them into the request body expected by the cloud API.
func (p *googleCloudProvider) prepareAPIInputs(
	inputs, state resource.PropertyMap,
	properties map[string]resources.CloudAPIProperty) map[string]interface{} {
	return p.converter.SdkPropertiesToRequestBody(properties, inputs.Mappable(), state.Mappable())
}
// waitForResourceOpCompletion keeps polling the resource or operation URL until it gets
// a success or a failure of provisioning.
// Note that both a response and an error can be returned in case of a partially-failed deployment
// (e.g., resource is created but failed to initialize to completion).
func (p *googleCloudProvider) waitForResourceOpCompletion(baseUrl string, resp map[string]interface{}) (map[string]interface{}, error) {
	// Exponential backoff between polls: 1s..15s, factor 1.5, with jitter.
	retryPolicy := backoff.Backoff{
		Min:    1 * time.Second,
		Max:    15 * time.Second,
		Factor: 1.5,
		Jitter: true,
	}
	for {
		logging.V(9).Infof("waiting for completion: %+v", resp)
		// There are two styles of operations: one returns a 'done' boolean flag, another one returns status='DONE'.
		done, hasDone := resp["done"].(bool)
		status, hasStatus := resp["status"].(string)
		if completed := (hasDone && done) || (hasStatus && status == "DONE"); completed {
			// Extract an error message from the response, if any.
			var err error
			if failure, has := resp["error"]; has {
				err = errors.Errorf("operation errored with %+v", failure)
			} else if statusMessage, has := resp["statusMessage"]; has {
				err = errors.Errorf("operation failed with %q", statusMessage)
			}
			// Extract the resource response, if any.
			// A partial error could happen, so both response and error could be available.
			if response, has := resp["response"].(map[string]interface{}); has {
				return response, err
			}
			// Delete-style operations carry no resource body; return the operation itself.
			if operationType, has := resp["operationType"].(string); has && strings.Contains(strings.ToLower(operationType), "delete") {
				return resp, err
			}
			// Check if there's a target link.
			if targetLink, has := resp["targetLink"].(string); has {
				// Try reading resource state.
				state, getErr := p.client.RequestWithTimeout("GET", targetLink, nil, 0)
				if getErr != nil {
					if err != nil {
						// Return the original creation error if resource read failed.
						return nil, err
					}
					return nil, getErr
				}
				// A partial error could happen, so both response and error could be available.
				return state, err
			}
			// At this point, we assume either a complete failure or a clean response.
			if err != nil {
				return nil, err
			}
			return resp, nil
		}
		// Not done yet: decide which URL to poll next.
		var pollUri string
		if selfLink, has := resp["selfLink"].(string); has && hasStatus {
			pollUri = selfLink
		} else {
			// Long-running operations are polled at {baseUrl}/v1/{operation name}.
			if name, has := resp["name"].(string); has && strings.HasPrefix(name, "operations/") {
				pollUri = fmt.Sprintf("%s/v1/%s", baseUrl, name)
			}
		}
		// Nothing pollable: treat the current payload as the final response.
		if pollUri == "" {
			return resp, nil
		}
		time.Sleep(retryPolicy.Duration())
		op, err := p.client.RequestWithTimeout("GET", pollUri, nil, 0)
		if err != nil {
			return nil, errors.Wrapf(err, "polling operation status")
		}
		resp = op
	}
}
// Read the current live state associated with a resource.
// Outputs are refreshed from the API; inputs are reconstructed by applying the
// output-projection diff to the previously recorded inputs (see steps below).
func (p *googleCloudProvider) Read(_ context.Context, req *rpc.ReadRequest) (*rpc.ReadResponse, error) {
	urn := resource.URN(req.GetUrn())
	label := fmt.Sprintf("%s.Read(%s)", p.name, urn)
	resourceKey := string(urn.Type())
	res, ok := p.resourceMap.Resources[resourceKey]
	if !ok {
		return nil, errors.Errorf("resource %q not found", resourceKey)
	}
	id := req.GetId()
	uri := res.ResourceUrl(id)
	// Retrieve the old state.
	oldState, err := plugin.UnmarshalProperties(req.GetProperties(), plugin.MarshalOptions{
		Label: fmt.Sprintf("%s.olds", label), KeepUnknowns: true, SkipNulls: true, KeepSecrets: true,
	})
	if err != nil {
		return nil, err
	}
	// Read the current state of the resource from the API.
	newState, err := p.client.RequestWithTimeout("GET", uri, nil, 0)
	if err != nil {
		if reqErr, ok := err.(*googleapi.Error); ok && reqErr.Code == http.StatusNotFound {
			// 404 means that the resource was deleted.
			return &rpc.ReadResponse{Id: ""}, nil
		}
		return nil, fmt.Errorf("error sending request: %s", err)
	}
	// Extract old inputs from the `__inputs` field of the old state.
	inputs := parseCheckpointObject(oldState)
	newStateProps := resource.NewPropertyMapFromMap(newState)
	if inputs == nil {
		// No recorded inputs: the resource was not created by this program (import path).
		return nil, status.Error(codes.Unimplemented, "Import is not yet implemented")
	} else {
		// It's hard to infer the changes in the inputs shape based on the outputs without false positives.
		// The current approach is complicated but it's aimed to minimize the noise while refreshing:
		// 0. We have "old" inputs and outputs before refresh and "new" outputs read from API.
		// 1. Project old outputs to their corresponding input shape (exclude read-only properties).
		oldInputProjection := getInputsFromState(res, oldState)
		// 2. Project new outputs to their corresponding input shape (exclude read-only properties).
		newInputProjection := getInputsFromState(res, newStateProps)
		// 3. Calculate the difference between two projections. This should give us actual significant changes
		// that happened in Google Cloud between the last resource update and its current state.
		diff := oldInputProjection.Diff(newInputProjection)
		// 4. Apply this difference to the actual inputs (not a projection) that we have in state.
		inputs = applyDiff(inputs, diff)
	}
	// Store both outputs and inputs into the state checkpoint.
	checkpoint, err := plugin.MarshalProperties(
		checkpointObject(inputs, newState),
		plugin.MarshalOptions{Label: fmt.Sprintf("%s.checkpoint", label), KeepSecrets: true, SkipNulls: true},
	)
	if err != nil {
		return nil, err
	}
	// The refreshed inputs are returned separately so the engine can diff against the program.
	inputsRecord, err := plugin.MarshalProperties(
		inputs,
		plugin.MarshalOptions{Label: fmt.Sprintf("%s.inputs", label), KeepUnknowns: true, SkipNulls: true},
	)
	if err != nil {
		return nil, err
	}
	return &rpc.ReadResponse{Id: id, Properties: checkpoint, Inputs: inputsRecord}, nil
}
// Update updates an existing resource with new values. The request body is
// built from the new inputs and previous state; for IAM-policy resources the
// read URL suffix is rewritten to the write method. The operation is awaited
// and the new inputs/outputs are checkpointed into the returned state.
func (p *googleCloudProvider) Update(_ context.Context, req *rpc.UpdateRequest) (*rpc.UpdateResponse, error) {
	urn := resource.URN(req.GetUrn())
	label := fmt.Sprintf("%s.Update(%s)", p.name, urn)
	logging.V(9).Infof("%s executing", label)

	// Deserialize RPC inputs.
	inputs, err := plugin.UnmarshalProperties(req.GetNews(), plugin.MarshalOptions{
		Label: fmt.Sprintf("%s.properties", label), SkipNulls: true,
	})
	if err != nil {
		return nil, err
	}
	// Deserialize the last known state.
	oldState, err := plugin.UnmarshalProperties(req.GetOlds(), plugin.MarshalOptions{
		Label: fmt.Sprintf("%s.oldState", label), SkipNulls: true,
	})
	if err != nil {
		return nil, errors.Wrapf(err, "reading resource state")
	}
	resourceKey := string(urn.Type())
	res, ok := p.resourceMap.Resources[resourceKey]
	if !ok {
		return nil, errors.Errorf("resource %q not found", resourceKey)
	}
	body := p.prepareAPIInputs(inputs, oldState, res.UpdateProperties)

	uri := res.ResourceUrl(req.GetId())
	// IAM policies are read via :getIamPolicy but written via :setIamPolicy.
	if strings.HasSuffix(uri, ":getIamPolicy") {
		uri = strings.ReplaceAll(uri, ":getIamPolicy", ":setIamPolicy")
	}

	op, err := p.client.RequestWithTimeout(res.UpdateVerb, uri, body, 0)
	if err != nil {
		return nil, fmt.Errorf("error sending request: %s: %q %+v", err, uri, body)
	}
	resp, err := p.waitForResourceOpCompletion(res.BaseUrl, op)
	if err != nil {
		return nil, errors.Wrapf(err, "waiting for completion")
	}

	// Read the inputs to persist them into state.
	newInputs, err := plugin.UnmarshalProperties(req.GetNews(), plugin.MarshalOptions{
		Label:        fmt.Sprintf("%s.newInputs", label),
		KeepUnknowns: true,
		KeepSecrets:  true,
	})
	if err != nil {
		return nil, errors.Wrapf(err, "diff failed because malformed resource inputs")
	}

	// Store both outputs and inputs into the state and return RPC checkpoint.
	outputs, err := plugin.MarshalProperties(
		checkpointObject(newInputs, resp),
		plugin.MarshalOptions{Label: fmt.Sprintf("%s.response", label), KeepSecrets: true, SkipNulls: true},
	)
	// Bug fix: this marshalling error was previously dropped, returning a
	// possibly-nil checkpoint as success.
	if err != nil {
		return nil, errors.Wrapf(err, "marshalling response")
	}

	return &rpc.UpdateResponse{
		Properties: outputs,
	}, nil
}
// Delete tears down an existing resource with the given ID. If it fails, the resource is assumed
// to still exist.
func (p *googleCloudProvider) Delete(_ context.Context, req *rpc.DeleteRequest) (*empty.Empty, error) {
	urn := resource.URN(req.GetUrn())
	resourceKey := string(urn.Type())
	res, ok := p.resourceMap.Resources[resourceKey]
	if !ok {
		return nil, errors.Errorf("resource %q not found", resourceKey)
	}
	uri := res.ResourceUrl(req.GetId())
	// Some resources cannot be deleted via the API: drop them from state with a warning.
	if res.NoDelete {
		// At the time of writing, the classic GCP provider has the same behavior and warning for 10 resources.
		logging.V(1).Infof("%q resources"+
			" cannot be deleted from Google Cloud. The resource %s will be removed from Pulumi"+
			" state, but will still be present on Google Cloud.", resourceKey, req.GetId())
		return &empty.Empty{}, nil
	}
	resp, err := p.client.RequestWithTimeout("DELETE", uri, nil, 0)
	if err != nil {
		return nil, fmt.Errorf("error sending request: %s", err)
	}
	// Deletion may be a long-running operation; wait until it completes.
	_, err = p.waitForResourceOpCompletion(res.BaseUrl, resp)
	if err != nil {
		return nil, errors.Wrapf(err, "waiting for completion")
	}
	return &empty.Empty{}, nil
}
// Construct creates a new component resource.
// Component resources are not supported by this provider.
func (p *googleCloudProvider) Construct(_ context.Context, _ *rpc.ConstructRequest) (*rpc.ConstructResponse, error) {
	return nil, status.Error(codes.Unimplemented, "Construct is not yet implemented")
}
// Call dynamically executes a method in the provider associated with a component resource.
// Not supported by this provider.
func (p *googleCloudProvider) Call(_ context.Context, _ *rpc.CallRequest) (*rpc.CallResponse, error) {
	return nil, status.Error(codes.Unimplemented, "Call is not yet implemented")
}
// GetPluginInfo returns generic information about this plugin, like its version.
func (p *googleCloudProvider) GetPluginInfo(context.Context, *empty.Empty) (*rpc.PluginInfo, error) {
	return &rpc.PluginInfo{
		Version: p.version,
	}, nil
}
// Cancel signals the provider to gracefully shut down and abort any ongoing resource operations.
// Operations aborted in this way will return an error (e.g., `Update` and `Create` will either a
// creation error or an initialization error). Since Cancel is advisory and non-blocking, it is up
// to the host to decide how long to wait after Cancel is called before (e.g.)
// hard-closing any gRPC connection.
// This implementation is a no-op: in-flight operations are not tracked.
func (p *googleCloudProvider) Cancel(context.Context, *empty.Empty) (*empty.Empty, error) {
	return &empty.Empty{}, nil
}
// setLoggingContext redirects the standard library logger to the Pulumi engine,
// mapping Terraform-style level prefixes (TRACE/DEBUG/INFO/WARN/ERROR) to the
// corresponding engine diagnostic severities.
func (p *googleCloudProvider) setLoggingContext(ctx context.Context) {
	log.SetOutput(&LogRedirector{
		writers: map[string]func(string) error{
			tfTracePrefix: func(msg string) error { return p.host.Log(ctx, diag.Debug, "", msg) },
			tfDebugPrefix: func(msg string) error { return p.host.Log(ctx, diag.Debug, "", msg) },
			tfInfoPrefix:  func(msg string) error { return p.host.Log(ctx, diag.Info, "", msg) },
			tfWarnPrefix:  func(msg string) error { return p.host.Log(ctx, diag.Warning, "", msg) },
			tfErrorPrefix: func(msg string) error { return p.host.Log(ctx, diag.Error, "", msg) },
		},
	})
}
// getConfig returns the provider config value for configName, falling back to
// the environment variable envName when the key is not configured.
func (p *googleCloudProvider) getConfig(configName, envName string) string {
	value, found := p.config[configName]
	if !found {
		return os.Getenv(envName)
	}
	return value
}
func (p *googleCloudProvider) getPartnerName() string |
// getPulumiVersion reads the linked pulumi/pkg module version from build info,
// stripping the leading "v". Falls back to "3" when no build info is present.
func getPulumiVersion() string {
	if info, ok := debug.ReadBuildInfo(); ok {
		for _, dep := range info.Deps {
			if !strings.HasPrefix(dep.Path, "github.com/pulumi/pulumi/pkg") {
				continue
			}
			return strings.TrimPrefix(dep.Version, "v")
		}
	}
	// We should never get here but let's not panic and return something sensible if we do.
	logging.V(4).Info("No Pulumi package version found, using '3' as the default version for user-agent")
	return "3"
}
// partialError creates an error for resources that did not complete an operation in progress.
// The last known state of the object is included in the error so that it can be checkpointed.
func partialError(id string, err error, state *structpb.Struct, inputs *structpb.Struct) error {
	detail := rpc.ErrorResourceInitFailed{
		Id:         id,
		Properties: state,
		Reasons:    []string{err.Error()},
		Inputs:     inputs,
	}
	// Attach the detail payload so the engine can persist the partial state.
	return rpcerror.WithDetails(rpcerror.New(codes.Unknown, err.Error()), &detail)
}
// checkpointObject builds the state checkpoint: the raw API outputs plus the
// program inputs, stored secret-wrapped under the reserved `__inputs` key.
func checkpointObject(inputs resource.PropertyMap, outputs map[string]interface{}) resource.PropertyMap {
	state := resource.NewPropertyMapFromMap(outputs)
	secretInputs := resource.MakeSecret(resource.NewObjectProperty(inputs))
	state["__inputs"] = secretInputs
	return state
}
// parseCheckpointObject returns inputs that are saved in the `__inputs` field of the state.
// Returns nil when the state carries no recorded inputs.
func parseCheckpointObject(obj resource.PropertyMap) resource.PropertyMap {
	if inputs, ok := obj["__inputs"]; ok {
		// checkpointObject stores inputs secret-wrapped, but guard against a
		// plain object too: calling SecretValue() on a non-secret value would
		// panic (e.g. on states written with secrets stripped).
		if inputs.IsSecret() {
			return inputs.SecretValue().Element.ObjectValue()
		}
		if inputs.IsObject() {
			return inputs.ObjectValue()
		}
	}
	return nil
}
| {
result := p.getConfig("partnerName", "GOOGLE_PARTNER_NAME")
if result != "" {
return result
} else {
disablePartner := p.getConfig("disablePartnerName", "GOOGLE_DISABLE_PARTNER_NAME")
if disablePartner == "true" {
return ""
}
}
return "Pulumi"
} | identifier_body |
provider.go | // Copyright 2016-2021, Pulumi Corporation.
package provider
import (
"bytes"
"compress/gzip"
"context"
"encoding/json"
"fmt"
"log"
"net/http"
"os"
"runtime/debug"
"strings"
"time"
"github.com/golang/protobuf/ptypes/empty"
structpb "github.com/golang/protobuf/ptypes/struct"
"github.com/jpillora/backoff"
"github.com/pkg/errors"
"github.com/pulumi/pulumi-google-native/provider/pkg/googleclient"
"github.com/pulumi/pulumi-google-native/provider/pkg/resources"
"github.com/pulumi/pulumi-google-native/provider/pkg/version"
"github.com/pulumi/pulumi/pkg/v3/resource/provider"
"github.com/pulumi/pulumi/sdk/v3/go/common/diag"
"github.com/pulumi/pulumi/sdk/v3/go/common/resource"
"github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin"
"github.com/pulumi/pulumi/sdk/v3/go/common/util/logging"
"github.com/pulumi/pulumi/sdk/v3/go/common/util/rpcutil/rpcerror"
rpc "github.com/pulumi/pulumi/sdk/v3/proto/go"
"google.golang.org/api/googleapi"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// googleCloudProvider implements the Pulumi resource-provider gRPC interface on
// top of the Google Cloud REST APIs described by the bundled resource metadata.
type googleCloudProvider struct {
	host        *provider.HostClient         // engine host, used for logging back to the Pulumi engine
	name        string                       // provider name, used as a prefix in log labels
	version     string                       // provider version reported by GetPluginInfo
	config      map[string]string            // provider configuration collected in Configure
	schemaBytes []byte                       // pre-generated Pulumi schema served by GetSchema
	client      *googleclient.GoogleClient   // authenticated HTTP client, created in Configure
	resourceMap *resources.CloudAPIMetadata  // metadata describing all resources and functions
	converter   *resources.SdkShapeConverter // converts SDK property shapes to/from API bodies
}
// makeProvider constructs a ResourceProviderServer backed by the given Pulumi
// schema and gzip-compressed cloud API metadata. The HTTP client is not created
// here; it is initialized later, in Configure.
func makeProvider(host *provider.HostClient, name, version string, schemaBytes []byte,
	cloudAPIResourcesBytes []byte) (rpc.ResourceProviderServer, error) {
	resourceMap, err := loadMetadata(cloudAPIResourcesBytes)
	if err != nil {
		return nil, err
	}
	// Return the new provider
	return &googleCloudProvider{
		host:        host,
		name:        name,
		version:     version,
		config:      map[string]string{},
		schemaBytes: schemaBytes,
		resourceMap: resourceMap,
		converter:   &resources.SdkShapeConverter{Types: resourceMap.Types},
	}, nil
}
// loadMetadata deserializes the provided compressed json byte array into a CloudAPIMetadata struct.
func loadMetadata(metadataBytes []byte) (*resources.CloudAPIMetadata, error) {
	var resourceMap resources.CloudAPIMetadata
	uncompressed, err := gzip.NewReader(bytes.NewReader(metadataBytes))
	if err != nil {
		return nil, errors.Wrap(err, "expand compressed metadata")
	}
	if err = json.NewDecoder(uncompressed).Decode(&resourceMap); err != nil {
		return nil, errors.Wrap(err, "unmarshalling resource map")
	}
	// Close verifies the gzip checksum, so its error is significant.
	if err = uncompressed.Close(); err != nil {
		return nil, errors.Wrap(err, "closing uncompress stream for metadata")
	}
	return &resourceMap, nil
}
// Configure configures the resource provider with "globals" that control its behavior.
func (p *googleCloudProvider) Configure(ctx context.Context,
	req *rpc.ConfigureRequest) (*rpc.ConfigureResponse, error) {
	// Strip the provider prefix so config keys can be looked up by bare name.
	for key, val := range req.GetVariables() {
		p.config[strings.TrimPrefix(key, "google-native:config:")] = val
	}
	p.setLoggingContext(ctx)
	// The delegates list is passed as a JSON-encoded array of strings.
	impersonateServiceAccountDelegatesString := p.getConfig("impersonateServiceAccountDelegates", "")
	var impersonateServiceAccountDelegates []string
	if impersonateServiceAccountDelegatesString != "" {
		err := json.Unmarshal([]byte(impersonateServiceAccountDelegatesString), &impersonateServiceAccountDelegates)
		if err != nil {
			return nil, errors.Wrapf(err, "failed to unmarshal %q as Impersonate Service Account Delegates", impersonateServiceAccountDelegatesString)
		}
	}
	// OAuth scopes are likewise a JSON-encoded array of strings.
	scopesString := p.getConfig("scopes", "")
	var scopes []string
	if scopesString != "" {
		err := json.Unmarshal([]byte(scopesString), &scopes)
		if err != nil {
			return nil, errors.Wrapf(err, "failed to unmarshal %q as Scopes", scopesString)
		}
	}
	appendUserAgent := p.getConfig("appendUserAgent", "GOOGLE_APPEND_USER_AGENT")
	// Assemble the HTTP client configuration from provider config and env vars.
	config := googleclient.Config{
		Credentials:                        p.getConfig("credentials", "GOOGLE_CREDENTIALS"),
		AccessToken:                        p.getConfig("accessToken", "GOOGLE_OAUTH_ACCESS_TOKEN"),
		ImpersonateServiceAccount:          p.getConfig("impersonateServiceAccount", "GOOGLE_IMPERSONATE_SERVICE_ACCOUNT"),
		ImpersonateServiceAccountDelegates: impersonateServiceAccountDelegates,
		Scopes:                             scopes,
		PulumiVersion:                      getPulumiVersion(),
		ProviderVersion:                    version.Version,
		PartnerName:                        p.getPartnerName(),
		AppendUserAgent:                    appendUserAgent,
	}
	client, err := googleclient.New(ctx, config)
	if err != nil {
		return nil, err
	}
	p.client = client
	return &rpc.ConfigureResponse{
		// Accept secret values from the engine without requiring encryption.
		AcceptSecrets: true,
	}, nil
}
// Invoke dynamically executes a built-in function (data source) in the provider.
func (p *googleCloudProvider) Invoke(_ context.Context, req *rpc.InvokeRequest) (*rpc.InvokeResponse, error) {
	label := fmt.Sprintf("%s.Invoke(%s)", p.name, req.Tok)
	inv, ok := p.resourceMap.Functions[req.Tok]
	if !ok {
		return nil, errors.Errorf("invoke %q not found", req.Tok)
	}
	args, err := plugin.UnmarshalProperties(req.GetArgs(), plugin.MarshalOptions{
		Label: fmt.Sprintf("%s.args", label), SkipNulls: true,
	})
	if err != nil {
		return nil, err
	}
	// Apply default config values. Only "project" is defaulted for invokes.
	for _, param := range inv.Params {
		sdkName := param.Name
		if param.SdkName != "" {
			sdkName = param.SdkName
		}
		switch sdkName {
		case "project":
			key := resource.PropertyKey(sdkName)
			if value, ok := p.getDefaultValue(key, sdkName, args); ok {
				args[key] = *value
			}
		}
	}
	uri, err := buildFunctionUrl(inv, args)
	if err != nil {
		return nil, err
	}
	// Timeout 0 falls back to the client's default timeout.
	resp, err := p.client.RequestWithTimeout(inv.Verb, uri, nil, 0)
	if err != nil {
		return nil, fmt.Errorf("error sending request: %s", err)
	}
	// Serialize and return outputs.
	result, err := plugin.MarshalProperties(
		resource.NewPropertyMapFromMap(resp),
		plugin.MarshalOptions{Label: fmt.Sprintf("%s.response", label), SkipNulls: true},
	)
	if err != nil {
		return nil, err
	}
	return &rpc.InvokeResponse{Return: result}, nil
}
// StreamInvoke dynamically executes a built-in function in the provider. The result is streamed
// back as a series of messages.
// This provider does not support streaming invokes.
func (p *googleCloudProvider) StreamInvoke(_ *rpc.InvokeRequest, _ rpc.ResourceProvider_StreamInvokeServer) error {
	return status.Error(codes.Unimplemented, "StreamInvoke is not yet implemented")
}
// Check validates that the given property bag is valid for a resource of the given type and returns
// the inputs that should be passed to successive calls to Diff, Create, or Update for this
// resource. As a rule, the provider inputs returned by a call to Check should preserve the original
// representation of the properties as present in the program inputs. Though this rule is not
// required for correctness, violations thereof can negatively impact the end-user experience, as
// the provider inputs are using for detecting and rendering diffs.
func (p *googleCloudProvider) Check(_ context.Context, req *rpc.CheckRequest) (*rpc.CheckResponse, error) {
urn := resource.URN(req.GetUrn())
label := fmt.Sprintf("%s.Check(%s)", p.name, urn)
logging.V(9).Infof("%s executing", label)
// Deserialize RPC inputs.
olds, err := plugin.UnmarshalProperties(req.GetOlds(), plugin.MarshalOptions{
Label: fmt.Sprintf("%s.olds", label), SkipNulls: true,
})
if err != nil {
return nil, err
}
news, err := plugin.UnmarshalProperties(req.GetNews(), plugin.MarshalOptions{
Label: fmt.Sprintf("%s.news", label), KeepUnknowns: true, SkipNulls: true,
})
if err != nil {
return nil, err
}
// Look up resource metadata by the URN's type token.
resourceKey := string(urn.Type())
res, ok := p.resourceMap.Resources[resourceKey]
if !ok {
return nil, errors.Errorf("resource %q not found", resourceKey)
}
// Apply default config values.
var failures []*rpc.CheckFailure
for _, param := range res.CreateParams {
// A param may carry an SDK-facing alias; prefer it when present.
sdkName := param.Name
if param.SdkName != "" {
sdkName = param.SdkName
}
switch sdkName {
case "project", "location", "zone":
key := resource.PropertyKey(sdkName)
configName := sdkName
// The provider config key corresponding to "location" is named "region".
if sdkName == "location" {
configName = "region"
}
// An explicit value in the new inputs always wins; skip defaulting.
if _, has := news[key]; has {
continue
}
// Fall back to old inputs, then provider config; otherwise report a check failure.
if value, ok := p.getDefaultValue(key, configName, olds); ok {
news[key] = *value
} else {
reason := fmt.Sprintf("missing required property '%s'. Either set it explicitly or configure it with 'pulumi config set google-native:%s <value>'.", sdkName, configName)
failures = append(failures, &rpc.CheckFailure{
Reason: reason,
})
}
}
}
// Auto-naming: synthesize a name from the URN when the resource supports it and none was given.
nameKey := resource.PropertyKey("name")
if res.AutoNamePattern != "" && !news.HasValue(nameKey) {
news[nameKey] = getDefaultName(urn, res.AutoNamePattern, olds, news)
}
// Apply property patterns.
for name, prop := range res.CreateProperties {
key := resource.PropertyKey(name)
if prop.SdkName != "" {
key = resource.PropertyKey(prop.SdkName)
}
if value, ok := applyPropertyPattern(key, prop, news); ok {
news[key] = *value
}
}
// Serialize the (possibly augmented) inputs back to RPC form.
resInputs, err := plugin.MarshalProperties(news, plugin.MarshalOptions{
Label: fmt.Sprintf("%s.resInputs", label), KeepUnknowns: true})
if err != nil {
return nil, err
}
return &rpc.CheckResponse{Inputs: resInputs, Failures: failures}, nil
}
// getDefaultValue resolves a default for the given property: first from the
// old inputs, then from the provider configuration. The boolean result
// reports whether a default was found.
func (p *googleCloudProvider) getDefaultValue(key resource.PropertyKey, configName string, olds resource.PropertyMap) (*resource.PropertyValue, bool) {
	// Prefer the value recorded in the old inputs, if any.
	if old, has := olds[key]; has {
		return &old, true
	}
	// Otherwise fall back to the provider config.
	if configured, has := p.config[configName]; has {
		value := resource.NewStringProperty(configured)
		return &value, true
	}
	return nil, false
}
// GetSchema returns the Pulumi schema for this provider, which is embedded
// at build time. Only version 0 (the unversioned schema) is supported.
func (p *googleCloudProvider) GetSchema(_ context.Context, req *rpc.GetSchemaRequest) (*rpc.GetSchemaResponse, error) {
	requested := req.GetVersion()
	if requested != 0 {
		return nil, fmt.Errorf("unsupported schema version %d", requested)
	}
	schema := string(p.schemaBytes)
	return &rpc.GetSchemaResponse{Schema: schema}, nil
}
// CheckConfig validates the configuration for this provider.
// No validation is performed here: the supplied configuration is accepted
// verbatim and echoed back as the checked inputs.
func (p *googleCloudProvider) CheckConfig(_ context.Context, req *rpc.CheckRequest) (*rpc.CheckResponse, error) {
return &rpc.CheckResponse{Inputs: req.GetNews()}, nil
}
// DiffConfig diffs the configuration for this provider. Configuration changes
// never require replacement of the provider, so an empty diff is always returned.
func (p *googleCloudProvider) DiffConfig(context.Context, *rpc.DiffRequest) (*rpc.DiffResponse, error) {
	response := rpc.DiffResponse{
		Changes:             0,
		Replaces:            []string{},
		Stables:             []string{},
		DeleteBeforeReplace: false,
	}
	return &response, nil
}
// Diff checks what impacts a hypothetical update will have on the resource's properties.
func (p *googleCloudProvider) Diff(_ context.Context, req *rpc.DiffRequest) (*rpc.DiffResponse, error) {
urn := resource.URN(req.GetUrn())
label := fmt.Sprintf("%s.Diff(%s)", p.name, urn)
logging.V(9).Infof("%s executing", label)
resourceKey := string(urn.Type())
res, ok := p.resourceMap.Resources[resourceKey]
if !ok {
return nil, errors.Errorf("resource %q not found", resourceKey)
}
oldState, err := plugin.UnmarshalProperties(req.GetOlds(), plugin.MarshalOptions{
Label: fmt.Sprintf("%s.oldState", label),
KeepUnknowns: true,
KeepSecrets: true,
})
if err != nil {
return nil, errors.Wrapf(err, "diff failed because malformed resource inputs")
}
// Extract old inputs from the `__inputs` field of the old state.
oldInputs := parseCheckpointObject(oldState)
newInputs, err := plugin.UnmarshalProperties(req.GetNews(), plugin.MarshalOptions{
Label: fmt.Sprintf("%s.newInputs", label),
KeepUnknowns: true,
KeepSecrets: true,
})
if err != nil {
return nil, errors.Wrapf(err, "diff failed because malformed resource inputs")
}
// The diff is computed on inputs, not raw outputs: no input change means no change at all.
diff := oldInputs.Diff(newInputs)
if diff == nil {
return &rpc.DiffResponse{Changes: rpc.DiffResponse_DIFF_NONE}, nil
}
// Collect every property that can only be set at create time; changes to any
// of them require replacing the resource.
var replaces []string
for _, p := range res.CreateParams {
sdkName := p.Name
if p.SdkName != "" {
sdkName = p.SdkName
}
replaces = append(replaces, sdkName)
}
// Properties present in the create body but absent from the update body are create-only.
for name, prop := range res.CreateProperties {
if _, has := res.UpdateProperties[name]; !has {
sdkName := name
if prop.SdkName != "" {
sdkName = prop.SdkName
}
replaces = append(replaces, sdkName)
}
}
// Uploads are only supported for create methods, not updates.
if res.AssetUpload {
if _, ok := diff.Updates[resource.PropertyKey("source")]; ok {
replaces = append(replaces, "source")
}
}
// NOTE(review): `replaces` appears to list candidate replace-triggering keys rather than
// keys confirmed changed — confirm how the engine intersects this with the actual diff.
return &rpc.DiffResponse{Changes: rpc.DiffResponse_DIFF_UNKNOWN, Replaces: replaces, DeleteBeforeReplace: true}, nil
}
// Create allocates a new instance of the provided resource and returns its unique ID afterwards.
// On a partially-failed creation (operation errored but a resource exists), it returns a
// partialError carrying the last known state so the engine can checkpoint it.
func (p *googleCloudProvider) Create(ctx context.Context, req *rpc.CreateRequest) (*rpc.CreateResponse, error) {
	urn := resource.URN(req.GetUrn())
	label := fmt.Sprintf("%s.Create(%s)", p.name, urn)
	logging.V(9).Infof("%s executing", label)

	// Deserialize RPC inputs.
	inputs, err := plugin.UnmarshalProperties(req.GetProperties(), plugin.MarshalOptions{
		Label: fmt.Sprintf("%s.properties", label), SkipNulls: true,
	})
	if err != nil {
		return nil, err
	}
	inputsMap := inputs.Mappable()

	resourceKey := string(urn.Type())
	res, ok := p.resourceMap.Resources[resourceKey]
	if !ok {
		return nil, errors.Errorf("resource %q not found", resourceKey)
	}

	uri, err := buildCreateUrl(res, inputs)
	if err != nil {
		return nil, err
	}

	// Shape the SDK inputs into the raw API request body.
	body := p.prepareAPIInputs(inputs, nil, res.CreateProperties)

	var op map[string]interface{}
	if res.AssetUpload {
		// Read the asset or archive bytes to upload alongside the JSON body.
		var content []byte
		source := inputs["source"]
		if source.IsAsset() {
			content, err = source.AssetValue().Bytes()
		} else if source.IsArchive() {
			content, err = source.ArchiveValue().Bytes(resource.ZIPArchive)
		}
		if err != nil {
			return nil, err
		}
		op, err = p.client.UploadWithTimeout(res.CreateVerb, uri, body, content, 0)
		if err != nil {
			return nil, fmt.Errorf("error sending upload request: %s: %q %+v %d", err, uri, inputs.Mappable(), len(content))
		}
	} else {
		op, err = p.client.RequestWithTimeout(res.CreateVerb, uri, body, 0)
		if err != nil {
			return nil, fmt.Errorf("error sending request: %s: %q %+v", err, uri, inputs.Mappable())
		}
	}

	resp, err := p.waitForResourceOpCompletion(res.BaseUrl, op)
	if err != nil {
		if resp == nil {
			return nil, errors.Wrapf(err, "waiting for completion")
		}
		// A partial failure may have occurred because we got an error and a response.
		// Try reading the resource state and return a partial error if there is some.
		id, idErr := calculateResourceId(res, inputsMap, resp)
		if idErr != nil {
			return nil, errors.Wrapf(err, "waiting for completion / calculate ID %s", idErr)
		}
		readResp, getErr := p.client.RequestWithTimeout("GET", id, nil, 0)
		if getErr != nil {
			return nil, errors.Wrapf(err, "waiting for completion / read state %s", getErr)
		}
		checkpoint, cpErr := plugin.MarshalProperties(
			checkpointObject(inputs, readResp),
			plugin.MarshalOptions{Label: fmt.Sprintf("%s.partialCheckpoint", label), KeepSecrets: true, SkipNulls: true},
		)
		if cpErr != nil {
			return nil, errors.Wrapf(err, "waiting for completion / checkpoint %s", cpErr)
		}
		return nil, partialError(id, err, checkpoint, req.GetProperties())
	}

	// Store both outputs and inputs into the state.
	checkpoint, err := plugin.MarshalProperties(
		checkpointObject(inputs, resp),
		plugin.MarshalOptions{Label: fmt.Sprintf("%s.checkpoint", label), KeepSecrets: true, SkipNulls: true},
	)
	// BUG FIX: this error was previously dropped (silently shadowed by the
	// calculateResourceId call below) — surface it explicitly.
	if err != nil {
		return nil, errors.Wrapf(err, "marshalling checkpoint")
	}

	id, err := calculateResourceId(res, inputsMap, resp)
	if err != nil {
		return nil, errors.Wrapf(err, "calculating resource ID")
	}

	return &rpc.CreateResponse{
		Id:         id,
		Properties: checkpoint,
	}, nil
}
// prepareAPIInputs converts SDK-shaped inputs (and, for updates, the last
// known state) into the raw request body expected by the REST API.
// NOTE: state may be nil (Create passes nil); Mappable on a nil map is a no-op range.
func (p *googleCloudProvider) prepareAPIInputs(
	inputs, state resource.PropertyMap,
	properties map[string]resources.CloudAPIProperty) map[string]interface{} {
	rawInputs := inputs.Mappable()
	rawState := state.Mappable()
	return p.converter.SdkPropertiesToRequestBody(properties, rawInputs, rawState)
}
// waitForResourceOpCompletion keeps polling the resource or operation URL until it gets
// a success or a failure of provisioning.
// Note that both a response and an error can be returned in case of a partially-failed deployment
// (e.g., resource is created but failed to initialize to completion).
func (p *googleCloudProvider) waitForResourceOpCompletion(baseUrl string, resp map[string]interface{}) (map[string]interface{}, error) {
// Exponential backoff between polls: 1s min, 15s max, x1.5 growth, with jitter.
retryPolicy := backoff.Backoff{
Min: 1 * time.Second,
Max: 15 * time.Second,
Factor: 1.5,
Jitter: true,
}
for {
logging.V(9).Infof("waiting for completion: %+v", resp)
// There are two styles of operations: one returns a 'done' boolean flag, another one returns status='DONE'.
done, hasDone := resp["done"].(bool)
status, hasStatus := resp["status"].(string)
if completed := (hasDone && done) || (hasStatus && status == "DONE"); completed {
// Extract an error message from the response, if any.
var err error
if failure, has := resp["error"]; has {
err = errors.Errorf("operation errored with %+v", failure)
} else if statusMessage, has := resp["statusMessage"]; has {
err = errors.Errorf("operation failed with %q", statusMessage)
}
// Extract the resource response, if any.
// A partial error could happen, so both response and error could be available.
if response, has := resp["response"].(map[string]interface{}); has {
return response, err
}
// Delete operations have no resource to read back; return the operation itself.
if operationType, has := resp["operationType"].(string); has && strings.Contains(strings.ToLower(operationType), "delete") {
return resp, err
}
// Check if there's a target link.
if targetLink, has := resp["targetLink"].(string); has {
// Try reading resource state.
state, getErr := p.client.RequestWithTimeout("GET", targetLink, nil, 0)
if getErr != nil {
if err != nil {
// Return the original creation error if resource read failed.
return nil, err
}
return nil, getErr
}
// A partial error could happen, so both response and error could be available.
return state, err
}
// At this point, we assume either a complete failure or a clean response.
if err != nil {
return nil, err
}
return resp, nil
}
// Not completed yet: figure out where to poll next.
var pollUri string
if selfLink, has := resp["selfLink"].(string); has && hasStatus {
pollUri = selfLink
} else {
// Long-running operations are addressed by "operations/..." names relative to the API base.
if name, has := resp["name"].(string); has && strings.HasPrefix(name, "operations/") {
pollUri = fmt.Sprintf("%s/v1/%s", baseUrl, name)
}
}
// No pollable URI: treat the response as final even though it isn't marked done.
if pollUri == "" {
return resp, nil
}
time.Sleep(retryPolicy.Duration())
op, err := p.client.RequestWithTimeout("GET", pollUri, nil, 0)
if err != nil {
return nil, errors.Wrapf(err, "polling operation status")
}
resp = op
}
}
// Read the current live state associated with a resource.
// Returns an empty ID if the resource no longer exists in Google Cloud (signals deletion).
func (p *googleCloudProvider) Read(_ context.Context, req *rpc.ReadRequest) (*rpc.ReadResponse, error) {
urn := resource.URN(req.GetUrn())
label := fmt.Sprintf("%s.Read(%s)", p.name, urn)
resourceKey := string(urn.Type())
res, ok := p.resourceMap.Resources[resourceKey]
if !ok {
return nil, errors.Errorf("resource %q not found", resourceKey)
}
id := req.GetId()
uri := res.ResourceUrl(id)
// Retrieve the old state.
oldState, err := plugin.UnmarshalProperties(req.GetProperties(), plugin.MarshalOptions{
Label: fmt.Sprintf("%s.olds", label), KeepUnknowns: true, SkipNulls: true, KeepSecrets: true,
})
if err != nil {
return nil, err
}
// Read the current state of the resource from the API.
newState, err := p.client.RequestWithTimeout("GET", uri, nil, 0)
if err != nil {
if reqErr, ok := err.(*googleapi.Error); ok && reqErr.Code == http.StatusNotFound {
// 404 means that the resource was deleted.
return &rpc.ReadResponse{Id: ""}, nil
}
return nil, fmt.Errorf("error sending request: %s", err)
}
// Extract old inputs from the `__inputs` field of the old state.
inputs := parseCheckpointObject(oldState)
newStateProps := resource.NewPropertyMapFromMap(newState)
// No saved inputs means this is an import of an existing resource, which isn't supported yet.
if inputs == nil {
return nil, status.Error(codes.Unimplemented, "Import is not yet implemented")
} else {
// It's hard to infer the changes in the inputs shape based on the outputs without false positives.
// The current approach is complicated but it's aimed to minimize the noise while refreshing:
// 0. We have "old" inputs and outputs before refresh and "new" outputs read from API.
// 1. Project old outputs to their corresponding input shape (exclude read-only properties).
oldInputProjection := getInputsFromState(res, oldState)
// 2. Project new outputs to their corresponding input shape (exclude read-only properties).
newInputProjection := getInputsFromState(res, newStateProps)
// 3. Calculate the difference between two projections. This should give us actual significant changes
// that happened in Google Cloud between the last resource update and its current state.
diff := oldInputProjection.Diff(newInputProjection)
// 4. Apply this difference to the actual inputs (not a projection) that we have in state.
inputs = applyDiff(inputs, diff)
}
// Store both outputs and inputs into the state checkpoint.
checkpoint, err := plugin.MarshalProperties(
checkpointObject(inputs, newState),
plugin.MarshalOptions{Label: fmt.Sprintf("%s.checkpoint", label), KeepSecrets: true, SkipNulls: true},
)
if err != nil {
return nil, err
}
// Also serialize the refreshed inputs separately for the response.
inputsRecord, err := plugin.MarshalProperties(
inputs,
plugin.MarshalOptions{Label: fmt.Sprintf("%s.inputs", label), KeepUnknowns: true, SkipNulls: true},
)
if err != nil {
return nil, err
}
return &rpc.ReadResponse{Id: id, Properties: checkpoint, Inputs: inputsRecord}, nil
}
// Update updates an existing resource with new values. It sends the update request,
// waits for the (possibly long-running) operation to complete, and returns a state
// checkpoint combining the API outputs and the new inputs.
func (p *googleCloudProvider) Update(_ context.Context, req *rpc.UpdateRequest) (*rpc.UpdateResponse, error) {
	urn := resource.URN(req.GetUrn())
	label := fmt.Sprintf("%s.Update(%s)", p.name, urn)
	logging.V(9).Infof("%s executing", label)

	// Deserialize RPC inputs.
	inputs, err := plugin.UnmarshalProperties(req.GetNews(), plugin.MarshalOptions{
		Label: fmt.Sprintf("%s.properties", label), SkipNulls: true,
	})
	if err != nil {
		return nil, err
	}
	// Deserialize the last known state.
	oldState, err := plugin.UnmarshalProperties(req.GetOlds(), plugin.MarshalOptions{
		Label: fmt.Sprintf("%s.oldState", label), SkipNulls: true,
	})
	if err != nil {
		return nil, errors.Wrapf(err, "reading resource state")
	}

	resourceKey := string(urn.Type())
	res, ok := p.resourceMap.Resources[resourceKey]
	if !ok {
		return nil, errors.Errorf("resource %q not found", resourceKey)
	}

	body := p.prepareAPIInputs(inputs, oldState, res.UpdateProperties)

	uri := res.ResourceUrl(req.GetId())
	// IAM policies are read via :getIamPolicy but written via :setIamPolicy.
	if strings.HasSuffix(uri, ":getIamPolicy") {
		uri = strings.ReplaceAll(uri, ":getIamPolicy", ":setIamPolicy")
	}

	op, err := p.client.RequestWithTimeout(res.UpdateVerb, uri, body, 0)
	if err != nil {
		return nil, fmt.Errorf("error sending request: %s: %q %+v", err, uri, body)
	}
	resp, err := p.waitForResourceOpCompletion(res.BaseUrl, op)
	if err != nil {
		return nil, errors.Wrapf(err, "waiting for completion")
	}

	// Read the inputs to persist them into state.
	newInputs, err := plugin.UnmarshalProperties(req.GetNews(), plugin.MarshalOptions{
		Label:        fmt.Sprintf("%s.newInputs", label),
		KeepUnknowns: true,
		KeepSecrets:  true,
	})
	if err != nil {
		return nil, errors.Wrapf(err, "diff failed because malformed resource inputs")
	}

	// Store both outputs and inputs into the state and return RPC checkpoint.
	outputs, err := plugin.MarshalProperties(
		checkpointObject(newInputs, resp),
		plugin.MarshalOptions{Label: fmt.Sprintf("%s.response", label), KeepSecrets: true, SkipNulls: true},
	)
	// BUG FIX: the marshalling error was previously ignored, so a response with
	// potentially-nil properties could be returned. Surface it instead.
	if err != nil {
		return nil, errors.Wrapf(err, "marshalling response")
	}

	return &rpc.UpdateResponse{
		Properties: outputs,
	}, nil
}
// Delete tears down an existing resource with the given ID. If it fails, the resource is assumed
// to still exist.
func (p *googleCloudProvider) Delete(_ context.Context, req *rpc.DeleteRequest) (*empty.Empty, error) {
	urn := resource.URN(req.GetUrn())
	resourceKey := string(urn.Type())
	res, found := p.resourceMap.Resources[resourceKey]
	if !found {
		return nil, errors.Errorf("resource %q not found", resourceKey)
	}
	uri := res.ResourceUrl(req.GetId())
	if res.NoDelete {
		// Some APIs expose no delete method for this resource type.
		// At the time of writing, the classic GCP provider has the same behavior and warning for 10 resources.
		logging.V(1).Infof("%q resources"+
			" cannot be deleted from Google Cloud. The resource %s will be removed from Pulumi"+
			" state, but will still be present on Google Cloud.", resourceKey, req.GetId())
		return &empty.Empty{}, nil
	}
	deleteResp, err := p.client.RequestWithTimeout("DELETE", uri, nil, 0)
	if err != nil {
		return nil, fmt.Errorf("error sending request: %s", err)
	}
	// Deletion may be a long-running operation; block until it finishes.
	if _, err = p.waitForResourceOpCompletion(res.BaseUrl, deleteResp); err != nil {
		return nil, errors.Wrapf(err, "waiting for completion")
	}
	return &empty.Empty{}, nil
}
// Construct creates a new component resource.
func (p *googleCloudProvider) | (_ context.Context, _ *rpc.ConstructRequest) (*rpc.ConstructResponse, error) {
return nil, status.Error(codes.Unimplemented, "Construct is not yet implemented")
}
// Call dynamically executes a method in the provider associated with a component resource.
// Component methods are not supported by this provider.
func (p *googleCloudProvider) Call(_ context.Context, _ *rpc.CallRequest) (*rpc.CallResponse, error) {
return nil, status.Error(codes.Unimplemented, "Call is not yet implemented")
}
// GetPluginInfo returns generic information about this plugin, like its version.
func (p *googleCloudProvider) GetPluginInfo(context.Context, *empty.Empty) (*rpc.PluginInfo, error) {
	info := rpc.PluginInfo{Version: p.version}
	return &info, nil
}
// Cancel signals the provider to gracefully shut down and abort any ongoing resource operations.
// Operations aborted in this way will return an error (e.g., `Update` and `Create` will either a
// creation error or an initialization error). Since Cancel is advisory and non-blocking, it is up
// to the host to decide how long to wait after Cancel is called before (e.g.)
// hard-closing any gRPC connection.
// This provider performs no special cleanup; the call is a no-op that reports success.
func (p *googleCloudProvider) Cancel(context.Context, *empty.Empty) (*empty.Empty, error) {
return &empty.Empty{}, nil
}
// setLoggingContext redirects the standard library logger into the Pulumi
// host's diagnostics, mapping the well-known TF-style line prefixes to severities.
func (p *googleCloudProvider) setLoggingContext(ctx context.Context) {
	forward := func(sev diag.Severity) func(string) error {
		return func(msg string) error { return p.host.Log(ctx, sev, "", msg) }
	}
	log.SetOutput(&LogRedirector{
		writers: map[string]func(string) error{
			tfTracePrefix: forward(diag.Debug),
			tfDebugPrefix: forward(diag.Debug),
			tfInfoPrefix:  forward(diag.Info),
			tfWarnPrefix:  forward(diag.Warning),
			tfErrorPrefix: forward(diag.Error),
		},
	})
}
// getConfig returns the provider configuration value for configName, falling
// back to the environment variable envName when the key is not configured.
func (p *googleCloudProvider) getConfig(configName, envName string) string {
	if configured, found := p.config[configName]; found {
		return configured
	}
	return os.Getenv(envName)
}
// getPartnerName determines the partner name to report in the user agent.
// An explicitly configured name wins; otherwise "Pulumi" is used unless
// partner attribution is disabled via config or environment.
func (p *googleCloudProvider) getPartnerName() string {
	if name := p.getConfig("partnerName", "GOOGLE_PARTNER_NAME"); name != "" {
		return name
	}
	if p.getConfig("disablePartnerName", "GOOGLE_DISABLE_PARTNER_NAME") == "true" {
		return ""
	}
	return "Pulumi"
}
// getPulumiVersion reads the version of the linked Pulumi SDK from the
// binary's build info for use in the user-agent string, defaulting to "3"
// when it cannot be determined.
func getPulumiVersion() string {
	const pulumiPkgPrefix = "github.com/pulumi/pulumi/pkg"
	if info, ok := debug.ReadBuildInfo(); ok {
		for _, module := range info.Deps {
			if strings.HasPrefix(module.Path, pulumiPkgPrefix) {
				return strings.TrimPrefix(module.Version, "v")
			}
		}
	}
	// We should never get here but let's not panic and return something sensible if we do.
	logging.V(4).Info("No Pulumi package version found, using '3' as the default version for user-agent")
	return "3"
}
// partialError creates an error for resources that did not complete an operation in progress.
// The last known state of the object is included in the error so that it can be checkpointed.
// The engine recognizes the attached ErrorResourceInitFailed detail and records the partial state.
func partialError(id string, err error, state *structpb.Struct, inputs *structpb.Struct) error {
detail := rpc.ErrorResourceInitFailed{
Id: id,
Properties: state,
Reasons: []string{err.Error()},
Inputs: inputs,
}
return rpcerror.WithDetails(rpcerror.New(codes.Unknown, err.Error()), &detail)
}
// checkpointObject builds the state object persisted for a resource: the raw
// API outputs plus the original inputs tucked away (as a secret) under `__inputs`.
func checkpointObject(inputs resource.PropertyMap, outputs map[string]interface{}) resource.PropertyMap {
	state := resource.NewPropertyMapFromMap(outputs)
	state["__inputs"] = resource.MakeSecret(resource.NewObjectProperty(inputs))
	return state
}
// parseCheckpointObject returns inputs that are saved in the `__inputs` field of the state.
// Returns nil when the field is absent or does not have the expected
// secret-wrapped object shape.
// BUG FIX: the original called SecretValue().Element.ObjectValue() unconditionally,
// panicking on states where `__inputs` was not a secret object (e.g. hand-edited
// or older-format checkpoints). Guard the shape before unwrapping.
func parseCheckpointObject(obj resource.PropertyMap) resource.PropertyMap {
	if inputs, ok := obj["__inputs"]; ok {
		if inputs.IsSecret() && inputs.SecretValue().Element.IsObject() {
			return inputs.SecretValue().Element.ObjectValue()
		}
		if inputs.IsObject() {
			return inputs.ObjectValue()
		}
	}
	return nil
}
| Construct | identifier_name |
provider.go | // Copyright 2016-2021, Pulumi Corporation.
package provider
import (
"bytes"
"compress/gzip"
"context"
"encoding/json"
"fmt"
"log"
"net/http"
"os"
"runtime/debug"
"strings"
"time"
"github.com/golang/protobuf/ptypes/empty"
structpb "github.com/golang/protobuf/ptypes/struct"
"github.com/jpillora/backoff"
"github.com/pkg/errors"
"github.com/pulumi/pulumi-google-native/provider/pkg/googleclient"
"github.com/pulumi/pulumi-google-native/provider/pkg/resources"
"github.com/pulumi/pulumi-google-native/provider/pkg/version"
"github.com/pulumi/pulumi/pkg/v3/resource/provider"
"github.com/pulumi/pulumi/sdk/v3/go/common/diag"
"github.com/pulumi/pulumi/sdk/v3/go/common/resource"
"github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin"
"github.com/pulumi/pulumi/sdk/v3/go/common/util/logging"
"github.com/pulumi/pulumi/sdk/v3/go/common/util/rpcutil/rpcerror"
rpc "github.com/pulumi/pulumi/sdk/v3/proto/go"
"google.golang.org/api/googleapi"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// googleCloudProvider implements the Pulumi resource provider gRPC interface
// for Google Cloud REST APIs, driven by declarative resource metadata.
type googleCloudProvider struct {
host *provider.HostClient // engine host, used for logging back to the CLI
name string // provider name, used in diagnostic labels
version string // provider version reported to the engine
config map[string]string // provider configuration (populated in Configure)
schemaBytes []byte // embedded Pulumi schema, returned by GetSchema
client *googleclient.GoogleClient // authenticated HTTP client (created in Configure)
resourceMap *resources.CloudAPIMetadata // metadata describing all resources and functions
converter *resources.SdkShapeConverter // converts between SDK and raw API property shapes
}
// makeProvider creates a new provider instance from the embedded schema and
// gzip-compressed resource metadata. The HTTP client is not created here —
// it is initialized later, in Configure, once credentials are known.
func makeProvider(host *provider.HostClient, name, version string, schemaBytes []byte,
cloudAPIResourcesBytes []byte) (rpc.ResourceProviderServer, error) {
resourceMap, err := loadMetadata(cloudAPIResourcesBytes)
if err != nil {
return nil, err
}
// Return the new provider
return &googleCloudProvider{
host: host,
name: name,
version: version,
config: map[string]string{},
schemaBytes: schemaBytes,
resourceMap: resourceMap,
converter: &resources.SdkShapeConverter{Types: resourceMap.Types},
}, nil
}
// loadMetadata deserializes the provided compressed json byte array into a CloudAPIMetadata struct.
func loadMetadata(metadataBytes []byte) (*resources.CloudAPIMetadata, error) {
	reader, err := gzip.NewReader(bytes.NewReader(metadataBytes))
	if err != nil {
		return nil, errors.Wrap(err, "expand compressed metadata")
	}
	var metadata resources.CloudAPIMetadata
	if err = json.NewDecoder(reader).Decode(&metadata); err != nil {
		return nil, errors.Wrap(err, "unmarshalling resource map")
	}
	// Closing the gzip reader also verifies the stream checksum.
	if err = reader.Close(); err != nil {
		return nil, errors.Wrap(err, "closing uncompress stream for metadata")
	}
	return &metadata, nil
}
// Configure configures the resource provider with "globals" that control its behavior.
// It captures provider config, wires std-lib logging into the engine, and builds
// the authenticated Google HTTP client.
func (p *googleCloudProvider) Configure(ctx context.Context,
req *rpc.ConfigureRequest) (*rpc.ConfigureResponse, error) {
// Config keys arrive namespaced as "google-native:config:<key>"; strip the prefix.
for key, val := range req.GetVariables() {
p.config[strings.TrimPrefix(key, "google-native:config:")] = val
}
p.setLoggingContext(ctx)
// List-valued settings arrive as JSON-encoded string arrays.
// NOTE(review): no env fallback is provided for this key (empty envName) — confirm intended.
impersonateServiceAccountDelegatesString := p.getConfig("impersonateServiceAccountDelegates", "")
var impersonateServiceAccountDelegates []string
if impersonateServiceAccountDelegatesString != "" {
err := json.Unmarshal([]byte(impersonateServiceAccountDelegatesString), &impersonateServiceAccountDelegates)
if err != nil {
return nil, errors.Wrapf(err, "failed to unmarshal %q as Impersonate Service Account Delegates", impersonateServiceAccountDelegatesString)
}
}
scopesString := p.getConfig("scopes", "")
var scopes []string
if scopesString != "" {
err := json.Unmarshal([]byte(scopesString), &scopes)
if err != nil {
return nil, errors.Wrapf(err, "failed to unmarshal %q as Scopes", scopesString)
}
}
appendUserAgent := p.getConfig("appendUserAgent", "GOOGLE_APPEND_USER_AGENT")
// Assemble the client config from provider config with env-var fallbacks.
config := googleclient.Config{
Credentials: p.getConfig("credentials", "GOOGLE_CREDENTIALS"),
AccessToken: p.getConfig("accessToken", "GOOGLE_OAUTH_ACCESS_TOKEN"),
ImpersonateServiceAccount: p.getConfig("impersonateServiceAccount", "GOOGLE_IMPERSONATE_SERVICE_ACCOUNT"),
ImpersonateServiceAccountDelegates: impersonateServiceAccountDelegates,
Scopes: scopes,
PulumiVersion: getPulumiVersion(),
ProviderVersion: version.Version,
PartnerName: p.getPartnerName(),
AppendUserAgent: appendUserAgent,
}
client, err := googleclient.New(ctx, config)
if err != nil {
return nil, err
}
p.client = client
// Declare that this provider can receive secret values in its inputs.
return &rpc.ConfigureResponse{
AcceptSecrets: true,
}, nil
}
// Invoke dynamically executes a built-in function in the provider.
// It looks up the function metadata by token, applies config defaults,
// issues the HTTP request, and marshals the raw response back to the engine.
func (p *googleCloudProvider) Invoke(_ context.Context, req *rpc.InvokeRequest) (*rpc.InvokeResponse, error) {
label := fmt.Sprintf("%s.Invoke(%s)", p.name, req.Tok)
inv, ok := p.resourceMap.Functions[req.Tok]
if !ok {
return nil, errors.Errorf("invoke %q not found", req.Tok)
}
args, err := plugin.UnmarshalProperties(req.GetArgs(), plugin.MarshalOptions{
Label: fmt.Sprintf("%s.args", label), SkipNulls: true,
})
if err != nil {
return nil, err
}
// Apply default config values.
for _, param := range inv.Params {
// Params may carry an SDK-facing alias; prefer it when present.
sdkName := param.Name
if param.SdkName != "" {
sdkName = param.SdkName
}
switch sdkName {
case "project":
// Default the project from old inputs / provider config when not supplied.
key := resource.PropertyKey(sdkName)
if value, ok := p.getDefaultValue(key, sdkName, args); ok {
args[key] = *value
}
}
}
uri, err := buildFunctionUrl(inv, args)
if err != nil {
return nil, err
}
resp, err := p.client.RequestWithTimeout(inv.Verb, uri, nil, 0)
if err != nil {
return nil, fmt.Errorf("error sending request: %s", err)
}
// Serialize and return outputs.
result, err := plugin.MarshalProperties(
resource.NewPropertyMapFromMap(resp),
plugin.MarshalOptions{Label: fmt.Sprintf("%s.response", label), SkipNulls: true},
)
if err != nil {
return nil, err
}
return &rpc.InvokeResponse{Return: result}, nil
}
// StreamInvoke dynamically executes a built-in function in the provider. The result is streamed
// back as a series of messages.
// Streaming invokes are not supported by this provider.
func (p *googleCloudProvider) StreamInvoke(_ *rpc.InvokeRequest, _ rpc.ResourceProvider_StreamInvokeServer) error {
return status.Error(codes.Unimplemented, "StreamInvoke is not yet implemented")
}
// Check validates that the given property bag is valid for a resource of the given type and returns
// the inputs that should be passed to successive calls to Diff, Create, or Update for this
// resource. As a rule, the provider inputs returned by a call to Check should preserve the original
// representation of the properties as present in the program inputs. Though this rule is not
// required for correctness, violations thereof can negatively impact the end-user experience, as
// the provider inputs are using for detecting and rendering diffs.
func (p *googleCloudProvider) Check(_ context.Context, req *rpc.CheckRequest) (*rpc.CheckResponse, error) {
urn := resource.URN(req.GetUrn())
label := fmt.Sprintf("%s.Check(%s)", p.name, urn)
logging.V(9).Infof("%s executing", label)
// Deserialize RPC inputs.
olds, err := plugin.UnmarshalProperties(req.GetOlds(), plugin.MarshalOptions{
Label: fmt.Sprintf("%s.olds", label), SkipNulls: true,
})
if err != nil {
return nil, err
}
news, err := plugin.UnmarshalProperties(req.GetNews(), plugin.MarshalOptions{
Label: fmt.Sprintf("%s.news", label), KeepUnknowns: true, SkipNulls: true,
})
if err != nil {
return nil, err
}
// Look up resource metadata by the URN's type token.
resourceKey := string(urn.Type())
res, ok := p.resourceMap.Resources[resourceKey]
if !ok {
return nil, errors.Errorf("resource %q not found", resourceKey)
}
// Apply default config values.
var failures []*rpc.CheckFailure
for _, param := range res.CreateParams {
// A param may carry an SDK-facing alias; prefer it when present.
sdkName := param.Name
if param.SdkName != "" {
sdkName = param.SdkName
}
switch sdkName {
case "project", "location", "zone":
key := resource.PropertyKey(sdkName)
configName := sdkName
// The provider config key corresponding to "location" is named "region".
if sdkName == "location" {
configName = "region"
}
// An explicit value in the new inputs always wins; skip defaulting.
if _, has := news[key]; has {
continue
}
// Fall back to old inputs, then provider config; otherwise report a check failure.
if value, ok := p.getDefaultValue(key, configName, olds); ok {
news[key] = *value
} else {
reason := fmt.Sprintf("missing required property '%s'. Either set it explicitly or configure it with 'pulumi config set google-native:%s <value>'.", sdkName, configName)
failures = append(failures, &rpc.CheckFailure{
Reason: reason,
})
}
}
}
// Auto-naming: synthesize a name from the URN when the resource supports it and none was given.
nameKey := resource.PropertyKey("name")
if res.AutoNamePattern != "" && !news.HasValue(nameKey) {
news[nameKey] = getDefaultName(urn, res.AutoNamePattern, olds, news)
}
// Apply property patterns.
for name, prop := range res.CreateProperties {
key := resource.PropertyKey(name)
if prop.SdkName != "" {
key = resource.PropertyKey(prop.SdkName)
}
if value, ok := applyPropertyPattern(key, prop, news); ok {
news[key] = *value
}
}
// Serialize the (possibly augmented) inputs back to RPC form.
resInputs, err := plugin.MarshalProperties(news, plugin.MarshalOptions{
Label: fmt.Sprintf("%s.resInputs", label), KeepUnknowns: true})
if err != nil {
return nil, err
}
return &rpc.CheckResponse{Inputs: resInputs, Failures: failures}, nil
}
// Get a default project name for the given inputs.
// Resolution order: old inputs first, then provider configuration; the
// boolean result reports whether a default was found.
func (p *googleCloudProvider) getDefaultValue(key resource.PropertyKey, configName string, olds resource.PropertyMap) (*resource.PropertyValue, bool) {
// 1. Check if old inputs define the value.
if v, ok := olds[key]; ok {
return &v, true
}
// 2. Check if the config has a corresponding value.
if cv, ok := p.config[configName]; ok {
v := resource.NewStringProperty(cv)
return &v, true
}
return nil, false
}
// GetSchema returns the Pulumi schema for this provider, embedded at build time.
// Only version 0 (the unversioned schema) is supported.
func (p *googleCloudProvider) GetSchema(_ context.Context, req *rpc.GetSchemaRequest) (*rpc.GetSchemaResponse, error) {
if v := req.GetVersion(); v != 0 {
return nil, fmt.Errorf("unsupported schema version %d", v)
}
return &rpc.GetSchemaResponse{Schema: string(p.schemaBytes)}, nil
}
// CheckConfig validates the configuration for this provider.
// No validation is performed: the supplied configuration is accepted verbatim.
func (p *googleCloudProvider) CheckConfig(_ context.Context, req *rpc.CheckRequest) (*rpc.CheckResponse, error) {
return &rpc.CheckResponse{Inputs: req.GetNews()}, nil
}
// DiffConfig diffs the configuration for this provider.
// Configuration changes never require replacing the provider, so an empty diff is always returned.
func (p *googleCloudProvider) DiffConfig(context.Context, *rpc.DiffRequest) (*rpc.DiffResponse, error) {
return &rpc.DiffResponse{
Changes: 0,
Replaces: []string{},
Stables: []string{},
DeleteBeforeReplace: false,
}, nil
}
// Diff checks what impacts a hypothetical update will have on the resource's properties.
func (p *googleCloudProvider) Diff(_ context.Context, req *rpc.DiffRequest) (*rpc.DiffResponse, error) {
urn := resource.URN(req.GetUrn())
label := fmt.Sprintf("%s.Diff(%s)", p.name, urn)
logging.V(9).Infof("%s executing", label)
resourceKey := string(urn.Type())
res, ok := p.resourceMap.Resources[resourceKey]
if !ok {
return nil, errors.Errorf("resource %q not found", resourceKey)
}
oldState, err := plugin.UnmarshalProperties(req.GetOlds(), plugin.MarshalOptions{
Label: fmt.Sprintf("%s.oldState", label),
KeepUnknowns: true,
KeepSecrets: true,
})
if err != nil {
return nil, errors.Wrapf(err, "diff failed because malformed resource inputs")
}
// Extract old inputs from the `__inputs` field of the old state.
oldInputs := parseCheckpointObject(oldState)
newInputs, err := plugin.UnmarshalProperties(req.GetNews(), plugin.MarshalOptions{
Label: fmt.Sprintf("%s.newInputs", label),
KeepUnknowns: true,
KeepSecrets: true,
})
if err != nil {
return nil, errors.Wrapf(err, "diff failed because malformed resource inputs")
}
// The diff is computed on inputs, not raw outputs: no input change means no change at all.
diff := oldInputs.Diff(newInputs)
if diff == nil {
return &rpc.DiffResponse{Changes: rpc.DiffResponse_DIFF_NONE}, nil
}
// Collect every property that can only be set at create time; changes to any
// of them require replacing the resource.
var replaces []string
for _, p := range res.CreateParams {
sdkName := p.Name
if p.SdkName != "" {
sdkName = p.SdkName
}
replaces = append(replaces, sdkName)
}
// Properties present in the create body but absent from the update body are create-only.
for name, prop := range res.CreateProperties {
if _, has := res.UpdateProperties[name]; !has {
sdkName := name
if prop.SdkName != "" {
sdkName = prop.SdkName
}
replaces = append(replaces, sdkName)
}
}
// Uploads are only supported for create methods, not updates.
if res.AssetUpload {
if _, ok := diff.Updates[resource.PropertyKey("source")]; ok {
replaces = append(replaces, "source")
}
}
// NOTE(review): `replaces` appears to list candidate replace-triggering keys rather than
// keys confirmed changed — confirm how the engine intersects this with the actual diff.
return &rpc.DiffResponse{Changes: rpc.DiffResponse_DIFF_UNKNOWN, Replaces: replaces, DeleteBeforeReplace: true}, nil
}
// Create allocates a new instance of the provided resource and returns its unique ID afterwards.
func (p *googleCloudProvider) Create(ctx context.Context, req *rpc.CreateRequest) (*rpc.CreateResponse, error) {
urn := resource.URN(req.GetUrn())
label := fmt.Sprintf("%s.Create(%s)", p.name, urn)
logging.V(9).Infof("%s executing", label)
// Deserialize RPC inputs
inputs, err := plugin.UnmarshalProperties(req.GetProperties(), plugin.MarshalOptions{
Label: fmt.Sprintf("%s.properties", label), SkipNulls: true,
})
if err != nil {
return nil, err
}
inputsMap := inputs.Mappable()
resourceKey := string(urn.Type())
res, ok := p.resourceMap.Resources[resourceKey]
if !ok {
return nil, errors.Errorf("resource %q not found", resourceKey)
}
uri, err := buildCreateUrl(res, inputs)
if err != nil {
return nil, err
}
body := p.prepareAPIInputs(inputs, nil, res.CreateProperties)
var op map[string]interface{}
if res.AssetUpload {
var content []byte
source := inputs["source"]
if source.IsAsset() {
content, err = source.AssetValue().Bytes()
} else if source.IsArchive() {
content, err = source.ArchiveValue().Bytes(resource.ZIPArchive)
}
if err != nil {
return nil, err
}
op, err = p.client.UploadWithTimeout(res.CreateVerb, uri, body, content, 0)
if err != nil {
return nil, fmt.Errorf("error sending upload request: %s: %q %+v %d", err, uri, inputs.Mappable(), len(content))
}
} else {
op, err = p.client.RequestWithTimeout(res.CreateVerb, uri, body, 0)
if err != nil {
return nil, fmt.Errorf("error sending request: %s: %q %+v", err, uri, inputs.Mappable())
}
}
resp, err := p.waitForResourceOpCompletion(res.BaseUrl, op)
if err != nil {
if resp == nil {
return nil, errors.Wrapf(err, "waiting for completion")
}
// A partial failure may have occurred because we got an error and a response.
// Try reading the resource state and return a partial error if there is some.
id, idErr := calculateResourceId(res, inputsMap, resp)
if idErr != nil {
return nil, errors.Wrapf(err, "waiting for completion / calculate ID %s", idErr)
}
readResp, getErr := p.client.RequestWithTimeout("GET", id, nil, 0)
if getErr != nil {
return nil, errors.Wrapf(err, "waiting for completion / read state %s", getErr)
}
checkpoint, cpErr := plugin.MarshalProperties(
checkpointObject(inputs, readResp),
plugin.MarshalOptions{Label: fmt.Sprintf("%s.partialCheckpoint", label), KeepSecrets: true, SkipNulls: true},
)
if cpErr != nil {
return nil, errors.Wrapf(err, "waiting for completion / checkpoint %s", cpErr)
}
return nil, partialError(id, err, checkpoint, req.GetProperties())
}
// Store both outputs and inputs into the state.
checkpoint, err := plugin.MarshalProperties(
checkpointObject(inputs, resp),
plugin.MarshalOptions{Label: fmt.Sprintf("%s.checkpoint", label), KeepSecrets: true, SkipNulls: true},
)
id, err := calculateResourceId(res, inputsMap, resp)
if err != nil {
return nil, errors.Wrapf(err, "calculating resource ID")
}
return &rpc.CreateResponse{
Id: id,
Properties: checkpoint,
}, nil
}
func (p *googleCloudProvider) prepareAPIInputs(
inputs, state resource.PropertyMap,
properties map[string]resources.CloudAPIProperty) map[string]interface{} {
inputsMap := inputs.Mappable()
stateMap := state.Mappable()
return p.converter.SdkPropertiesToRequestBody(properties, inputsMap, stateMap)
}
// waitForResourceOpCompletion keeps polling the resource or operation URL until it gets
// a success or a failure of provisioning.
// Note that both a response and an error can be returned in case of a partially-failed deployment
// (e.g., resource is created but failed to initialize to completion).
func (p *googleCloudProvider) waitForResourceOpCompletion(baseUrl string, resp map[string]interface{}) (map[string]interface{}, error) {
retryPolicy := backoff.Backoff{
Min: 1 * time.Second,
Max: 15 * time.Second,
Factor: 1.5,
Jitter: true,
}
for {
logging.V(9).Infof("waiting for completion: %+v", resp)
// There are two styles of operations: one returns a 'done' boolean flag, another one returns status='DONE'.
done, hasDone := resp["done"].(bool)
status, hasStatus := resp["status"].(string)
if completed := (hasDone && done) || (hasStatus && status == "DONE"); completed {
// Extract an error message from the response, if any.
var err error
if failure, has := resp["error"]; has {
err = errors.Errorf("operation errored with %+v", failure)
} else if statusMessage, has := resp["statusMessage"]; has {
err = errors.Errorf("operation failed with %q", statusMessage)
}
// Extract the resource response, if any.
// A partial error could happen, so both response and error could be available.
if response, has := resp["response"].(map[string]interface{}); has {
return response, err
}
if operationType, has := resp["operationType"].(string); has && strings.Contains(strings.ToLower(operationType), "delete") {
return resp, err
}
// Check if there's a target link.
if targetLink, has := resp["targetLink"].(string); has {
// Try reading resource state.
state, getErr := p.client.RequestWithTimeout("GET", targetLink, nil, 0)
if getErr != nil {
if err != nil {
// Return the original creation error if resource read failed.
return nil, err
}
return nil, getErr
}
// A partial error could happen, so both response and error could be available.
return state, err
}
// At this point, we assume either a complete failure or a clean response.
if err != nil {
return nil, err
}
return resp, nil
}
var pollUri string
if selfLink, has := resp["selfLink"].(string); has && hasStatus {
pollUri = selfLink
} else {
if name, has := resp["name"].(string); has && strings.HasPrefix(name, "operations/") {
pollUri = fmt.Sprintf("%s/v1/%s", baseUrl, name)
}
}
if pollUri == "" {
return resp, nil
}
time.Sleep(retryPolicy.Duration())
op, err := p.client.RequestWithTimeout("GET", pollUri, nil, 0)
if err != nil {
return nil, errors.Wrapf(err, "polling operation status")
}
resp = op
}
}
// Read the current live state associated with a resource.
func (p *googleCloudProvider) Read(_ context.Context, req *rpc.ReadRequest) (*rpc.ReadResponse, error) {
urn := resource.URN(req.GetUrn())
label := fmt.Sprintf("%s.Read(%s)", p.name, urn)
resourceKey := string(urn.Type())
res, ok := p.resourceMap.Resources[resourceKey]
if !ok {
return nil, errors.Errorf("resource %q not found", resourceKey)
}
id := req.GetId()
uri := res.ResourceUrl(id)
// Retrieve the old state.
oldState, err := plugin.UnmarshalProperties(req.GetProperties(), plugin.MarshalOptions{
Label: fmt.Sprintf("%s.olds", label), KeepUnknowns: true, SkipNulls: true, KeepSecrets: true,
})
if err != nil {
return nil, err
}
// Read the current state of the resource from the API.
newState, err := p.client.RequestWithTimeout("GET", uri, nil, 0)
if err != nil {
if reqErr, ok := err.(*googleapi.Error); ok && reqErr.Code == http.StatusNotFound {
// 404 means that the resource was deleted.
return &rpc.ReadResponse{Id: ""}, nil
}
return nil, fmt.Errorf("error sending request: %s", err)
}
// Extract old inputs from the `__inputs` field of the old state.
inputs := parseCheckpointObject(oldState)
newStateProps := resource.NewPropertyMapFromMap(newState)
if inputs == nil {
return nil, status.Error(codes.Unimplemented, "Import is not yet implemented")
} else {
// It's hard to infer the changes in the inputs shape based on the outputs without false positives.
// The current approach is complicated but it's aimed to minimize the noise while refreshing:
// 0. We have "old" inputs and outputs before refresh and "new" outputs read from API.
// 1. Project old outputs to their corresponding input shape (exclude read-only properties).
oldInputProjection := getInputsFromState(res, oldState)
// 2. Project new outputs to their corresponding input shape (exclude read-only properties).
newInputProjection := getInputsFromState(res, newStateProps)
// 3. Calculate the difference between two projections. This should give us actual significant changes
// that happened in Google Cloud between the last resource update and its current state.
diff := oldInputProjection.Diff(newInputProjection)
// 4. Apply this difference to the actual inputs (not a projection) that we have in state.
inputs = applyDiff(inputs, diff)
}
// Store both outputs and inputs into the state checkpoint.
checkpoint, err := plugin.MarshalProperties(
checkpointObject(inputs, newState),
plugin.MarshalOptions{Label: fmt.Sprintf("%s.checkpoint", label), KeepSecrets: true, SkipNulls: true},
)
if err != nil {
return nil, err
}
inputsRecord, err := plugin.MarshalProperties(
inputs,
plugin.MarshalOptions{Label: fmt.Sprintf("%s.inputs", label), KeepUnknowns: true, SkipNulls: true},
)
if err != nil {
return nil, err
}
return &rpc.ReadResponse{Id: id, Properties: checkpoint, Inputs: inputsRecord}, nil
}
// Update updates an existing resource with new values.
func (p *googleCloudProvider) Update(_ context.Context, req *rpc.UpdateRequest) (*rpc.UpdateResponse, error) {
urn := resource.URN(req.GetUrn())
label := fmt.Sprintf("%s.Update(%s)", p.name, urn)
logging.V(9).Infof("%s executing", label)
// Deserialize RPC inputs | return nil, err
}
// Deserialize the last known state.
oldState, err := plugin.UnmarshalProperties(req.GetOlds(), plugin.MarshalOptions{
Label: fmt.Sprintf("%s.oldState", label), SkipNulls: true,
})
if err != nil {
return nil, errors.Wrapf(err, "reading resource state")
}
resourceKey := string(urn.Type())
res, ok := p.resourceMap.Resources[resourceKey]
if !ok {
return nil, errors.Errorf("resource %q not found", resourceKey)
}
body := p.prepareAPIInputs(inputs, oldState, res.UpdateProperties)
uri := res.ResourceUrl(req.GetId())
if strings.HasSuffix(uri, ":getIamPolicy") {
uri = strings.ReplaceAll(uri, ":getIamPolicy", ":setIamPolicy")
}
op, err := p.client.RequestWithTimeout(res.UpdateVerb, uri, body, 0)
if err != nil {
return nil, fmt.Errorf("error sending request: %s: %q %+v", err, uri, body)
}
resp, err := p.waitForResourceOpCompletion(res.BaseUrl, op)
if err != nil {
return nil, errors.Wrapf(err, "waiting for completion")
}
// Read the inputs to persist them into state.
newInputs, err := plugin.UnmarshalProperties(req.GetNews(), plugin.MarshalOptions{
Label: fmt.Sprintf("%s.newInputs", label),
KeepUnknowns: true,
KeepSecrets: true,
})
if err != nil {
return nil, errors.Wrapf(err, "diff failed because malformed resource inputs")
}
// Store both outputs and inputs into the state and return RPC checkpoint.
outputs, err := plugin.MarshalProperties(
checkpointObject(newInputs, resp),
plugin.MarshalOptions{Label: fmt.Sprintf("%s.response", label), KeepSecrets: true, SkipNulls: true},
)
return &rpc.UpdateResponse{
Properties: outputs,
}, nil
}
// Delete tears down an existing resource with the given ID. If it fails, the resource is assumed
// to still exist.
func (p *googleCloudProvider) Delete(_ context.Context, req *rpc.DeleteRequest) (*empty.Empty, error) {
urn := resource.URN(req.GetUrn())
resourceKey := string(urn.Type())
res, ok := p.resourceMap.Resources[resourceKey]
if !ok {
return nil, errors.Errorf("resource %q not found", resourceKey)
}
uri := res.ResourceUrl(req.GetId())
if res.NoDelete {
// At the time of writing, the classic GCP provider has the same behavior and warning for 10 resources.
logging.V(1).Infof("%q resources"+
" cannot be deleted from Google Cloud. The resource %s will be removed from Pulumi"+
" state, but will still be present on Google Cloud.", resourceKey, req.GetId())
return &empty.Empty{}, nil
}
resp, err := p.client.RequestWithTimeout("DELETE", uri, nil, 0)
if err != nil {
return nil, fmt.Errorf("error sending request: %s", err)
}
_, err = p.waitForResourceOpCompletion(res.BaseUrl, resp)
if err != nil {
return nil, errors.Wrapf(err, "waiting for completion")
}
return &empty.Empty{}, nil
}
// Construct creates a new component resource.
func (p *googleCloudProvider) Construct(_ context.Context, _ *rpc.ConstructRequest) (*rpc.ConstructResponse, error) {
return nil, status.Error(codes.Unimplemented, "Construct is not yet implemented")
}
// Call dynamically executes a method in the provider associated with a component resource.
func (p *googleCloudProvider) Call(_ context.Context, _ *rpc.CallRequest) (*rpc.CallResponse, error) {
return nil, status.Error(codes.Unimplemented, "Call is not yet implemented")
}
// GetPluginInfo returns generic information about this plugin, like its version.
func (p *googleCloudProvider) GetPluginInfo(context.Context, *empty.Empty) (*rpc.PluginInfo, error) {
return &rpc.PluginInfo{
Version: p.version,
}, nil
}
// Cancel signals the provider to gracefully shut down and abort any ongoing resource operations.
// Operations aborted in this way will return an error (e.g., `Update` and `Create` will either a
// creation error or an initialization error). Since Cancel is advisory and non-blocking, it is up
// to the host to decide how long to wait after Cancel is called before (e.g.)
// hard-closing any gRPC connection.
func (p *googleCloudProvider) Cancel(context.Context, *empty.Empty) (*empty.Empty, error) {
return &empty.Empty{}, nil
}
func (p *googleCloudProvider) setLoggingContext(ctx context.Context) {
log.SetOutput(&LogRedirector{
writers: map[string]func(string) error{
tfTracePrefix: func(msg string) error { return p.host.Log(ctx, diag.Debug, "", msg) },
tfDebugPrefix: func(msg string) error { return p.host.Log(ctx, diag.Debug, "", msg) },
tfInfoPrefix: func(msg string) error { return p.host.Log(ctx, diag.Info, "", msg) },
tfWarnPrefix: func(msg string) error { return p.host.Log(ctx, diag.Warning, "", msg) },
tfErrorPrefix: func(msg string) error { return p.host.Log(ctx, diag.Error, "", msg) },
},
})
}
func (p *googleCloudProvider) getConfig(configName, envName string) string {
if val, ok := p.config[configName]; ok {
return val
}
return os.Getenv(envName)
}
func (p *googleCloudProvider) getPartnerName() string {
result := p.getConfig("partnerName", "GOOGLE_PARTNER_NAME")
if result != "" {
return result
} else {
disablePartner := p.getConfig("disablePartnerName", "GOOGLE_DISABLE_PARTNER_NAME")
if disablePartner == "true" {
return ""
}
}
return "Pulumi"
}
func getPulumiVersion() string {
if bi, ok := debug.ReadBuildInfo(); ok {
for _, dep := range bi.Deps {
if strings.HasPrefix(dep.Path, "github.com/pulumi/pulumi/pkg") {
return strings.TrimPrefix(dep.Version, "v")
}
}
}
// We should never get here but let's not panic and return something sensible if we do.
logging.V(4).Info("No Pulumi package version found, using '3' as the default version for user-agent")
return "3"
}
// partialError creates an error for resources that did not complete an operation in progress.
// The last known state of the object is included in the error so that it can be checkpointed.
func partialError(id string, err error, state *structpb.Struct, inputs *structpb.Struct) error {
detail := rpc.ErrorResourceInitFailed{
Id: id,
Properties: state,
Reasons: []string{err.Error()},
Inputs: inputs,
}
return rpcerror.WithDetails(rpcerror.New(codes.Unknown, err.Error()), &detail)
}
// checkpointObject puts inputs in the `__inputs` field of the state.
func checkpointObject(inputs resource.PropertyMap, outputs map[string]interface{}) resource.PropertyMap {
object := resource.NewPropertyMapFromMap(outputs)
object["__inputs"] = resource.MakeSecret(resource.NewObjectProperty(inputs))
return object
}
// parseCheckpointObject returns inputs that are saved in the `__inputs` field of the state.
func parseCheckpointObject(obj resource.PropertyMap) resource.PropertyMap {
if inputs, ok := obj["__inputs"]; ok {
return inputs.SecretValue().Element.ObjectValue()
}
return nil
} | inputs, err := plugin.UnmarshalProperties(req.GetNews(), plugin.MarshalOptions{
Label: fmt.Sprintf("%s.properties", label), SkipNulls: true,
})
if err != nil { | random_line_split |
provider.go | // Copyright 2016-2021, Pulumi Corporation.
package provider
import (
"bytes"
"compress/gzip"
"context"
"encoding/json"
"fmt"
"log"
"net/http"
"os"
"runtime/debug"
"strings"
"time"
"github.com/golang/protobuf/ptypes/empty"
structpb "github.com/golang/protobuf/ptypes/struct"
"github.com/jpillora/backoff"
"github.com/pkg/errors"
"github.com/pulumi/pulumi-google-native/provider/pkg/googleclient"
"github.com/pulumi/pulumi-google-native/provider/pkg/resources"
"github.com/pulumi/pulumi-google-native/provider/pkg/version"
"github.com/pulumi/pulumi/pkg/v3/resource/provider"
"github.com/pulumi/pulumi/sdk/v3/go/common/diag"
"github.com/pulumi/pulumi/sdk/v3/go/common/resource"
"github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin"
"github.com/pulumi/pulumi/sdk/v3/go/common/util/logging"
"github.com/pulumi/pulumi/sdk/v3/go/common/util/rpcutil/rpcerror"
rpc "github.com/pulumi/pulumi/sdk/v3/proto/go"
"google.golang.org/api/googleapi"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
type googleCloudProvider struct {
host *provider.HostClient
name string
version string
config map[string]string
schemaBytes []byte
client *googleclient.GoogleClient
resourceMap *resources.CloudAPIMetadata
converter *resources.SdkShapeConverter
}
func makeProvider(host *provider.HostClient, name, version string, schemaBytes []byte,
cloudAPIResourcesBytes []byte) (rpc.ResourceProviderServer, error) {
resourceMap, err := loadMetadata(cloudAPIResourcesBytes)
if err != nil {
return nil, err
}
// Return the new provider
return &googleCloudProvider{
host: host,
name: name,
version: version,
config: map[string]string{},
schemaBytes: schemaBytes,
resourceMap: resourceMap,
converter: &resources.SdkShapeConverter{Types: resourceMap.Types},
}, nil
}
// loadMetadata deserializes the provided compressed json byte array into a CloudAPIMetadata struct.
func loadMetadata(metadataBytes []byte) (*resources.CloudAPIMetadata, error) {
var resourceMap resources.CloudAPIMetadata
uncompressed, err := gzip.NewReader(bytes.NewReader(metadataBytes))
if err != nil {
return nil, errors.Wrap(err, "expand compressed metadata")
}
if err = json.NewDecoder(uncompressed).Decode(&resourceMap); err != nil |
if err = uncompressed.Close(); err != nil {
return nil, errors.Wrap(err, "closing uncompress stream for metadata")
}
return &resourceMap, nil
}
// Configure configures the resource provider with "globals" that control its behavior.
func (p *googleCloudProvider) Configure(ctx context.Context,
req *rpc.ConfigureRequest) (*rpc.ConfigureResponse, error) {
for key, val := range req.GetVariables() {
p.config[strings.TrimPrefix(key, "google-native:config:")] = val
}
p.setLoggingContext(ctx)
impersonateServiceAccountDelegatesString := p.getConfig("impersonateServiceAccountDelegates", "")
var impersonateServiceAccountDelegates []string
if impersonateServiceAccountDelegatesString != "" {
err := json.Unmarshal([]byte(impersonateServiceAccountDelegatesString), &impersonateServiceAccountDelegates)
if err != nil {
return nil, errors.Wrapf(err, "failed to unmarshal %q as Impersonate Service Account Delegates", impersonateServiceAccountDelegatesString)
}
}
scopesString := p.getConfig("scopes", "")
var scopes []string
if scopesString != "" {
err := json.Unmarshal([]byte(scopesString), &scopes)
if err != nil {
return nil, errors.Wrapf(err, "failed to unmarshal %q as Scopes", scopesString)
}
}
appendUserAgent := p.getConfig("appendUserAgent", "GOOGLE_APPEND_USER_AGENT")
config := googleclient.Config{
Credentials: p.getConfig("credentials", "GOOGLE_CREDENTIALS"),
AccessToken: p.getConfig("accessToken", "GOOGLE_OAUTH_ACCESS_TOKEN"),
ImpersonateServiceAccount: p.getConfig("impersonateServiceAccount", "GOOGLE_IMPERSONATE_SERVICE_ACCOUNT"),
ImpersonateServiceAccountDelegates: impersonateServiceAccountDelegates,
Scopes: scopes,
PulumiVersion: getPulumiVersion(),
ProviderVersion: version.Version,
PartnerName: p.getPartnerName(),
AppendUserAgent: appendUserAgent,
}
client, err := googleclient.New(ctx, config)
if err != nil {
return nil, err
}
p.client = client
return &rpc.ConfigureResponse{
AcceptSecrets: true,
}, nil
}
// Invoke dynamically executes a built-in function in the provider.
func (p *googleCloudProvider) Invoke(_ context.Context, req *rpc.InvokeRequest) (*rpc.InvokeResponse, error) {
label := fmt.Sprintf("%s.Invoke(%s)", p.name, req.Tok)
inv, ok := p.resourceMap.Functions[req.Tok]
if !ok {
return nil, errors.Errorf("invoke %q not found", req.Tok)
}
args, err := plugin.UnmarshalProperties(req.GetArgs(), plugin.MarshalOptions{
Label: fmt.Sprintf("%s.args", label), SkipNulls: true,
})
if err != nil {
return nil, err
}
// Apply default config values.
for _, param := range inv.Params {
sdkName := param.Name
if param.SdkName != "" {
sdkName = param.SdkName
}
switch sdkName {
case "project":
key := resource.PropertyKey(sdkName)
if value, ok := p.getDefaultValue(key, sdkName, args); ok {
args[key] = *value
}
}
}
uri, err := buildFunctionUrl(inv, args)
if err != nil {
return nil, err
}
resp, err := p.client.RequestWithTimeout(inv.Verb, uri, nil, 0)
if err != nil {
return nil, fmt.Errorf("error sending request: %s", err)
}
// Serialize and return outputs.
result, err := plugin.MarshalProperties(
resource.NewPropertyMapFromMap(resp),
plugin.MarshalOptions{Label: fmt.Sprintf("%s.response", label), SkipNulls: true},
)
if err != nil {
return nil, err
}
return &rpc.InvokeResponse{Return: result}, nil
}
// StreamInvoke dynamically executes a built-in function in the provider. The result is streamed
// back as a series of messages.
func (p *googleCloudProvider) StreamInvoke(_ *rpc.InvokeRequest, _ rpc.ResourceProvider_StreamInvokeServer) error {
return status.Error(codes.Unimplemented, "StreamInvoke is not yet implemented")
}
// Check validates that the given property bag is valid for a resource of the given type and returns
// the inputs that should be passed to successive calls to Diff, Create, or Update for this
// resource. As a rule, the provider inputs returned by a call to Check should preserve the original
// representation of the properties as present in the program inputs. Though this rule is not
// required for correctness, violations thereof can negatively impact the end-user experience, as
// the provider inputs are using for detecting and rendering diffs.
func (p *googleCloudProvider) Check(_ context.Context, req *rpc.CheckRequest) (*rpc.CheckResponse, error) {
urn := resource.URN(req.GetUrn())
label := fmt.Sprintf("%s.Check(%s)", p.name, urn)
logging.V(9).Infof("%s executing", label)
// Deserialize RPC inputs.
olds, err := plugin.UnmarshalProperties(req.GetOlds(), plugin.MarshalOptions{
Label: fmt.Sprintf("%s.olds", label), SkipNulls: true,
})
if err != nil {
return nil, err
}
news, err := plugin.UnmarshalProperties(req.GetNews(), plugin.MarshalOptions{
Label: fmt.Sprintf("%s.news", label), KeepUnknowns: true, SkipNulls: true,
})
if err != nil {
return nil, err
}
resourceKey := string(urn.Type())
res, ok := p.resourceMap.Resources[resourceKey]
if !ok {
return nil, errors.Errorf("resource %q not found", resourceKey)
}
// Apply default config values.
var failures []*rpc.CheckFailure
for _, param := range res.CreateParams {
sdkName := param.Name
if param.SdkName != "" {
sdkName = param.SdkName
}
switch sdkName {
case "project", "location", "zone":
key := resource.PropertyKey(sdkName)
configName := sdkName
if sdkName == "location" {
configName = "region"
}
if _, has := news[key]; has {
continue
}
if value, ok := p.getDefaultValue(key, configName, olds); ok {
news[key] = *value
} else {
reason := fmt.Sprintf("missing required property '%s'. Either set it explicitly or configure it with 'pulumi config set google-native:%s <value>'.", sdkName, configName)
failures = append(failures, &rpc.CheckFailure{
Reason: reason,
})
}
}
}
// Auto-naming.
nameKey := resource.PropertyKey("name")
if res.AutoNamePattern != "" && !news.HasValue(nameKey) {
news[nameKey] = getDefaultName(urn, res.AutoNamePattern, olds, news)
}
// Apply property patterns.
for name, prop := range res.CreateProperties {
key := resource.PropertyKey(name)
if prop.SdkName != "" {
key = resource.PropertyKey(prop.SdkName)
}
if value, ok := applyPropertyPattern(key, prop, news); ok {
news[key] = *value
}
}
resInputs, err := plugin.MarshalProperties(news, plugin.MarshalOptions{
Label: fmt.Sprintf("%s.resInputs", label), KeepUnknowns: true})
if err != nil {
return nil, err
}
return &rpc.CheckResponse{Inputs: resInputs, Failures: failures}, nil
}
// Get a default project name for the given inputs.
func (p *googleCloudProvider) getDefaultValue(key resource.PropertyKey, configName string, olds resource.PropertyMap) (*resource.PropertyValue, bool) {
// 1. Check if old inputs define the value.
if v, ok := olds[key]; ok {
return &v, true
}
// 2. Check if the config has a corresponding value.
if cv, ok := p.config[configName]; ok {
v := resource.NewStringProperty(cv)
return &v, true
}
return nil, false
}
func (p *googleCloudProvider) GetSchema(_ context.Context, req *rpc.GetSchemaRequest) (*rpc.GetSchemaResponse, error) {
if v := req.GetVersion(); v != 0 {
return nil, fmt.Errorf("unsupported schema version %d", v)
}
return &rpc.GetSchemaResponse{Schema: string(p.schemaBytes)}, nil
}
// CheckConfig validates the configuration for this provider.
func (p *googleCloudProvider) CheckConfig(_ context.Context, req *rpc.CheckRequest) (*rpc.CheckResponse, error) {
return &rpc.CheckResponse{Inputs: req.GetNews()}, nil
}
// DiffConfig diffs the configuration for this provider.
func (p *googleCloudProvider) DiffConfig(context.Context, *rpc.DiffRequest) (*rpc.DiffResponse, error) {
return &rpc.DiffResponse{
Changes: 0,
Replaces: []string{},
Stables: []string{},
DeleteBeforeReplace: false,
}, nil
}
// Diff checks what impacts a hypothetical update will have on the resource's properties.
func (p *googleCloudProvider) Diff(_ context.Context, req *rpc.DiffRequest) (*rpc.DiffResponse, error) {
urn := resource.URN(req.GetUrn())
label := fmt.Sprintf("%s.Diff(%s)", p.name, urn)
logging.V(9).Infof("%s executing", label)
resourceKey := string(urn.Type())
res, ok := p.resourceMap.Resources[resourceKey]
if !ok {
return nil, errors.Errorf("resource %q not found", resourceKey)
}
oldState, err := plugin.UnmarshalProperties(req.GetOlds(), plugin.MarshalOptions{
Label: fmt.Sprintf("%s.oldState", label),
KeepUnknowns: true,
KeepSecrets: true,
})
if err != nil {
return nil, errors.Wrapf(err, "diff failed because malformed resource inputs")
}
// Extract old inputs from the `__inputs` field of the old state.
oldInputs := parseCheckpointObject(oldState)
newInputs, err := plugin.UnmarshalProperties(req.GetNews(), plugin.MarshalOptions{
Label: fmt.Sprintf("%s.newInputs", label),
KeepUnknowns: true,
KeepSecrets: true,
})
if err != nil {
return nil, errors.Wrapf(err, "diff failed because malformed resource inputs")
}
diff := oldInputs.Diff(newInputs)
if diff == nil {
return &rpc.DiffResponse{Changes: rpc.DiffResponse_DIFF_NONE}, nil
}
var replaces []string
for _, p := range res.CreateParams {
sdkName := p.Name
if p.SdkName != "" {
sdkName = p.SdkName
}
replaces = append(replaces, sdkName)
}
for name, prop := range res.CreateProperties {
if _, has := res.UpdateProperties[name]; !has {
sdkName := name
if prop.SdkName != "" {
sdkName = prop.SdkName
}
replaces = append(replaces, sdkName)
}
}
// Uploads are only supported for create methods, not updates.
if res.AssetUpload {
if _, ok := diff.Updates[resource.PropertyKey("source")]; ok {
replaces = append(replaces, "source")
}
}
return &rpc.DiffResponse{Changes: rpc.DiffResponse_DIFF_UNKNOWN, Replaces: replaces, DeleteBeforeReplace: true}, nil
}
// Create allocates a new instance of the provided resource and returns its unique ID afterwards.
func (p *googleCloudProvider) Create(ctx context.Context, req *rpc.CreateRequest) (*rpc.CreateResponse, error) {
urn := resource.URN(req.GetUrn())
label := fmt.Sprintf("%s.Create(%s)", p.name, urn)
logging.V(9).Infof("%s executing", label)
// Deserialize RPC inputs
inputs, err := plugin.UnmarshalProperties(req.GetProperties(), plugin.MarshalOptions{
Label: fmt.Sprintf("%s.properties", label), SkipNulls: true,
})
if err != nil {
return nil, err
}
inputsMap := inputs.Mappable()
resourceKey := string(urn.Type())
res, ok := p.resourceMap.Resources[resourceKey]
if !ok {
return nil, errors.Errorf("resource %q not found", resourceKey)
}
uri, err := buildCreateUrl(res, inputs)
if err != nil {
return nil, err
}
body := p.prepareAPIInputs(inputs, nil, res.CreateProperties)
var op map[string]interface{}
if res.AssetUpload {
var content []byte
source := inputs["source"]
if source.IsAsset() {
content, err = source.AssetValue().Bytes()
} else if source.IsArchive() {
content, err = source.ArchiveValue().Bytes(resource.ZIPArchive)
}
if err != nil {
return nil, err
}
op, err = p.client.UploadWithTimeout(res.CreateVerb, uri, body, content, 0)
if err != nil {
return nil, fmt.Errorf("error sending upload request: %s: %q %+v %d", err, uri, inputs.Mappable(), len(content))
}
} else {
op, err = p.client.RequestWithTimeout(res.CreateVerb, uri, body, 0)
if err != nil {
return nil, fmt.Errorf("error sending request: %s: %q %+v", err, uri, inputs.Mappable())
}
}
resp, err := p.waitForResourceOpCompletion(res.BaseUrl, op)
if err != nil {
if resp == nil {
return nil, errors.Wrapf(err, "waiting for completion")
}
// A partial failure may have occurred because we got an error and a response.
// Try reading the resource state and return a partial error if there is some.
id, idErr := calculateResourceId(res, inputsMap, resp)
if idErr != nil {
return nil, errors.Wrapf(err, "waiting for completion / calculate ID %s", idErr)
}
readResp, getErr := p.client.RequestWithTimeout("GET", id, nil, 0)
if getErr != nil {
return nil, errors.Wrapf(err, "waiting for completion / read state %s", getErr)
}
checkpoint, cpErr := plugin.MarshalProperties(
checkpointObject(inputs, readResp),
plugin.MarshalOptions{Label: fmt.Sprintf("%s.partialCheckpoint", label), KeepSecrets: true, SkipNulls: true},
)
if cpErr != nil {
return nil, errors.Wrapf(err, "waiting for completion / checkpoint %s", cpErr)
}
return nil, partialError(id, err, checkpoint, req.GetProperties())
}
// Store both outputs and inputs into the state.
checkpoint, err := plugin.MarshalProperties(
checkpointObject(inputs, resp),
plugin.MarshalOptions{Label: fmt.Sprintf("%s.checkpoint", label), KeepSecrets: true, SkipNulls: true},
)
id, err := calculateResourceId(res, inputsMap, resp)
if err != nil {
return nil, errors.Wrapf(err, "calculating resource ID")
}
return &rpc.CreateResponse{
Id: id,
Properties: checkpoint,
}, nil
}
func (p *googleCloudProvider) prepareAPIInputs(
inputs, state resource.PropertyMap,
properties map[string]resources.CloudAPIProperty) map[string]interface{} {
inputsMap := inputs.Mappable()
stateMap := state.Mappable()
return p.converter.SdkPropertiesToRequestBody(properties, inputsMap, stateMap)
}
// waitForResourceOpCompletion keeps polling the resource or operation URL until it gets
// a success or a failure of provisioning.
// Note that both a response and an error can be returned in case of a partially-failed deployment
// (e.g., resource is created but failed to initialize to completion).
func (p *googleCloudProvider) waitForResourceOpCompletion(baseUrl string, resp map[string]interface{}) (map[string]interface{}, error) {
retryPolicy := backoff.Backoff{
Min: 1 * time.Second,
Max: 15 * time.Second,
Factor: 1.5,
Jitter: true,
}
for {
logging.V(9).Infof("waiting for completion: %+v", resp)
// There are two styles of operations: one returns a 'done' boolean flag, another one returns status='DONE'.
done, hasDone := resp["done"].(bool)
status, hasStatus := resp["status"].(string)
if completed := (hasDone && done) || (hasStatus && status == "DONE"); completed {
// Extract an error message from the response, if any.
var err error
if failure, has := resp["error"]; has {
err = errors.Errorf("operation errored with %+v", failure)
} else if statusMessage, has := resp["statusMessage"]; has {
err = errors.Errorf("operation failed with %q", statusMessage)
}
// Extract the resource response, if any.
// A partial error could happen, so both response and error could be available.
if response, has := resp["response"].(map[string]interface{}); has {
return response, err
}
if operationType, has := resp["operationType"].(string); has && strings.Contains(strings.ToLower(operationType), "delete") {
return resp, err
}
// Check if there's a target link.
if targetLink, has := resp["targetLink"].(string); has {
// Try reading resource state.
state, getErr := p.client.RequestWithTimeout("GET", targetLink, nil, 0)
if getErr != nil {
if err != nil {
// Return the original creation error if resource read failed.
return nil, err
}
return nil, getErr
}
// A partial error could happen, so both response and error could be available.
return state, err
}
// At this point, we assume either a complete failure or a clean response.
if err != nil {
return nil, err
}
return resp, nil
}
var pollUri string
if selfLink, has := resp["selfLink"].(string); has && hasStatus {
pollUri = selfLink
} else {
if name, has := resp["name"].(string); has && strings.HasPrefix(name, "operations/") {
pollUri = fmt.Sprintf("%s/v1/%s", baseUrl, name)
}
}
if pollUri == "" {
return resp, nil
}
time.Sleep(retryPolicy.Duration())
op, err := p.client.RequestWithTimeout("GET", pollUri, nil, 0)
if err != nil {
return nil, errors.Wrapf(err, "polling operation status")
}
resp = op
}
}
// Read the current live state associated with a resource.
func (p *googleCloudProvider) Read(_ context.Context, req *rpc.ReadRequest) (*rpc.ReadResponse, error) {
urn := resource.URN(req.GetUrn())
label := fmt.Sprintf("%s.Read(%s)", p.name, urn)
resourceKey := string(urn.Type())
res, ok := p.resourceMap.Resources[resourceKey]
if !ok {
return nil, errors.Errorf("resource %q not found", resourceKey)
}
id := req.GetId()
uri := res.ResourceUrl(id)
// Retrieve the old state.
oldState, err := plugin.UnmarshalProperties(req.GetProperties(), plugin.MarshalOptions{
Label: fmt.Sprintf("%s.olds", label), KeepUnknowns: true, SkipNulls: true, KeepSecrets: true,
})
if err != nil {
return nil, err
}
// Read the current state of the resource from the API.
newState, err := p.client.RequestWithTimeout("GET", uri, nil, 0)
if err != nil {
if reqErr, ok := err.(*googleapi.Error); ok && reqErr.Code == http.StatusNotFound {
// 404 means that the resource was deleted.
return &rpc.ReadResponse{Id: ""}, nil
}
return nil, fmt.Errorf("error sending request: %s", err)
}
// Extract old inputs from the `__inputs` field of the old state.
inputs := parseCheckpointObject(oldState)
newStateProps := resource.NewPropertyMapFromMap(newState)
if inputs == nil {
return nil, status.Error(codes.Unimplemented, "Import is not yet implemented")
} else {
// It's hard to infer the changes in the inputs shape based on the outputs without false positives.
// The current approach is complicated but it's aimed to minimize the noise while refreshing:
// 0. We have "old" inputs and outputs before refresh and "new" outputs read from API.
// 1. Project old outputs to their corresponding input shape (exclude read-only properties).
oldInputProjection := getInputsFromState(res, oldState)
// 2. Project new outputs to their corresponding input shape (exclude read-only properties).
newInputProjection := getInputsFromState(res, newStateProps)
// 3. Calculate the difference between two projections. This should give us actual significant changes
// that happened in Google Cloud between the last resource update and its current state.
diff := oldInputProjection.Diff(newInputProjection)
// 4. Apply this difference to the actual inputs (not a projection) that we have in state.
inputs = applyDiff(inputs, diff)
}
// Store both outputs and inputs into the state checkpoint.
checkpoint, err := plugin.MarshalProperties(
checkpointObject(inputs, newState),
plugin.MarshalOptions{Label: fmt.Sprintf("%s.checkpoint", label), KeepSecrets: true, SkipNulls: true},
)
if err != nil {
return nil, err
}
inputsRecord, err := plugin.MarshalProperties(
inputs,
plugin.MarshalOptions{Label: fmt.Sprintf("%s.inputs", label), KeepUnknowns: true, SkipNulls: true},
)
if err != nil {
return nil, err
}
return &rpc.ReadResponse{Id: id, Properties: checkpoint, Inputs: inputsRecord}, nil
}
// Update updates an existing resource with new values.
func (p *googleCloudProvider) Update(_ context.Context, req *rpc.UpdateRequest) (*rpc.UpdateResponse, error) {
urn := resource.URN(req.GetUrn())
label := fmt.Sprintf("%s.Update(%s)", p.name, urn)
logging.V(9).Infof("%s executing", label)
// Deserialize RPC inputs
inputs, err := plugin.UnmarshalProperties(req.GetNews(), plugin.MarshalOptions{
Label: fmt.Sprintf("%s.properties", label), SkipNulls: true,
})
if err != nil {
return nil, err
}
// Deserialize the last known state.
oldState, err := plugin.UnmarshalProperties(req.GetOlds(), plugin.MarshalOptions{
Label: fmt.Sprintf("%s.oldState", label), SkipNulls: true,
})
if err != nil {
return nil, errors.Wrapf(err, "reading resource state")
}
resourceKey := string(urn.Type())
res, ok := p.resourceMap.Resources[resourceKey]
if !ok {
return nil, errors.Errorf("resource %q not found", resourceKey)
}
body := p.prepareAPIInputs(inputs, oldState, res.UpdateProperties)
uri := res.ResourceUrl(req.GetId())
if strings.HasSuffix(uri, ":getIamPolicy") {
uri = strings.ReplaceAll(uri, ":getIamPolicy", ":setIamPolicy")
}
op, err := p.client.RequestWithTimeout(res.UpdateVerb, uri, body, 0)
if err != nil {
return nil, fmt.Errorf("error sending request: %s: %q %+v", err, uri, body)
}
resp, err := p.waitForResourceOpCompletion(res.BaseUrl, op)
if err != nil {
return nil, errors.Wrapf(err, "waiting for completion")
}
// Read the inputs to persist them into state.
newInputs, err := plugin.UnmarshalProperties(req.GetNews(), plugin.MarshalOptions{
Label: fmt.Sprintf("%s.newInputs", label),
KeepUnknowns: true,
KeepSecrets: true,
})
if err != nil {
return nil, errors.Wrapf(err, "diff failed because malformed resource inputs")
}
// Store both outputs and inputs into the state and return RPC checkpoint.
outputs, err := plugin.MarshalProperties(
checkpointObject(newInputs, resp),
plugin.MarshalOptions{Label: fmt.Sprintf("%s.response", label), KeepSecrets: true, SkipNulls: true},
)
return &rpc.UpdateResponse{
Properties: outputs,
}, nil
}
// Delete tears down an existing resource with the given ID. If it fails, the resource is assumed
// to still exist.
func (p *googleCloudProvider) Delete(_ context.Context, req *rpc.DeleteRequest) (*empty.Empty, error) {
urn := resource.URN(req.GetUrn())
resourceKey := string(urn.Type())
res, ok := p.resourceMap.Resources[resourceKey]
if !ok {
return nil, errors.Errorf("resource %q not found", resourceKey)
}
uri := res.ResourceUrl(req.GetId())
if res.NoDelete {
// At the time of writing, the classic GCP provider has the same behavior and warning for 10 resources.
logging.V(1).Infof("%q resources"+
" cannot be deleted from Google Cloud. The resource %s will be removed from Pulumi"+
" state, but will still be present on Google Cloud.", resourceKey, req.GetId())
return &empty.Empty{}, nil
}
resp, err := p.client.RequestWithTimeout("DELETE", uri, nil, 0)
if err != nil {
return nil, fmt.Errorf("error sending request: %s", err)
}
_, err = p.waitForResourceOpCompletion(res.BaseUrl, resp)
if err != nil {
return nil, errors.Wrapf(err, "waiting for completion")
}
return &empty.Empty{}, nil
}
// Construct creates a new component resource.
func (p *googleCloudProvider) Construct(_ context.Context, _ *rpc.ConstructRequest) (*rpc.ConstructResponse, error) {
return nil, status.Error(codes.Unimplemented, "Construct is not yet implemented")
}
// Call dynamically executes a method in the provider associated with a component resource.
func (p *googleCloudProvider) Call(_ context.Context, _ *rpc.CallRequest) (*rpc.CallResponse, error) {
return nil, status.Error(codes.Unimplemented, "Call is not yet implemented")
}
// GetPluginInfo returns generic information about this plugin, like its version.
func (p *googleCloudProvider) GetPluginInfo(context.Context, *empty.Empty) (*rpc.PluginInfo, error) {
return &rpc.PluginInfo{
Version: p.version,
}, nil
}
// Cancel signals the provider to gracefully shut down and abort any ongoing resource operations.
// Operations aborted in this way will return an error (e.g., `Update` and `Create` will either a
// creation error or an initialization error). Since Cancel is advisory and non-blocking, it is up
// to the host to decide how long to wait after Cancel is called before (e.g.)
// hard-closing any gRPC connection.
func (p *googleCloudProvider) Cancel(context.Context, *empty.Empty) (*empty.Empty, error) {
return &empty.Empty{}, nil
}
func (p *googleCloudProvider) setLoggingContext(ctx context.Context) {
log.SetOutput(&LogRedirector{
writers: map[string]func(string) error{
tfTracePrefix: func(msg string) error { return p.host.Log(ctx, diag.Debug, "", msg) },
tfDebugPrefix: func(msg string) error { return p.host.Log(ctx, diag.Debug, "", msg) },
tfInfoPrefix: func(msg string) error { return p.host.Log(ctx, diag.Info, "", msg) },
tfWarnPrefix: func(msg string) error { return p.host.Log(ctx, diag.Warning, "", msg) },
tfErrorPrefix: func(msg string) error { return p.host.Log(ctx, diag.Error, "", msg) },
},
})
}
func (p *googleCloudProvider) getConfig(configName, envName string) string {
if val, ok := p.config[configName]; ok {
return val
}
return os.Getenv(envName)
}
func (p *googleCloudProvider) getPartnerName() string {
result := p.getConfig("partnerName", "GOOGLE_PARTNER_NAME")
if result != "" {
return result
} else {
disablePartner := p.getConfig("disablePartnerName", "GOOGLE_DISABLE_PARTNER_NAME")
if disablePartner == "true" {
return ""
}
}
return "Pulumi"
}
func getPulumiVersion() string {
if bi, ok := debug.ReadBuildInfo(); ok {
for _, dep := range bi.Deps {
if strings.HasPrefix(dep.Path, "github.com/pulumi/pulumi/pkg") {
return strings.TrimPrefix(dep.Version, "v")
}
}
}
// We should never get here but let's not panic and return something sensible if we do.
logging.V(4).Info("No Pulumi package version found, using '3' as the default version for user-agent")
return "3"
}
// partialError creates an error for resources that did not complete an operation in progress.
// The last known state of the object is included in the error so that it can be checkpointed.
func partialError(id string, err error, state *structpb.Struct, inputs *structpb.Struct) error {
detail := rpc.ErrorResourceInitFailed{
Id: id,
Properties: state,
Reasons: []string{err.Error()},
Inputs: inputs,
}
return rpcerror.WithDetails(rpcerror.New(codes.Unknown, err.Error()), &detail)
}
// checkpointObject puts inputs in the `__inputs` field of the state.
func checkpointObject(inputs resource.PropertyMap, outputs map[string]interface{}) resource.PropertyMap {
object := resource.NewPropertyMapFromMap(outputs)
object["__inputs"] = resource.MakeSecret(resource.NewObjectProperty(inputs))
return object
}
// parseCheckpointObject returns inputs that are saved in the `__inputs` field of the state.
func parseCheckpointObject(obj resource.PropertyMap) resource.PropertyMap {
if inputs, ok := obj["__inputs"]; ok {
return inputs.SecretValue().Element.ObjectValue()
}
return nil
}
| {
return nil, errors.Wrap(err, "unmarshalling resource map")
} | conditional_block |
fetcher_default.go | // Copyright © 2023 Ory Corp
// SPDX-License-Identifier: Apache-2.0
package rule
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"io"
"net/http"
"net/url"
"path/filepath"
"strings"
"sync"
"time"
"github.com/ghodss/yaml"
"github.com/pkg/errors"
"gocloud.dev/blob"
_ "gocloud.dev/blob/azureblob"
_ "gocloud.dev/blob/gcsblob"
_ "gocloud.dev/blob/s3blob"
"github.com/ory/x/httpx"
"github.com/ory/x/urlx"
"github.com/ory/x/watcherx"
"github.com/ory/oathkeeper/driver/configuration"
"github.com/ory/oathkeeper/internal/cloudstorage"
"github.com/ory/oathkeeper/x"
)
var _ Fetcher = new(FetcherDefault)
type fetcherRegistry interface {
x.RegistryLogger
RuleRepository() Repository
}
type FetcherDefault struct {
config configuration.Provider
registry fetcherRegistry
hc *http.Client
mux *blob.URLMux
cache map[string][]Rule
cancelWatchers map[string]context.CancelFunc
events chan watcherx.Event
lock sync.Mutex
}
func NewFetcherDefault(
config configuration.Provider,
registry fetcherRegistry,
) *FetcherDefault {
return &FetcherDefault{
registry: registry,
config: config,
mux: cloudstorage.NewURLMux(),
hc: httpx.NewResilientClient(httpx.ResilientClientWithConnectionTimeout(15 * time.Second)).StandardClient(),
cache: make(map[string][]Rule),
cancelWatchers: make(map[string]context.CancelFunc),
events: make(chan watcherx.Event),
}
}
func (f *FetcherDefault) SetURLMux(mux *blob.URLMux) {
f.mux = mux
}
func splitLocalRemoteRepos(ruleRepos []url.URL) (files []string, nonFiles []url.URL) {
files = make([]string, 0, len(ruleRepos))
nonFiles = make([]url.URL, 0, len(ruleRepos))
for _, repo := range ruleRepos {
if repo.Scheme == "file" || repo.Scheme == "" {
files = append(files,
filepath.Clean(
urlx.GetURLFilePath(&repo)))
} else {
nonFiles = append(nonFiles, repo)
}
}
return files, nonFiles
}
// watchLocalFiles watches all files that are configured in the config and are not watched already.
// It also cancels watchers for files that are no longer configured. This function is idempotent.
func (f *FetcherDefault) watchLocalFiles(ctx context.Context) {
f.lock.Lock()
repoChanged := false
cancelWatchers := make(map[string]context.CancelFunc, len(f.cancelWatchers))
localFiles, _ := splitLocalRemoteRepos(f.config.AccessRuleRepositories())
for _, fp := range localFiles {
if cancel, ok := f.cancelWatchers[fp]; !ok {
// watch all files we are not yet watching
repoChanged = true
ctx, cancelWatchers[fp] = context.WithCancel(ctx)
w, err := watcherx.WatchFile(ctx, fp, f.events)
if err != nil { | // we force reading the files
done, err := w.DispatchNow()
if err != nil {
f.registry.Logger().WithError(err).WithField("file", fp).Error("Unable to read file, ignoring it.")
continue
}
go func() { <-done }() // we do not need to wait here, but we need to clear the channel
} else {
// keep watching files we are already watching
cancelWatchers[fp] = cancel
}
}
// cancel watchers for files we are no longer watching
for fp, cancel := range f.cancelWatchers {
if _, ok := cancelWatchers[fp]; !ok {
f.registry.Logger().WithField("file", fp).Info("Stopped watching access rule file.")
repoChanged = true
cancel()
delete(f.cache, fp)
}
}
f.cancelWatchers = cancelWatchers
f.lock.Unlock() // release lock before processing events
if repoChanged {
f.registry.Logger().WithField("repos", f.config.Get(configuration.AccessRuleRepositories)).Info("Detected access rule repository change, processing updates.")
if err := f.updateRulesFromCache(ctx); err != nil {
f.registry.Logger().WithError(err).WithField("event_source", "local repo change").Error("Unable to update access rules.")
}
}
}
func (f *FetcherDefault) Watch(ctx context.Context) error {
f.watchLocalFiles(ctx)
getRemoteRepos := func() map[url.URL]struct{} {
_, remoteRepos := splitLocalRemoteRepos(f.config.AccessRuleRepositories())
repos := make(map[url.URL]struct{}, len(remoteRepos))
for _, repo := range remoteRepos {
repos[repo] = struct{}{}
}
return repos
}
// capture the previous config values to detect changes, and trigger initial processing
strategy := f.config.AccessRuleMatchingStrategy()
if err := f.processStrategyUpdate(ctx, strategy); err != nil {
return err
}
remoteRepos := getRemoteRepos()
if err := f.processRemoteRepoUpdate(ctx, nil, remoteRepos); err != nil {
return err
}
f.config.AddWatcher(func(_ watcherx.Event, err error) {
if err != nil {
return
}
// watch files that need to be watched
f.watchLocalFiles(ctx)
// update the matching strategy if it changed
if newStrategy := f.config.AccessRuleMatchingStrategy(); newStrategy != strategy {
f.registry.Logger().WithField("strategy", newStrategy).Info("Detected access rule matching strategy change, processing updates.")
if err := f.processStrategyUpdate(ctx, newStrategy); err != nil {
f.registry.Logger().WithError(err).Error("Unable to update access rule matching strategy.")
} else {
strategy = newStrategy
}
}
// update & fetch the remote repos if they changed
newRemoteRepos := getRemoteRepos()
if err := f.processRemoteRepoUpdate(ctx, remoteRepos, newRemoteRepos); err != nil {
f.registry.Logger().WithError(err).Error("Unable to update remote access rule repository config.")
}
remoteRepos = newRemoteRepos
})
go f.processLocalUpdates(ctx)
return nil
}
func (f *FetcherDefault) processStrategyUpdate(ctx context.Context, newValue configuration.MatchingStrategy) error {
if err := f.registry.RuleRepository().SetMatchingStrategy(ctx, newValue); err != nil {
return err
}
return nil
}
func (f *FetcherDefault) processRemoteRepoUpdate(ctx context.Context, oldRepos, newRepos map[url.URL]struct{}) error {
repoChanged := false
for repo := range newRepos {
if _, ok := f.cache[repo.String()]; !ok {
repoChanged = true
f.registry.Logger().WithField("repo", repo.String()).Info("New repo detected, fetching access rules.")
rules, err := f.fetch(repo)
if err != nil {
f.registry.Logger().WithError(err).WithField("repo", repo.String()).Error("Unable to fetch access rules.")
return err
}
f.cacheRules(repo.String(), rules)
}
}
for repo := range oldRepos {
if _, ok := newRepos[repo]; !ok {
repoChanged = true
f.registry.Logger().WithField("repo", repo.String()).Info("Repo was removed, removing access rules.")
f.lock.Lock()
delete(f.cache, repo.String())
f.lock.Unlock()
}
}
if repoChanged {
if err := f.updateRulesFromCache(ctx); err != nil {
f.registry.Logger().WithError(err).WithField("event_source", "remote change").Error("Unable to update access rules.")
return err
}
}
return nil
}
func (f *FetcherDefault) processLocalUpdates(ctx context.Context) {
for {
select {
case <-ctx.Done():
return
case e, ok := <-f.events:
if !ok {
// channel was closed
return
}
f.registry.Logger().
WithField("event", "fsnotify").
WithField("file", e.Source()).
Info("Detected file change for access rules. Triggering a reload.")
if e.Reader() == nil {
f.registry.Logger().WithField("file", e.Source()).Error("Unable to read access rules probably because they were deleted, skipping those.")
continue
}
rules, err := f.decode(e.Reader())
if err != nil {
f.registry.Logger().WithField("file", e.Source()).WithError(err).Error("Unable to decode access rules, skipping those.")
continue
}
f.cacheRules(e.Source(), rules)
if err := f.updateRulesFromCache(ctx); err != nil {
f.registry.Logger().WithError(err).WithField("event_source", "local change").Error("Unable to update access rules.")
}
}
}
}
func (f *FetcherDefault) cacheRules(source string, rules []Rule) {
f.lock.Lock()
defer f.lock.Unlock()
f.cache[source] = rules
}
func (f *FetcherDefault) updateRulesFromCache(ctx context.Context) error {
f.lock.Lock()
defer f.lock.Unlock()
allRules := make([]Rule, 0)
for _, rules := range f.cache {
allRules = append(allRules, rules...)
}
return f.registry.RuleRepository().Set(ctx, allRules)
}
func (f *FetcherDefault) fetch(source url.URL) ([]Rule, error) {
f.registry.Logger().
WithField("location", source.String()).
Debugf("Fetching access rules from given location because something changed.")
switch source.Scheme {
case "azblob", "gs", "s3":
return f.fetchFromStorage(source)
case "http", "https":
return f.fetchRemote(source.String())
case "inline":
src, err := base64.StdEncoding.DecodeString(strings.Replace(source.String(), "inline://", "", 1))
if err != nil {
return nil, errors.Wrapf(err, "rule: %s", source.String())
}
return f.decode(bytes.NewBuffer(src))
}
return nil, errors.Errorf("rule: source url uses an unknown scheme: %s", source.String())
}
func (f *FetcherDefault) fetchRemote(source string) ([]Rule, error) {
res, err := f.hc.Get(source)
if err != nil {
return nil, errors.Wrapf(err, "rule: %s", source)
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
return nil, errors.Errorf("rule: expected http response status code 200 but got %d when fetching: %s", res.StatusCode, source)
}
return f.decode(res.Body)
}
func (f *FetcherDefault) decode(r io.Reader) ([]Rule, error) {
b, err := io.ReadAll(r)
if err != nil {
return nil, errors.WithStack(err)
}
var ks []Rule
if json.Valid(b) {
d := json.NewDecoder(bytes.NewReader(b))
d.DisallowUnknownFields()
if err := d.Decode(&ks); err != nil {
return nil, errors.WithStack(err)
}
return ks, nil
}
if err := yaml.Unmarshal(b, &ks); err != nil {
return nil, errors.WithStack(err)
}
return ks, nil
}
func (f *FetcherDefault) fetchFromStorage(source url.URL) ([]Rule, error) {
ctx := context.Background()
bucket, err := f.mux.OpenBucket(ctx, source.Scheme+"://"+source.Host)
if err != nil {
return nil, err
}
defer bucket.Close()
r, err := bucket.NewReader(ctx, source.Path[1:], nil)
if err != nil {
return nil, err
}
defer r.Close()
return f.decode(r)
}
|
f.registry.Logger().WithError(err).WithField("file", fp).Error("Unable to watch file, ignoring it.")
continue
}
| conditional_block |
fetcher_default.go | // Copyright © 2023 Ory Corp
// SPDX-License-Identifier: Apache-2.0
package rule
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"io"
"net/http"
"net/url"
"path/filepath"
"strings"
"sync"
"time"
"github.com/ghodss/yaml"
"github.com/pkg/errors"
"gocloud.dev/blob"
_ "gocloud.dev/blob/azureblob"
_ "gocloud.dev/blob/gcsblob"
_ "gocloud.dev/blob/s3blob"
"github.com/ory/x/httpx"
"github.com/ory/x/urlx"
"github.com/ory/x/watcherx"
"github.com/ory/oathkeeper/driver/configuration"
"github.com/ory/oathkeeper/internal/cloudstorage"
"github.com/ory/oathkeeper/x"
)
var _ Fetcher = new(FetcherDefault)
type fetcherRegistry interface {
x.RegistryLogger
RuleRepository() Repository
}
type FetcherDefault struct {
config configuration.Provider
registry fetcherRegistry
hc *http.Client
mux *blob.URLMux
cache map[string][]Rule
cancelWatchers map[string]context.CancelFunc
events chan watcherx.Event
lock sync.Mutex
}
func NewFetcherDefault(
config configuration.Provider,
registry fetcherRegistry,
) *FetcherDefault {
return &FetcherDefault{
registry: registry,
config: config,
mux: cloudstorage.NewURLMux(),
hc: httpx.NewResilientClient(httpx.ResilientClientWithConnectionTimeout(15 * time.Second)).StandardClient(),
cache: make(map[string][]Rule),
cancelWatchers: make(map[string]context.CancelFunc),
events: make(chan watcherx.Event),
}
}
func (f *FetcherDefault) SetURLMux(mux *blob.URLMux) {
f.mux = mux
}
func splitLocalRemoteRepos(ruleRepos []url.URL) (files []string, nonFiles []url.URL) {
files = make([]string, 0, len(ruleRepos))
nonFiles = make([]url.URL, 0, len(ruleRepos))
for _, repo := range ruleRepos {
if repo.Scheme == "file" || repo.Scheme == "" {
files = append(files,
filepath.Clean(
urlx.GetURLFilePath(&repo)))
} else {
nonFiles = append(nonFiles, repo)
}
}
return files, nonFiles
}
// watchLocalFiles watches all files that are configured in the config and are not watched already.
// It also cancels watchers for files that are no longer configured. This function is idempotent.
func (f *FetcherDefault) watchLocalFiles(ctx context.Context) {
f.lock.Lock()
repoChanged := false
cancelWatchers := make(map[string]context.CancelFunc, len(f.cancelWatchers))
localFiles, _ := splitLocalRemoteRepos(f.config.AccessRuleRepositories())
for _, fp := range localFiles {
if cancel, ok := f.cancelWatchers[fp]; !ok {
// watch all files we are not yet watching
repoChanged = true
ctx, cancelWatchers[fp] = context.WithCancel(ctx)
w, err := watcherx.WatchFile(ctx, fp, f.events)
if err != nil {
f.registry.Logger().WithError(err).WithField("file", fp).Error("Unable to watch file, ignoring it.")
continue
}
// we force reading the files
done, err := w.DispatchNow()
if err != nil {
f.registry.Logger().WithError(err).WithField("file", fp).Error("Unable to read file, ignoring it.")
continue
}
go func() { <-done }() // we do not need to wait here, but we need to clear the channel
} else {
// keep watching files we are already watching
cancelWatchers[fp] = cancel
}
}
// cancel watchers for files we are no longer watching
for fp, cancel := range f.cancelWatchers {
if _, ok := cancelWatchers[fp]; !ok {
f.registry.Logger().WithField("file", fp).Info("Stopped watching access rule file.")
repoChanged = true
cancel()
delete(f.cache, fp)
}
}
f.cancelWatchers = cancelWatchers
f.lock.Unlock() // release lock before processing events
if repoChanged { | if err := f.updateRulesFromCache(ctx); err != nil {
f.registry.Logger().WithError(err).WithField("event_source", "local repo change").Error("Unable to update access rules.")
}
}
}
func (f *FetcherDefault) Watch(ctx context.Context) error {
f.watchLocalFiles(ctx)
getRemoteRepos := func() map[url.URL]struct{} {
_, remoteRepos := splitLocalRemoteRepos(f.config.AccessRuleRepositories())
repos := make(map[url.URL]struct{}, len(remoteRepos))
for _, repo := range remoteRepos {
repos[repo] = struct{}{}
}
return repos
}
// capture the previous config values to detect changes, and trigger initial processing
strategy := f.config.AccessRuleMatchingStrategy()
if err := f.processStrategyUpdate(ctx, strategy); err != nil {
return err
}
remoteRepos := getRemoteRepos()
if err := f.processRemoteRepoUpdate(ctx, nil, remoteRepos); err != nil {
return err
}
f.config.AddWatcher(func(_ watcherx.Event, err error) {
if err != nil {
return
}
// watch files that need to be watched
f.watchLocalFiles(ctx)
// update the matching strategy if it changed
if newStrategy := f.config.AccessRuleMatchingStrategy(); newStrategy != strategy {
f.registry.Logger().WithField("strategy", newStrategy).Info("Detected access rule matching strategy change, processing updates.")
if err := f.processStrategyUpdate(ctx, newStrategy); err != nil {
f.registry.Logger().WithError(err).Error("Unable to update access rule matching strategy.")
} else {
strategy = newStrategy
}
}
// update & fetch the remote repos if they changed
newRemoteRepos := getRemoteRepos()
if err := f.processRemoteRepoUpdate(ctx, remoteRepos, newRemoteRepos); err != nil {
f.registry.Logger().WithError(err).Error("Unable to update remote access rule repository config.")
}
remoteRepos = newRemoteRepos
})
go f.processLocalUpdates(ctx)
return nil
}
func (f *FetcherDefault) processStrategyUpdate(ctx context.Context, newValue configuration.MatchingStrategy) error {
if err := f.registry.RuleRepository().SetMatchingStrategy(ctx, newValue); err != nil {
return err
}
return nil
}
func (f *FetcherDefault) processRemoteRepoUpdate(ctx context.Context, oldRepos, newRepos map[url.URL]struct{}) error {
repoChanged := false
for repo := range newRepos {
if _, ok := f.cache[repo.String()]; !ok {
repoChanged = true
f.registry.Logger().WithField("repo", repo.String()).Info("New repo detected, fetching access rules.")
rules, err := f.fetch(repo)
if err != nil {
f.registry.Logger().WithError(err).WithField("repo", repo.String()).Error("Unable to fetch access rules.")
return err
}
f.cacheRules(repo.String(), rules)
}
}
for repo := range oldRepos {
if _, ok := newRepos[repo]; !ok {
repoChanged = true
f.registry.Logger().WithField("repo", repo.String()).Info("Repo was removed, removing access rules.")
f.lock.Lock()
delete(f.cache, repo.String())
f.lock.Unlock()
}
}
if repoChanged {
if err := f.updateRulesFromCache(ctx); err != nil {
f.registry.Logger().WithError(err).WithField("event_source", "remote change").Error("Unable to update access rules.")
return err
}
}
return nil
}
func (f *FetcherDefault) processLocalUpdates(ctx context.Context) {
for {
select {
case <-ctx.Done():
return
case e, ok := <-f.events:
if !ok {
// channel was closed
return
}
f.registry.Logger().
WithField("event", "fsnotify").
WithField("file", e.Source()).
Info("Detected file change for access rules. Triggering a reload.")
if e.Reader() == nil {
f.registry.Logger().WithField("file", e.Source()).Error("Unable to read access rules probably because they were deleted, skipping those.")
continue
}
rules, err := f.decode(e.Reader())
if err != nil {
f.registry.Logger().WithField("file", e.Source()).WithError(err).Error("Unable to decode access rules, skipping those.")
continue
}
f.cacheRules(e.Source(), rules)
if err := f.updateRulesFromCache(ctx); err != nil {
f.registry.Logger().WithError(err).WithField("event_source", "local change").Error("Unable to update access rules.")
}
}
}
}
func (f *FetcherDefault) cacheRules(source string, rules []Rule) {
f.lock.Lock()
defer f.lock.Unlock()
f.cache[source] = rules
}
func (f *FetcherDefault) updateRulesFromCache(ctx context.Context) error {
f.lock.Lock()
defer f.lock.Unlock()
allRules := make([]Rule, 0)
for _, rules := range f.cache {
allRules = append(allRules, rules...)
}
return f.registry.RuleRepository().Set(ctx, allRules)
}
func (f *FetcherDefault) fetch(source url.URL) ([]Rule, error) {
f.registry.Logger().
WithField("location", source.String()).
Debugf("Fetching access rules from given location because something changed.")
switch source.Scheme {
case "azblob", "gs", "s3":
return f.fetchFromStorage(source)
case "http", "https":
return f.fetchRemote(source.String())
case "inline":
src, err := base64.StdEncoding.DecodeString(strings.Replace(source.String(), "inline://", "", 1))
if err != nil {
return nil, errors.Wrapf(err, "rule: %s", source.String())
}
return f.decode(bytes.NewBuffer(src))
}
return nil, errors.Errorf("rule: source url uses an unknown scheme: %s", source.String())
}
func (f *FetcherDefault) fetchRemote(source string) ([]Rule, error) {
res, err := f.hc.Get(source)
if err != nil {
return nil, errors.Wrapf(err, "rule: %s", source)
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
return nil, errors.Errorf("rule: expected http response status code 200 but got %d when fetching: %s", res.StatusCode, source)
}
return f.decode(res.Body)
}
func (f *FetcherDefault) decode(r io.Reader) ([]Rule, error) {
b, err := io.ReadAll(r)
if err != nil {
return nil, errors.WithStack(err)
}
var ks []Rule
if json.Valid(b) {
d := json.NewDecoder(bytes.NewReader(b))
d.DisallowUnknownFields()
if err := d.Decode(&ks); err != nil {
return nil, errors.WithStack(err)
}
return ks, nil
}
if err := yaml.Unmarshal(b, &ks); err != nil {
return nil, errors.WithStack(err)
}
return ks, nil
}
func (f *FetcherDefault) fetchFromStorage(source url.URL) ([]Rule, error) {
ctx := context.Background()
bucket, err := f.mux.OpenBucket(ctx, source.Scheme+"://"+source.Host)
if err != nil {
return nil, err
}
defer bucket.Close()
r, err := bucket.NewReader(ctx, source.Path[1:], nil)
if err != nil {
return nil, err
}
defer r.Close()
return f.decode(r)
} | f.registry.Logger().WithField("repos", f.config.Get(configuration.AccessRuleRepositories)).Info("Detected access rule repository change, processing updates.") | random_line_split |
fetcher_default.go | // Copyright © 2023 Ory Corp
// SPDX-License-Identifier: Apache-2.0
package rule
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"io"
"net/http"
"net/url"
"path/filepath"
"strings"
"sync"
"time"
"github.com/ghodss/yaml"
"github.com/pkg/errors"
"gocloud.dev/blob"
_ "gocloud.dev/blob/azureblob"
_ "gocloud.dev/blob/gcsblob"
_ "gocloud.dev/blob/s3blob"
"github.com/ory/x/httpx"
"github.com/ory/x/urlx"
"github.com/ory/x/watcherx"
"github.com/ory/oathkeeper/driver/configuration"
"github.com/ory/oathkeeper/internal/cloudstorage"
"github.com/ory/oathkeeper/x"
)
// Compile-time assertion that *FetcherDefault implements Fetcher.
var _ Fetcher = new(FetcherDefault)

// fetcherRegistry is the subset of the registry the fetcher needs: a logger
// plus access to the rule repository.
type fetcherRegistry interface {
	x.RegistryLogger
	RuleRepository() Repository
}

// FetcherDefault loads access rules from local files, HTTP(S) endpoints,
// cloud storage buckets and inline sources, and pushes them into the rule
// repository.
type FetcherDefault struct {
	config   configuration.Provider
	registry fetcherRegistry
	hc       *http.Client // HTTP client for remote rule repositories
	mux      *blob.URLMux // cloud storage bucket opener (s3/gs/azblob)

	// cache maps a repository source (URL string or file path) to the rules
	// most recently decoded from it; protected by lock.
	cache map[string][]Rule
	// cancelWatchers maps a watched file path to the cancel function of its
	// watcher context; protected by lock.
	cancelWatchers map[string]context.CancelFunc
	events         chan watcherx.Event // file-change events from watcherx
	lock           sync.Mutex
}
// NewFetcherDefault constructs a FetcherDefault with an empty rule cache, the
// default cloud storage URL mux, and a resilient HTTP client with a 15 second
// connection timeout.
func NewFetcherDefault(
	config configuration.Provider,
	registry fetcherRegistry,
) *FetcherDefault {
	return &FetcherDefault{
		registry:       registry,
		config:         config,
		mux:            cloudstorage.NewURLMux(),
		hc:             httpx.NewResilientClient(httpx.ResilientClientWithConnectionTimeout(15 * time.Second)).StandardClient(),
		cache:          make(map[string][]Rule),
		cancelWatchers: make(map[string]context.CancelFunc),
		events:         make(chan watcherx.Event),
	}
}
// SetURLMux replaces the cloud storage URL mux, e.g. to inject a custom
// bucket opener.
func (f *FetcherDefault) SetURLMux(mux *blob.URLMux) {
	f.mux = mux
}
// splitLocalRemoteRepos partitions the configured rule repositories into
// cleaned local file paths (scheme "file" or empty) and all remaining,
// non-file URLs.
func splitLocalRemoteRepos(ruleRepos []url.URL) (files []string, nonFiles []url.URL) {
	files = make([]string, 0, len(ruleRepos))
	nonFiles = make([]url.URL, 0, len(ruleRepos))
	for i := range ruleRepos {
		repo := ruleRepos[i]
		switch repo.Scheme {
		case "file", "":
			files = append(files, filepath.Clean(urlx.GetURLFilePath(&repo)))
		default:
			nonFiles = append(nonFiles, repo)
		}
	}
	return files, nonFiles
}
// watchLocalFiles watches all files that are configured in the config and are not watched already.
// It also cancels watchers for files that are no longer configured. This function is idempotent.
// NOTE(review): ctx is reassigned every loop iteration, so each new watcher's
// context is a child of the previous watcher's cancelable context — canceling
// an earlier watcher also cancels the later ones. Confirm this chaining is
// intended.
func (f *FetcherDefault) watchLocalFiles(ctx context.Context) {
	f.lock.Lock()
	repoChanged := false
	cancelWatchers := make(map[string]context.CancelFunc, len(f.cancelWatchers))
	localFiles, _ := splitLocalRemoteRepos(f.config.AccessRuleRepositories())
	for _, fp := range localFiles {
		if cancel, ok := f.cancelWatchers[fp]; !ok {
			// watch all files we are not yet watching
			repoChanged = true
			// Each file gets its own cancelable context so it can be stopped
			// individually once it disappears from the config.
			ctx, cancelWatchers[fp] = context.WithCancel(ctx)
			w, err := watcherx.WatchFile(ctx, fp, f.events)
			if err != nil {
				f.registry.Logger().WithError(err).WithField("file", fp).Error("Unable to watch file, ignoring it.")
				continue
			}
			// we force reading the files
			done, err := w.DispatchNow()
			if err != nil {
				f.registry.Logger().WithError(err).WithField("file", fp).Error("Unable to read file, ignoring it.")
				continue
			}
			go func() { <-done }() // we do not need to wait here, but we need to clear the channel
		} else {
			// keep watching files we are already watching
			cancelWatchers[fp] = cancel
		}
	}
	// cancel watchers for files we are no longer watching
	for fp, cancel := range f.cancelWatchers {
		if _, ok := cancelWatchers[fp]; !ok {
			f.registry.Logger().WithField("file", fp).Info("Stopped watching access rule file.")
			repoChanged = true
			cancel()
			// Drop the stale rules so they disappear from the repository on
			// the next updateRulesFromCache.
			delete(f.cache, fp)
		}
	}
	f.cancelWatchers = cancelWatchers
	f.lock.Unlock() // release lock before processing events
	if repoChanged {
		f.registry.Logger().WithField("repos", f.config.Get(configuration.AccessRuleRepositories)).Info("Detected access rule repository change, processing updates.")
		if err := f.updateRulesFromCache(ctx); err != nil {
			f.registry.Logger().WithError(err).WithField("event_source", "local repo change").Error("Unable to update access rules.")
		}
	}
}
// Watch starts watching all configured access rule sources: local files via
// file watchers and remote repositories via config-change callbacks. It runs
// an initial sync of the matching strategy and the remote repositories,
// registers a config watcher reacting to later changes, and spawns the
// goroutine that consumes local file events. Returns an error only if the
// initial sync fails.
func (f *FetcherDefault) Watch(ctx context.Context) error {
	f.watchLocalFiles(ctx)
	getRemoteRepos := func() map[url.URL]struct{} {
		_, remoteRepos := splitLocalRemoteRepos(f.config.AccessRuleRepositories())
		repos := make(map[url.URL]struct{}, len(remoteRepos))
		for _, repo := range remoteRepos {
			repos[repo] = struct{}{}
		}
		return repos
	}
	// capture the previous config values to detect changes, and trigger initial processing
	strategy := f.config.AccessRuleMatchingStrategy()
	if err := f.processStrategyUpdate(ctx, strategy); err != nil {
		return err
	}
	remoteRepos := getRemoteRepos()
	if err := f.processRemoteRepoUpdate(ctx, nil, remoteRepos); err != nil {
		return err
	}
	// The callback closes over strategy and remoteRepos so it can diff each
	// config change against the previously seen values.
	f.config.AddWatcher(func(_ watcherx.Event, err error) {
		if err != nil {
			return
		}
		// watch files that need to be watched
		f.watchLocalFiles(ctx)
		// update the matching strategy if it changed
		if newStrategy := f.config.AccessRuleMatchingStrategy(); newStrategy != strategy {
			f.registry.Logger().WithField("strategy", newStrategy).Info("Detected access rule matching strategy change, processing updates.")
			if err := f.processStrategyUpdate(ctx, newStrategy); err != nil {
				f.registry.Logger().WithError(err).Error("Unable to update access rule matching strategy.")
			} else {
				strategy = newStrategy
			}
		}
		// update & fetch the remote repos if they changed
		newRemoteRepos := getRemoteRepos()
		if err := f.processRemoteRepoUpdate(ctx, remoteRepos, newRemoteRepos); err != nil {
			f.registry.Logger().WithError(err).Error("Unable to update remote access rule repository config.")
		}
		remoteRepos = newRemoteRepos
	})
	go f.processLocalUpdates(ctx)
	return nil
}
// processStrategyUpdate pushes the configured matching strategy into the rule
// repository and propagates any error from it.
func (f *FetcherDefault) processStrategyUpdate(ctx context.Context, newValue configuration.MatchingStrategy) error {
	return f.registry.RuleRepository().SetMatchingStrategy(ctx, newValue)
}
// processRemoteRepoUpdate fetches rules for repositories present in newRepos
// but not yet cached, evicts cache entries for repositories absent from
// newRepos, and pushes the merged cache to the rule repository when anything
// changed. It returns the first fetch or update error encountered.
// NOTE(review): f.cache is read here without holding f.lock, while
// cacheRules/updateRulesFromCache do lock it — confirm this function is only
// invoked from a single goroutine.
func (f *FetcherDefault) processRemoteRepoUpdate(ctx context.Context, oldRepos, newRepos map[url.URL]struct{}) error {
	repoChanged := false
	for repo := range newRepos {
		if _, ok := f.cache[repo.String()]; !ok {
			repoChanged = true
			f.registry.Logger().WithField("repo", repo.String()).Info("New repo detected, fetching access rules.")
			rules, err := f.fetch(repo)
			if err != nil {
				f.registry.Logger().WithError(err).WithField("repo", repo.String()).Error("Unable to fetch access rules.")
				return err
			}
			f.cacheRules(repo.String(), rules)
		}
	}
	for repo := range oldRepos {
		if _, ok := newRepos[repo]; !ok {
			repoChanged = true
			f.registry.Logger().WithField("repo", repo.String()).Info("Repo was removed, removing access rules.")
			f.lock.Lock()
			delete(f.cache, repo.String())
			f.lock.Unlock()
		}
	}
	if repoChanged {
		if err := f.updateRulesFromCache(ctx); err != nil {
			f.registry.Logger().WithError(err).WithField("event_source", "remote change").Error("Unable to update access rules.")
			return err
		}
	}
	return nil
}
// processLocalUpdates consumes file-watcher events until ctx is canceled or
// the event channel is closed, re-decoding each changed file and refreshing
// the rule repository from the cache.
func (f *FetcherDefault) processLocalUpdates(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			return
		case e, ok := <-f.events:
			if !ok {
				// channel was closed
				return
			}
			f.registry.Logger().
				WithField("event", "fsnotify").
				WithField("file", e.Source()).
				Info("Detected file change for access rules. Triggering a reload.")
			if e.Reader() == nil {
				f.registry.Logger().WithField("file", e.Source()).Error("Unable to read access rules probably because they were deleted, skipping those.")
				continue
			}
			rules, err := f.decode(e.Reader())
			if err != nil {
				f.registry.Logger().WithField("file", e.Source()).WithError(err).Error("Unable to decode access rules, skipping those.")
				continue
			}
			f.cacheRules(e.Source(), rules)
			if err := f.updateRulesFromCache(ctx); err != nil {
				f.registry.Logger().WithError(err).WithField("event_source", "local change").Error("Unable to update access rules.")
			}
		}
	}
}
// cacheRules stores the latest decoded rules for source in the in-memory
// cache, replacing any previous entry. Guarded by f.lock.
func (f *FetcherDefault) cacheRules(source string, rules []Rule) {
	f.lock.Lock()
	defer f.lock.Unlock()
	f.cache[source] = rules
}
// updateRulesFromCache flattens all cached rule sets into a single list and
// replaces the rule repository contents with it. Guarded by f.lock.
func (f *FetcherDefault) updateRulesFromCache(ctx context.Context) error {
	f.lock.Lock()
	defer f.lock.Unlock()
	allRules := make([]Rule, 0)
	for _, rules := range f.cache {
		allRules = append(allRules, rules...)
	}
	return f.registry.RuleRepository().Set(ctx, allRules)
}
// fetch dispatches to a scheme-specific loader: cloud storage for
// azblob/gs/s3, HTTP(S) for http/https, and base64-decoded inline payloads
// for inline://. Any other scheme yields an error.
func (f *FetcherDefault) fetch(source url.URL) ([]Rule, error) {
	f.registry.Logger().
		WithField("location", source.String()).
		Debugf("Fetching access rules from given location because something changed.")
	switch source.Scheme {
	case "azblob", "gs", "s3":
		return f.fetchFromStorage(source)
	case "http", "https":
		return f.fetchRemote(source.String())
	case "inline":
		// Strip the scheme prefix; the remainder is base64-encoded rules.
		src, err := base64.StdEncoding.DecodeString(strings.Replace(source.String(), "inline://", "", 1))
		if err != nil {
			return nil, errors.Wrapf(err, "rule: %s", source.String())
		}
		return f.decode(bytes.NewBuffer(src))
	}
	return nil, errors.Errorf("rule: source url uses an unknown scheme: %s", source.String())
}
// fetchRemote downloads access rules via HTTP(S) from source and decodes the
// response body; any status other than 200 OK is an error.
func (f *FetcherDefault) fetchRemote(source string) ([]Rule, error) {
	res, err := f.hc.Get(source)
	if err != nil {
		return nil, errors.Wrapf(err, "rule: %s", source)
	}
	defer res.Body.Close()
	if res.StatusCode != http.StatusOK {
		return nil, errors.Errorf("rule: expected http response status code 200 but got %d when fetching: %s", res.StatusCode, source)
	}
	return f.decode(res.Body)
}
func (f *FetcherDefault) decode(r io.Reader) ([]Rule, error) { |
func (f *FetcherDefault) fetchFromStorage(source url.URL) ([]Rule, error) {
ctx := context.Background()
bucket, err := f.mux.OpenBucket(ctx, source.Scheme+"://"+source.Host)
if err != nil {
return nil, err
}
defer bucket.Close()
r, err := bucket.NewReader(ctx, source.Path[1:], nil)
if err != nil {
return nil, err
}
defer r.Close()
return f.decode(r)
}
|
b, err := io.ReadAll(r)
if err != nil {
return nil, errors.WithStack(err)
}
var ks []Rule
if json.Valid(b) {
d := json.NewDecoder(bytes.NewReader(b))
d.DisallowUnknownFields()
if err := d.Decode(&ks); err != nil {
return nil, errors.WithStack(err)
}
return ks, nil
}
if err := yaml.Unmarshal(b, &ks); err != nil {
return nil, errors.WithStack(err)
}
return ks, nil
}
| identifier_body |
fetcher_default.go | // Copyright © 2023 Ory Corp
// SPDX-License-Identifier: Apache-2.0
package rule
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"io"
"net/http"
"net/url"
"path/filepath"
"strings"
"sync"
"time"
"github.com/ghodss/yaml"
"github.com/pkg/errors"
"gocloud.dev/blob"
_ "gocloud.dev/blob/azureblob"
_ "gocloud.dev/blob/gcsblob"
_ "gocloud.dev/blob/s3blob"
"github.com/ory/x/httpx"
"github.com/ory/x/urlx"
"github.com/ory/x/watcherx"
"github.com/ory/oathkeeper/driver/configuration"
"github.com/ory/oathkeeper/internal/cloudstorage"
"github.com/ory/oathkeeper/x"
)
// Compile-time assertion that *FetcherDefault implements Fetcher.
var _ Fetcher = new(FetcherDefault)

// fetcherRegistry is the subset of the registry the fetcher needs: a logger
// plus access to the rule repository.
type fetcherRegistry interface {
	x.RegistryLogger
	RuleRepository() Repository
}

// FetcherDefault loads access rules from local files, HTTP(S) endpoints,
// cloud storage buckets and inline sources, and pushes them into the rule
// repository.
type FetcherDefault struct {
	config   configuration.Provider
	registry fetcherRegistry
	hc       *http.Client // HTTP client for remote rule repositories
	mux      *blob.URLMux // cloud storage bucket opener (s3/gs/azblob)

	// cache maps a repository source (URL string or file path) to the rules
	// most recently decoded from it; protected by lock.
	cache map[string][]Rule
	// cancelWatchers maps a watched file path to the cancel function of its
	// watcher context; protected by lock.
	cancelWatchers map[string]context.CancelFunc
	events         chan watcherx.Event // file-change events from watcherx
	lock           sync.Mutex
}
// NewFetcherDefault constructs a FetcherDefault with an empty rule cache, the
// default cloud storage URL mux, and a resilient HTTP client with a 15 second
// connection timeout.
func NewFetcherDefault(
	config configuration.Provider,
	registry fetcherRegistry,
) *FetcherDefault {
	return &FetcherDefault{
		registry:       registry,
		config:         config,
		mux:            cloudstorage.NewURLMux(),
		hc:             httpx.NewResilientClient(httpx.ResilientClientWithConnectionTimeout(15 * time.Second)).StandardClient(),
		cache:          make(map[string][]Rule),
		cancelWatchers: make(map[string]context.CancelFunc),
		events:         make(chan watcherx.Event),
	}
}
// SetURLMux replaces the cloud storage URL mux, e.g. to inject a custom
// bucket opener.
func (f *FetcherDefault) SetURLMux(mux *blob.URLMux) {
	f.mux = mux
}
// splitLocalRemoteRepos partitions the configured rule repositories into
// cleaned local file paths (scheme "file" or empty) and all remaining,
// non-file URLs.
func splitLocalRemoteRepos(ruleRepos []url.URL) (files []string, nonFiles []url.URL) {
	files = make([]string, 0, len(ruleRepos))
	nonFiles = make([]url.URL, 0, len(ruleRepos))
	for i := range ruleRepos {
		repo := ruleRepos[i]
		switch repo.Scheme {
		case "file", "":
			files = append(files, filepath.Clean(urlx.GetURLFilePath(&repo)))
		default:
			nonFiles = append(nonFiles, repo)
		}
	}
	return files, nonFiles
}
// watchLocalFiles watches all files that are configured in the config and are not watched already.
// It also cancels watchers for files that are no longer configured. This function is idempotent.
// NOTE(review): ctx is reassigned every loop iteration, so each new watcher's
// context is a child of the previous watcher's cancelable context — canceling
// an earlier watcher also cancels the later ones. Confirm this chaining is
// intended.
func (f *FetcherDefault) watchLocalFiles(ctx context.Context) {
	f.lock.Lock()
	repoChanged := false
	cancelWatchers := make(map[string]context.CancelFunc, len(f.cancelWatchers))
	localFiles, _ := splitLocalRemoteRepos(f.config.AccessRuleRepositories())
	for _, fp := range localFiles {
		if cancel, ok := f.cancelWatchers[fp]; !ok {
			// watch all files we are not yet watching
			repoChanged = true
			// Each file gets its own cancelable context so it can be stopped
			// individually once it disappears from the config.
			ctx, cancelWatchers[fp] = context.WithCancel(ctx)
			w, err := watcherx.WatchFile(ctx, fp, f.events)
			if err != nil {
				f.registry.Logger().WithError(err).WithField("file", fp).Error("Unable to watch file, ignoring it.")
				continue
			}
			// we force reading the files
			done, err := w.DispatchNow()
			if err != nil {
				f.registry.Logger().WithError(err).WithField("file", fp).Error("Unable to read file, ignoring it.")
				continue
			}
			go func() { <-done }() // we do not need to wait here, but we need to clear the channel
		} else {
			// keep watching files we are already watching
			cancelWatchers[fp] = cancel
		}
	}
	// cancel watchers for files we are no longer watching
	for fp, cancel := range f.cancelWatchers {
		if _, ok := cancelWatchers[fp]; !ok {
			f.registry.Logger().WithField("file", fp).Info("Stopped watching access rule file.")
			repoChanged = true
			cancel()
			// Drop the stale rules so they disappear from the repository on
			// the next updateRulesFromCache.
			delete(f.cache, fp)
		}
	}
	f.cancelWatchers = cancelWatchers
	f.lock.Unlock() // release lock before processing events
	if repoChanged {
		f.registry.Logger().WithField("repos", f.config.Get(configuration.AccessRuleRepositories)).Info("Detected access rule repository change, processing updates.")
		if err := f.updateRulesFromCache(ctx); err != nil {
			f.registry.Logger().WithError(err).WithField("event_source", "local repo change").Error("Unable to update access rules.")
		}
	}
}
// Watch starts watching all configured access rule sources: local files via
// file watchers and remote repositories via config-change callbacks. It runs
// an initial sync of the matching strategy and the remote repositories,
// registers a config watcher reacting to later changes, and spawns the
// goroutine that consumes local file events. Returns an error only if the
// initial sync fails.
func (f *FetcherDefault) Watch(ctx context.Context) error {
	f.watchLocalFiles(ctx)
	getRemoteRepos := func() map[url.URL]struct{} {
		_, remoteRepos := splitLocalRemoteRepos(f.config.AccessRuleRepositories())
		repos := make(map[url.URL]struct{}, len(remoteRepos))
		for _, repo := range remoteRepos {
			repos[repo] = struct{}{}
		}
		return repos
	}
	// capture the previous config values to detect changes, and trigger initial processing
	strategy := f.config.AccessRuleMatchingStrategy()
	if err := f.processStrategyUpdate(ctx, strategy); err != nil {
		return err
	}
	remoteRepos := getRemoteRepos()
	if err := f.processRemoteRepoUpdate(ctx, nil, remoteRepos); err != nil {
		return err
	}
	// The callback closes over strategy and remoteRepos so it can diff each
	// config change against the previously seen values.
	f.config.AddWatcher(func(_ watcherx.Event, err error) {
		if err != nil {
			return
		}
		// watch files that need to be watched
		f.watchLocalFiles(ctx)
		// update the matching strategy if it changed
		if newStrategy := f.config.AccessRuleMatchingStrategy(); newStrategy != strategy {
			f.registry.Logger().WithField("strategy", newStrategy).Info("Detected access rule matching strategy change, processing updates.")
			if err := f.processStrategyUpdate(ctx, newStrategy); err != nil {
				f.registry.Logger().WithError(err).Error("Unable to update access rule matching strategy.")
			} else {
				strategy = newStrategy
			}
		}
		// update & fetch the remote repos if they changed
		newRemoteRepos := getRemoteRepos()
		if err := f.processRemoteRepoUpdate(ctx, remoteRepos, newRemoteRepos); err != nil {
			f.registry.Logger().WithError(err).Error("Unable to update remote access rule repository config.")
		}
		remoteRepos = newRemoteRepos
	})
	go f.processLocalUpdates(ctx)
	return nil
}
// processStrategyUpdate pushes the configured matching strategy into the rule
// repository and propagates any error from it.
func (f *FetcherDefault) processStrategyUpdate(ctx context.Context, newValue configuration.MatchingStrategy) error {
	return f.registry.RuleRepository().SetMatchingStrategy(ctx, newValue)
}
func (f *FetcherDefault) p | ctx context.Context, oldRepos, newRepos map[url.URL]struct{}) error {
repoChanged := false
for repo := range newRepos {
if _, ok := f.cache[repo.String()]; !ok {
repoChanged = true
f.registry.Logger().WithField("repo", repo.String()).Info("New repo detected, fetching access rules.")
rules, err := f.fetch(repo)
if err != nil {
f.registry.Logger().WithError(err).WithField("repo", repo.String()).Error("Unable to fetch access rules.")
return err
}
f.cacheRules(repo.String(), rules)
}
}
for repo := range oldRepos {
if _, ok := newRepos[repo]; !ok {
repoChanged = true
f.registry.Logger().WithField("repo", repo.String()).Info("Repo was removed, removing access rules.")
f.lock.Lock()
delete(f.cache, repo.String())
f.lock.Unlock()
}
}
if repoChanged {
if err := f.updateRulesFromCache(ctx); err != nil {
f.registry.Logger().WithError(err).WithField("event_source", "remote change").Error("Unable to update access rules.")
return err
}
}
return nil
}
// processLocalUpdates consumes file-watcher events until ctx is canceled or
// the event channel is closed, re-decoding each changed file and refreshing
// the rule repository from the cache.
func (f *FetcherDefault) processLocalUpdates(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			return
		case e, ok := <-f.events:
			if !ok {
				// channel was closed
				return
			}
			f.registry.Logger().
				WithField("event", "fsnotify").
				WithField("file", e.Source()).
				Info("Detected file change for access rules. Triggering a reload.")
			if e.Reader() == nil {
				f.registry.Logger().WithField("file", e.Source()).Error("Unable to read access rules probably because they were deleted, skipping those.")
				continue
			}
			rules, err := f.decode(e.Reader())
			if err != nil {
				f.registry.Logger().WithField("file", e.Source()).WithError(err).Error("Unable to decode access rules, skipping those.")
				continue
			}
			f.cacheRules(e.Source(), rules)
			if err := f.updateRulesFromCache(ctx); err != nil {
				f.registry.Logger().WithError(err).WithField("event_source", "local change").Error("Unable to update access rules.")
			}
		}
	}
}
// cacheRules stores the latest decoded rules for source in the in-memory
// cache, replacing any previous entry. Guarded by f.lock.
func (f *FetcherDefault) cacheRules(source string, rules []Rule) {
	f.lock.Lock()
	defer f.lock.Unlock()
	f.cache[source] = rules
}
// updateRulesFromCache flattens all cached rule sets into a single list and
// replaces the rule repository contents with it. Guarded by f.lock.
func (f *FetcherDefault) updateRulesFromCache(ctx context.Context) error {
	f.lock.Lock()
	defer f.lock.Unlock()
	allRules := make([]Rule, 0)
	for _, rules := range f.cache {
		allRules = append(allRules, rules...)
	}
	return f.registry.RuleRepository().Set(ctx, allRules)
}
// fetch dispatches to a scheme-specific loader: cloud storage for
// azblob/gs/s3, HTTP(S) for http/https, and base64-decoded inline payloads
// for inline://. Any other scheme yields an error.
func (f *FetcherDefault) fetch(source url.URL) ([]Rule, error) {
	f.registry.Logger().
		WithField("location", source.String()).
		Debugf("Fetching access rules from given location because something changed.")
	switch source.Scheme {
	case "azblob", "gs", "s3":
		return f.fetchFromStorage(source)
	case "http", "https":
		return f.fetchRemote(source.String())
	case "inline":
		// Strip the scheme prefix; the remainder is base64-encoded rules.
		src, err := base64.StdEncoding.DecodeString(strings.Replace(source.String(), "inline://", "", 1))
		if err != nil {
			return nil, errors.Wrapf(err, "rule: %s", source.String())
		}
		return f.decode(bytes.NewBuffer(src))
	}
	return nil, errors.Errorf("rule: source url uses an unknown scheme: %s", source.String())
}
// fetchRemote downloads access rules over HTTP(S) from the given source URL
// and decodes the response body. Any response status other than 200 OK is
// treated as an error.
func (f *FetcherDefault) fetchRemote(source string) ([]Rule, error) {
	response, err := f.hc.Get(source)
	if err != nil {
		return nil, errors.Wrapf(err, "rule: %s", source)
	}
	defer response.Body.Close()

	if code := response.StatusCode; code != http.StatusOK {
		return nil, errors.Errorf("rule: expected http response status code 200 but got %d when fetching: %s", code, source)
	}

	return f.decode(response.Body)
}
// decode parses a list of rules from r. Syntactically valid JSON is decoded
// strictly (unknown fields are rejected); anything else is interpreted as
// YAML.
func (f *FetcherDefault) decode(r io.Reader) ([]Rule, error) {
	raw, err := io.ReadAll(r)
	if err != nil {
		return nil, errors.WithStack(err)
	}

	var rules []Rule
	if !json.Valid(raw) {
		// Not valid JSON — fall back to YAML.
		if err := yaml.Unmarshal(raw, &rules); err != nil {
			return nil, errors.WithStack(err)
		}
		return rules, nil
	}

	dec := json.NewDecoder(bytes.NewReader(raw))
	dec.DisallowUnknownFields()
	if err := dec.Decode(&rules); err != nil {
		return nil, errors.WithStack(err)
	}
	return rules, nil
}
// fetchFromStorage loads access rules from a cloud storage bucket (s3, gs,
// azblob) addressed by source: the bucket is identified by scheme and host,
// the object key is the URL path without its leading slash.
// NOTE(review): source.Path[1:] panics when Path is empty — this assumes
// callers always supply a URL with a non-empty path; confirm upstream
// validation.
func (f *FetcherDefault) fetchFromStorage(source url.URL) ([]Rule, error) {
	ctx := context.Background()
	bucket, err := f.mux.OpenBucket(ctx, source.Scheme+"://"+source.Host)
	if err != nil {
		return nil, err
	}
	defer bucket.Close()
	r, err := bucket.NewReader(ctx, source.Path[1:], nil)
	if err != nil {
		return nil, err
	}
	defer r.Close()
	return f.decode(r)
}
| rocessRemoteRepoUpdate( | identifier_name |
tunnel.py | import sys
import os
import socket
import time
import struct
import select
import logging
from threading import Thread
from threading import Lock
from threading import Condition
from lowpan import util
import lowpan
##@todo Find a better home for these identifiers (controller)
RCV_SIZE_DEFAULT = 32768
LISTEN_QUEUE_SIZE = 1
class VirtualTunnel(Thread):
"""
Class abstracting the control interface to the switch.
For receiving messages, two mechanism will be implemented. First,
query the interface with poll. Second, register to have a
function called by message type. The callback is passed the
message type as well as the raw packet (or message object)
One of the main purposes of this object is to translate between network
and host byte order. 'Above' this object, things should be in host
byte order.
@todo Consider using SocketServer for listening socket
@todo Test transaction code
@var rcv_size The receive size to use for receive calls
@var max_pkts The max size of the receive queue
@var keep_alive If true, listen for echo requests and respond w/
echo replies
@var initial_hello If true, will send a hello message immediately
upon connecting to the switch
@var switch If not None, do an active connection to the switch
@var host The host to use for connect
@var port The port to connect on
@var packets_total Total number of packets received
@var packets_expired Number of packets popped from queue as queue full
@var packets_handled Number of packets handled by something
@var dbg_state Debug indication of state
"""
def __init__(self, bdg_unix_addr = None ,tun_unix_addr = 'uds_tunnel', host='192.168.2.200', port=1024, max_pkts=1024):
    """
    Set up the tunnel: initialize state/counters, create the UDP listen
    socket bound to (host, port), and create a Unix datagram socket bound
    to tun_unix_addr for the bridge side.

    @param bdg_unix_addr Optional Unix socket address packets are forwarded
        to in _pkt_handle; if None, no bridging is performed.
    @param tun_unix_addr Path of the Unix datagram socket to bind locally
        (any stale socket file is removed first).
    @param host Address to bind the UDP listen socket to.
    @param port Port to bind the UDP listen socket to.
    @param max_pkts Maximum length of the receive queue before the oldest
        packet is dropped.
    """
    Thread.__init__(self)
    # Socket related
    self.rcv_size = RCV_SIZE_DEFAULT
    self.listen_socket = None
    self.switch_socket = None
    self.switch_addr = None
    self.connect_cv = Condition()
    self.message_cv = Condition()
    self.tx_lock = Lock()
    # Used to wake up the event loop from another thread
    self.waker = util.EventDescriptor()
    # Counters
    self.socket_errors = 0
    self.parse_errors = 0
    self.packets_total = 0
    self.packets_expired = 0
    self.packets_handled = 0
    self.poll_discards = 0
    # State
    self.sync = Lock()
    self.handlers = {}
    self.keep_alive = False
    self.active = True
    self.initial_hello = True
    # OpenFlow message/packet queue
    # Protected by the packets_cv lock / condition variable
    self.packets = []
    self.packets_cv = Condition()
    self.packet_in_count = 0
    # Settings
    self.max_pkts = max_pkts
    self.bdg_unix_addr = bdg_unix_addr
    self.tun_unix_addr = tun_unix_addr
    self.host = host
    self.port = port
    self.dbg_state = "init"
    self.logger = logging.getLogger("VirtualTunnel")
    self.filter_packet_in = False # Drop "excessive" packet ins
    self.pkt_in_run = 0 # Count on run of packet ins
    self.pkt_in_filter_limit = 50 # Count on run of packet ins
    self.pkt_in_dropped = 0 # Total dropped packet ins
    self.transact_to = 15 # Transact timeout default value; add to config
    # Transaction and message type waiting variables
    # xid_cv: Condition variable (semaphore) for packet waiters
    # xid: Transaction ID being waited on
    # xid_response: Transaction response message
    self.xid_cv = Condition()
    self.xid = None
    self.xid_response = None
    self.debug = False
    self.buffered_input = ""
    # Create listen socket
    self.logger.info("Create/listen at " + self.host + ":" +
                     str(self.port))
    ai = socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC,
                            socket.SOCK_DGRAM, 0, socket.AI_PASSIVE)
    # Use first returned addrinfo
    (family, socktype, proto, name, sockaddr) = ai[0]
    self.listen_socket = socket.socket(family, socktype)
    self.listen_socket.setsockopt(socket.SOL_SOCKET,
                                  socket.SO_REUSEADDR, 1)
    self.listen_socket.bind(sockaddr)
    # UDP: the listen socket doubles as the "switch" socket.
    self.switch_socket = self.listen_socket
    # Make sure the socket does not already exist
    try:
        os.unlink(self.tun_unix_addr)
    except OSError:
        if os.path.exists(self.tun_unix_addr):
            raise
    self.bridge_socket = socket.socket(socket.AF_UNIX,socket.SOCK_DGRAM)
    # Bind the socket to the port
    self.logger.info("Create/listen at " + str(self.tun_unix_addr))
    self.bridge_socket.bind(self.tun_unix_addr)
def filter_packet(self, rawmsg, hdr):
    """
    Check if packet should be filtered

    Currently filters packet in messages
    @return Boolean, True if packet should be dropped
    """
    # XXX didn't actually check for packet-in...
    return False
    # NOTE(review): everything below is unreachable because of the early
    # return above — apparently a disabled rate-limit filter kept for
    # reference. Confirm whether it should be re-enabled or removed.
    # Add check for packet in and rate limit
    if self.filter_packet_in:
        # If we were dropping packets, report number dropped
        # TODO dont drop expected packet ins
        if self.pkt_in_run > self.pkt_in_filter_limit:
            self.logger.debug("Dropped %d packet ins (%d total)"
                              % ((self.pkt_in_run -
                                  self.pkt_in_filter_limit),
                                 self.pkt_in_dropped))
        self.pkt_in_run = 0
    return False
def _pkt_handle(self, pkt):
    """
    Check for all packet handling conditions

    Parse and verify message
    Check if XID matches something waiting
    Check if message is being expected for a poll operation
    Check if keep alive is on and message is an echo request
    Check if any registered handler wants the packet
    Enqueue if none of those conditions is met

    an echo request in case keep_alive is true, followed by
    registered message handlers.
    @param pkt The raw packet (string) which may contain multiple OF msgs
    """
    # NOTE(review): pkt is presumably the (data, address) tuple returned by
    # recvfrom — pkt[0] is parsed below, pkt[1] is printed when debugging.
    # Confirm against _socket_ready_handle.
    # snag any left over data from last read()
    # Parse the header to get type
    offset, payload_len, subtype, nxp_sniffer = lowpan.message.parse_header(pkt[0])
    # Extract the raw message bytes
    rawmsg = pkt[0][offset : offset + payload_len]
    if self.debug:
        print(pkt[1])
        print(util.hex_dump_buffer(rawmsg))
    # Now check for message handlers; preference is given to
    # handlers for a specific packet
    handled = False
    # Send to bridge socket
    if self.bdg_unix_addr:
        self.bridge_socket.sendto(rawmsg,self.bdg_unix_addr)
        handled = True
    if subtype in self.handlers.keys():
        handled = self.handlers[subtype](self, nxp_sniffer, rawmsg)
    if not handled and ("all" in self.handlers.keys()):
        handled = self.handlers["all"](self, nxp_sniffer, rawmsg)
    if not handled: # Not handled, enqueue
        with self.packets_cv:
            # Bounded queue: drop the oldest entry once max_pkts is reached.
            if len(self.packets) >= self.max_pkts:
                self.packets.pop(0)
                self.packets_expired += 1
            self.packets.append((nxp_sniffer, rawmsg))
            self.packets_cv.notify_all()
        self.packets_total += 1
    else:
        self.packets_handled += 1
        self.logger.debug("Message handled by callback")
def _socket_ready_handle(self, s):
    """
    Handle an input-ready socket

    @param s The socket object that is ready
    @returns 0 on success, -1 on error
    """
    if s and s == self.switch_socket:
        for idx in range(3): # debug: try a couple of times
            try:
                pkt = self.switch_socket.recvfrom(self.rcv_size)
            except:
                self.logger.warning("Error on switch read")
                return -1
            if not self.active:
                return 0
            # NOTE(review): recvfrom returns a 2-tuple, so len(pkt) == 0
            # never holds; the zero-length-read branches appear to be dead
            # code carried over from a recv()-based version. Confirm.
            if len(pkt) == 0:
                self.logger.warning("Zero-length switch read, %d" % idx)
            else:
                break
        if len(pkt) == 0: # Still no packet
            self.logger.warning("Zero-length switch read; closing cxn")
            self.logger.info(str(self))
            return -1
        self._pkt_handle(pkt)
    elif s and s == self.waker:
        # Drain the wakeup event so select() does not fire again.
        self.waker.wait()
    else:
        self.logger.error("Unknown socket ready: " + str(s))
        return -1
    return 0
def active_connect(self):
"""
Actively connect to a switch IP addr
"""
try:
self.logger.info("Trying active connection to %s" % self.switch)
soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
soc.connect((self.switch, self.port))
self.logger.info("Connected to " + self.switch + " on " +
str(self.port))
soc.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, True)
self.switch_addr = (self.switch, self.port)
return soc
except (StandardError, socket.error) as e:
self.logger.error("Could not connect to %s at %d:: %s" %
(self.switch, self.port, str(e)))
return None
def wakeup(self):
    """
    Wake up the event loop, presumably from another thread.
    """
    # Notifying the waker descriptor makes select() in run() return.
    self.waker.notify()
def sockets(self):
"""
Return list of sockets to select on.
"""
socs = [self.listen_socket, self.bridge_socket, self.waker]
return [x for x in socs if x]
def run(self):
    """
    Activity function for class

    Assumes connection to switch already exists. Listens on
    switch_socket for messages until an error (or zero len pkt)
    occurs.

    When there is a message on the socket, check for handlers; queue the
    packet if no one handles the packet.

    See note for controller describing the limitation of a single
    connection for now.
    """
    self.dbg_state = "running"
    while self.active:
        try:
            # 1-second timeout so self.active is re-checked periodically.
            sel_in, sel_out, sel_err = \
                select.select(self.sockets(), [], self.sockets(), 1)
        except:
            print( sys.exc_info())
            self.logger.error("Select error, disconnecting")
            self.disconnect()
        for s in sel_err:
            self.logger.error("Got socket error on: " + str(s) + ", disconnecting")
            self.disconnect()
        for s in sel_in:
            if self._socket_ready_handle(s) == -1:
                self.disconnect()
    # End of main loop
    self.dbg_state = "closing"
    self.logger.info("Exiting controller thread")
    self.shutdown()
def connect(self, timeout=-1):
    """
    Connect to the switch

    @param timeout Block for up to timeout seconds. Pass -1 for the default.
    @return Boolean, True if connected
    """
    # NOTE(review): not implemented — always returns None (falsy); the
    # passive UDP listen socket is set up in __init__ instead.
    pass
def disconnect(self, timeout=-1):
    """
    If connected to a switch, disconnect.

    Closes the switch and bridge sockets, clears the packet queue, and
    wakes anyone blocked in wait_disconnected().
    @param timeout Accepted for interface symmetry; unused here.
    """
    if self.switch_socket:
        self.switch_socket.close()
        self.switch_socket = None
        self.switch_addr = None
        with self.packets_cv:
            self.packets = []
        with self.connect_cv:
            self.connect_cv.notifyAll()
    if self.bridge_socket:
        self.bridge_socket.close()
def wait_disconnected(self, timeout=-1):
    """
    Block until the switch socket has been torn down or timeout expires.

    @param timeout Block for up to timeout seconds. Pass -1 for the default.
    @return Boolean, True if disconnected
    """
    with self.connect_cv:
        # timed_wait returns early once the predicate yields a non-None
        # value, i.e. once switch_socket is gone.
        util.timed_wait(self.connect_cv,
                        lambda: True if not self.switch_socket else None,
                        timeout=timeout)
    return self.switch_socket is None
def kill(self):
    """
    Force the controller thread to quit
    """
    # Stop the event loop, wake select() so it notices, then join the
    # thread.
    self.active = False
    self.wakeup()
    self.join()
def shutdown(self):
    """
    Shutdown the controller closing all sockets

    @todo Might want to synchronize shutdown with self.sync...
    """
    self.active = False
    try:
        self.listen_socket.shutdown(socket.SHUT_RDWR)
    except:
        self.logger.info("Ignoring listen soc shutdown error")
    self.listen_socket = None
    # Release anyone blocked waiting for a (dis)connect state change.
    with self.connect_cv:
        self.connect_cv.notifyAll()
    # Wake the select loop so it observes self.active == False.
    self.wakeup()
    self.dbg_state = "down"
def register(self, msg_type, handler):
"""
Register a callback to receive a specific message type.
Only one handler may be registered for a given message type.
WARNING: A lock is held during the handler call back, so
the handler should not make any blocking calls
@param msg_type The type of message to receive. May be DEFAULT
for all non-handled packets. The special type, the string "all"
will send all packets to the handler.
@param handler The function to call when a message of the given
type is received.
"""
# Should check type is valid
if not handler and msg_type in self.handlers.keys():
del self.handlers[msg_type]
return
self.handlers[msg_type] = handler
def poll(self, exp_msg=None, timeout=-1):
    """
    Wait for the next OF message received from the switch.

    @param exp_msg If set, return only when this type of message
    is received (unless timeout occurs).

    @param timeout Maximum number of seconds to wait for the message.
    Pass -1 for the default timeout.

    @retval A pair (msg, pkt) where msg is a message object and pkt
    the string representing the packet as received from the socket.
    This allows additional parsing by the receiver if necessary.

    The data members in the message are in host endian order.
    If an error occurs, (None, None) is returned
    """
    if exp_msg is None:
        self.logger.warn("DEPRECATED polling for any message class")
        klass = None
    else:
        # Filtering by an expected message type is no longer supported.
        raise ValueError("Unexpected exp_msg argument %r" % exp_msg)
    # Take the packet from the queue
    def grab():
        # Returns the first queued entry matching klass (or the first
        # entry at all when klass is None); None keeps timed_wait waiting.
        for i, (msg, pkt) in enumerate(self.packets):
            if klass is None or isinstance(msg, klass):
                self.logger.debug("Got %s message", msg.__class__.__name__)
                return self.packets.pop(i)
        # Not found
        return None
    with self.packets_cv:
        ret = util.timed_wait(self.packets_cv, grab, timeout=timeout)
    if ret != None:
        (msg, pkt) = ret
        return (msg, pkt)
    else:
        return (None, None)
def transact(self, msg, timeout=-1):
    """
    Run a message transaction with the switch

    Send the message in msg and wait for a reply with a matching
    transaction id. Transactions have the highest priority in
    received message handling.

    @param msg The message object to send; must not be a string
    @param timeout The timeout in seconds; if -1 use default.
    @return A pair (resp, pkt); (None, None) if no matching response
        arrived before the timeout.
    """
    if msg.xid == None:
        msg.xid = util.gen_xid()
    self.logger.debug("Running transaction %d" % msg.xid)
    with self.xid_cv:
        # Only a single outstanding transaction is supported; self.xid is
        # the id currently being waited on.
        if self.xid:
            self.logger.error("Can only run one transaction at a time")
            return (None, None)
        self.xid = msg.xid
        self.xid_response = None
        self.message_send(msg)
        self.logger.debug("Waiting for transaction %d" % msg.xid)
        util.timed_wait(self.xid_cv, lambda: self.xid_response, timeout=timeout)
        if self.xid_response:
            (resp, pkt) = self.xid_response
            self.xid_response = None
        else:
            (resp, pkt) = (None, None)
    if resp is None:
        self.logger.warning("No response for xid " + str(self.xid))
    return (resp, pkt)
def message_send(self, msg):
    """
    Send the message to the switch

    @param msg A string or OpenFlow message object to be forwarded to
    the switch.
    @raises Exception if there is no switch socket.
    @return 0 on success (for backwards compatibility).
    """
    if not self.switch_socket:
        # Sending a string indicates the message is ready to go
        raise Exception("no socket")
    if msg.xid == None:
        msg.xid = util.gen_xid()
    outpkt = msg.pack()
    self.logger.debug("Msg out: version %d class %s len %d xid %d",
                      msg.version, type(msg).__name__, len(outpkt), msg.xid)
    with self.tx_lock:
        # NOTE(review): socket.sendall returns None on success and raises
        # on error, so this assertion can never fire — confirm intent.
        if self.switch_socket.sendall(outpkt) is not None:
            raise AssertionError("failed to send message to switch")
    return 0 # for backwards compatibility
def clear_queue(self):
"""
Clear the input queue and report the number of messages
that were in it
"""
enqueued_pkt_count = len(self.packets)
with self.packets_cv:
self.packets = []
return enqueued_pkt_count
def __str__(self):
string = "Controller:\n"
string += " state " + self.dbg_state + "\n"
string += " switch_addr " + str(self.switch_addr) + "\n"
string += " pending pkts " + str(len(self.packets)) + "\n"
string += " total pkts " + str(self.packets_total) + "\n"
string += " expired pkts " + str(self.packets_expired) + "\n"
string += " handled pkts " + str(self.packets_handled) + "\n"
string += " poll discards " + str(self.poll_discards) + "\n"
string += " parse errors " + str(self.parse_errors) + "\n"
string += " sock errrors " + str(self.socket_errors) + "\n"
string += " max pkts " + str(self.max_pkts) + "\n"
string += " target switch " + str(self.switch) + "\n"
string += " host " + str(self.host) + "\n"
string += " port " + str(self.port) + "\n"
string += " keep_alive " + str(self.keep_alive) + "\n"
string += " pkt_in_run " + str(self.pkt_in_run) + "\n"
string += " pkt_in_dropped " + str(self.pkt_in_dropped) + "\n"
return string
def | (self):
print(str(self))
def sample_handler(controller, msg, pkt):
"""
Sample message handler
This is the prototype for functions registered with the controller
class for packet reception
@param controller The controller calling the handler
@param msg The parsed message object
@param pkt The raw packet that was received on the socket. This is
in case the packet contains extra unparsed data.
@returns Boolean value indicating if the packet was handled. If
not handled, the packet is placed in the queue for pollers to received
"""
pass
| show | identifier_name |
tunnel.py | import sys
import os
import socket
import time
import struct
import select
import logging
from threading import Thread
from threading import Lock
from threading import Condition
from lowpan import util
import lowpan
##@todo Find a better home for these identifiers (controller)
RCV_SIZE_DEFAULT = 32768
LISTEN_QUEUE_SIZE = 1
class VirtualTunnel(Thread):
"""
Class abstracting the control interface to the switch.
For receiving messages, two mechanism will be implemented. First,
query the interface with poll. Second, register to have a
function called by message type. The callback is passed the
message type as well as the raw packet (or message object)
One of the main purposes of this object is to translate between network
and host byte order. 'Above' this object, things should be in host
byte order.
@todo Consider using SocketServer for listening socket
@todo Test transaction code
@var rcv_size The receive size to use for receive calls
@var max_pkts The max size of the receive queue
@var keep_alive If true, listen for echo requests and respond w/
echo replies
@var initial_hello If true, will send a hello message immediately
upon connecting to the switch
@var switch If not None, do an active connection to the switch
@var host The host to use for connect
@var port The port to connect on
@var packets_total Total number of packets received
@var packets_expired Number of packets popped from queue as queue full
@var packets_handled Number of packets handled by something
@var dbg_state Debug indication of state
"""
def __init__(self, bdg_unix_addr = None ,tun_unix_addr = 'uds_tunnel', host='192.168.2.200', port=1024, max_pkts=1024):
Thread.__init__(self)
# Socket related
self.rcv_size = RCV_SIZE_DEFAULT
self.listen_socket = None
self.switch_socket = None
self.switch_addr = None
self.connect_cv = Condition()
self.message_cv = Condition()
self.tx_lock = Lock()
# Used to wake up the event loop from another thread
self.waker = util.EventDescriptor()
# Counters
self.socket_errors = 0
self.parse_errors = 0
self.packets_total = 0
self.packets_expired = 0
self.packets_handled = 0
self.poll_discards = 0
# State
self.sync = Lock()
self.handlers = {}
self.keep_alive = False
self.active = True
self.initial_hello = True
# OpenFlow message/packet queue
# Protected by the packets_cv lock / condition variable
self.packets = []
self.packets_cv = Condition()
self.packet_in_count = 0
# Settings
self.max_pkts = max_pkts
self.bdg_unix_addr = bdg_unix_addr
self.tun_unix_addr = tun_unix_addr
self.host = host
self.port = port
self.dbg_state = "init"
self.logger = logging.getLogger("VirtualTunnel")
self.filter_packet_in = False # Drop "excessive" packet ins
self.pkt_in_run = 0 # Count on run of packet ins
self.pkt_in_filter_limit = 50 # Count on run of packet ins
self.pkt_in_dropped = 0 # Total dropped packet ins
self.transact_to = 15 # Transact timeout default value; add to config
# Transaction and message type waiting variables
# xid_cv: Condition variable (semaphore) for packet waiters
# xid: Transaction ID being waited on
# xid_response: Transaction response message
self.xid_cv = Condition()
self.xid = None
self.xid_response = None
self.debug = False
self.buffered_input = ""
# Create listen socket
self.logger.info("Create/listen at " + self.host + ":" +
str(self.port))
ai = socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC,
socket.SOCK_DGRAM, 0, socket.AI_PASSIVE)
# Use first returned addrinfo
(family, socktype, proto, name, sockaddr) = ai[0]
self.listen_socket = socket.socket(family, socktype)
self.listen_socket.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
self.listen_socket.bind(sockaddr)
self.switch_socket = self.listen_socket
# Make sure the socket does not already exist
try:
os.unlink(self.tun_unix_addr)
except OSError:
if os.path.exists(self.tun_unix_addr):
raise
self.bridge_socket = socket.socket(socket.AF_UNIX,socket.SOCK_DGRAM)
# Bind the socket to the port
self.logger.info("Create/listen at " + str(self.tun_unix_addr))
self.bridge_socket.bind(self.tun_unix_addr)
def filter_packet(self, rawmsg, hdr):
"""
Check if packet should be filtered
Currently filters packet in messages
@return Boolean, True if packet should be dropped
"""
# XXX didn't actually check for packet-in...
return False
# Add check for packet in and rate limit
if self.filter_packet_in:
# If we were dropping packets, report number dropped
# TODO dont drop expected packet ins
if self.pkt_in_run > self.pkt_in_filter_limit:
self.logger.debug("Dropped %d packet ins (%d total)"
% ((self.pkt_in_run -
self.pkt_in_filter_limit),
self.pkt_in_dropped))
self.pkt_in_run = 0
return False
def _pkt_handle(self, pkt):
"""
Check for all packet handling conditions
Parse and verify message
Check if XID matches something waiting
Check if message is being expected for a poll operation
Check if keep alive is on and message is an echo request
Check if any registered handler wants the packet
Enqueue if none of those conditions is met
an echo request in case keep_alive is true, followed by
registered message handlers.
@param pkt The raw packet (string) which may contain multiple OF msgs
"""
# snag any left over data from last read()
# Parse the header to get type
offset, payload_len, subtype, nxp_sniffer = lowpan.message.parse_header(pkt[0])
# Extract the raw message bytes
rawmsg = pkt[0][offset : offset + payload_len]
if self.debug:
print(pkt[1])
print(util.hex_dump_buffer(rawmsg))
# Now check for message handlers; preference is given to
# handlers for a specific packet
handled = False
# Send to bridge socket
if self.bdg_unix_addr:
self.bridge_socket.sendto(rawmsg,self.bdg_unix_addr)
handled = True
if subtype in self.handlers.keys():
handled = self.handlers[subtype](self, nxp_sniffer, rawmsg)
if not handled and ("all" in self.handlers.keys()):
handled = self.handlers["all"](self, nxp_sniffer, rawmsg)
if not handled: # Not handled, enqueue
with self.packets_cv:
if len(self.packets) >= self.max_pkts:
self.packets.pop(0)
self.packets_expired += 1
self.packets.append((nxp_sniffer, rawmsg))
self.packets_cv.notify_all()
self.packets_total += 1
else:
self.packets_handled += 1
self.logger.debug("Message handled by callback")
def _socket_ready_handle(self, s):
"""
Handle an input-ready socket
@param s The socket object that is ready
@returns 0 on success, -1 on error
"""
if s and s == self.switch_socket:
for idx in range(3): # debug: try a couple of times
try:
pkt = self.switch_socket.recvfrom(self.rcv_size)
except:
self.logger.warning("Error on switch read")
return -1
if not self.active:
return 0
if len(pkt) == 0:
self.logger.warning("Zero-length switch read, %d" % idx)
else:
break
if len(pkt) == 0: # Still no packet
self.logger.warning("Zero-length switch read; closing cxn")
self.logger.info(str(self))
return -1
self._pkt_handle(pkt)
elif s and s == self.waker:
self.waker.wait()
else:
self.logger.error("Unknown socket ready: " + str(s))
return -1
return 0
def active_connect(self):
"""
Actively connect to a switch IP addr
"""
try:
self.logger.info("Trying active connection to %s" % self.switch)
soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
soc.connect((self.switch, self.port))
self.logger.info("Connected to " + self.switch + " on " +
str(self.port))
soc.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, True)
self.switch_addr = (self.switch, self.port)
return soc
except (StandardError, socket.error) as e:
self.logger.error("Could not connect to %s at %d:: %s" %
(self.switch, self.port, str(e)))
return None
def wakeup(self):
"""
Wake up the event loop, presumably from another thread.
"""
self.waker.notify()
def sockets(self):
"""
Return list of sockets to select on.
"""
socs = [self.listen_socket, self.bridge_socket, self.waker]
return [x for x in socs if x]
def run(self):
"""
Activity function for class
Assumes connection to switch already exists. Listens on
switch_socket for messages until an error (or zero len pkt)
occurs.
When there is a message on the socket, check for handlers; queue the
packet if no one handles the packet.
See note for controller describing the limitation of a single
connection for now.
"""
self.dbg_state = "running"
while self.active:
try:
sel_in, sel_out, sel_err = \
select.select(self.sockets(), [], self.sockets(), 1)
except:
print( sys.exc_info())
self.logger.error("Select error, disconnecting")
self.disconnect()
for s in sel_err:
self.logger.error("Got socket error on: " + str(s) + ", disconnecting")
self.disconnect()
for s in sel_in:
if self._socket_ready_handle(s) == -1:
self.disconnect()
# End of main loop
self.dbg_state = "closing"
self.logger.info("Exiting controller thread")
self.shutdown()
def connect(self, timeout=-1):
"""
Connect to the switch
@param timeout Block for up to timeout seconds. Pass -1 for the default.
@return Boolean, True if connected
"""
pass
def disconnect(self, timeout=-1):
"""
If connected to a switch, disconnect.
"""
if self.switch_socket:
self.switch_socket.close()
self.switch_socket = None
self.switch_addr = None
with self.packets_cv:
self.packets = []
with self.connect_cv:
self.connect_cv.notifyAll()
if self.bridge_socket:
self.bridge_socket.close()
def wait_disconnected(self, timeout=-1):
"""
@param timeout Block for up to timeout seconds. Pass -1 for the default.
@return Boolean, True if disconnected
"""
with self.connect_cv:
util.timed_wait(self.connect_cv,
lambda: True if not self.switch_socket else None,
timeout=timeout)
return self.switch_socket is None
def kill(self):
"""
Force the controller thread to quit
"""
self.active = False
self.wakeup()
self.join()
def shutdown(self):
"""
Shutdown the controller closing all sockets
@todo Might want to synchronize shutdown with self.sync...
"""
self.active = False
try:
self.listen_socket.shutdown(socket.SHUT_RDWR)
except:
self.logger.info("Ignoring listen soc shutdown error")
self.listen_socket = None
with self.connect_cv:
self.connect_cv.notifyAll()
self.wakeup()
self.dbg_state = "down"
def register(self, msg_type, handler):
"""
Register a callback to receive a specific message type.
Only one handler may be registered for a given message type.
WARNING: A lock is held during the handler call back, so
the handler should not make any blocking calls
@param msg_type The type of message to receive. May be DEFAULT
for all non-handled packets. The special type, the string "all"
will send all packets to the handler.
@param handler The function to call when a message of the given
type is received.
"""
# Should check type is valid
if not handler and msg_type in self.handlers.keys():
del self.handlers[msg_type]
return
self.handlers[msg_type] = handler
def poll(self, exp_msg=None, timeout=-1):
"""
Wait for the next OF message received from the switch.
@param exp_msg If set, return only when this type of message
is received (unless timeout occurs).
@param timeout Maximum number of seconds to wait for the message.
Pass -1 for the default timeout.
@retval A pair (msg, pkt) where msg is a message object and pkt
the string representing the packet as received from the socket.
This allows additional parsing by the receiver if necessary.
The data members in the message are in host endian order.
If an error occurs, (None, None) is returned
"""
if exp_msg is None:
self.logger.warn("DEPRECATED polling for any message class")
klass = None
else:
raise ValueError("Unexpected exp_msg argument %r" % exp_msg)
# Take the packet from the queue
def grab():
for i, (msg, pkt) in enumerate(self.packets):
if klass is None or isinstance(msg, klass):
self.logger.debug("Got %s message", msg.__class__.__name__)
return self.packets.pop(i)
# Not found
return None
with self.packets_cv:
ret = util.timed_wait(self.packets_cv, grab, timeout=timeout)
if ret != None:
(msg, pkt) = ret
return (msg, pkt)
else:
return (None, None)
def transact(self, msg, timeout=-1):
"""
Run a message transaction with the switch
Send the message in msg and wait for a reply with a matching
transaction id. Transactions have the highest priority in
received message handling.
@param msg The message object to send; must not be a string
@param timeout The timeout in seconds; if -1 use default.
"""
if msg.xid == None:
msg.xid = util.gen_xid()
self.logger.debug("Running transaction %d" % msg.xid)
with self.xid_cv:
if self.xid:
self.logger.error("Can only run one transaction at a time")
return (None, None)
self.xid = msg.xid
self.xid_response = None
self.message_send(msg)
self.logger.debug("Waiting for transaction %d" % msg.xid)
util.timed_wait(self.xid_cv, lambda: self.xid_response, timeout=timeout)
if self.xid_response:
(resp, pkt) = self.xid_response
self.xid_response = None
else:
(resp, pkt) = (None, None)
if resp is None:
self.logger.warning("No response for xid " + str(self.xid))
return (resp, pkt)
def message_send(self, msg):
"""
Send the message to the switch
@param msg A string or OpenFlow message object to be forwarded to
the switch.
"""
if not self.switch_socket:
# Sending a string indicates the message is ready to go
raise Exception("no socket")
if msg.xid == None:
msg.xid = util.gen_xid()
outpkt = msg.pack()
self.logger.debug("Msg out: version %d class %s len %d xid %d",
msg.version, type(msg).__name__, len(outpkt), msg.xid)
with self.tx_lock:
if self.switch_socket.sendall(outpkt) is not None:
raise AssertionError("failed to send message to switch")
return 0 # for backwards compatibility
def clear_queue(self):
"""
Clear the input queue and report the number of messages
that were in it
"""
enqueued_pkt_count = len(self.packets)
with self.packets_cv:
self.packets = []
return enqueued_pkt_count
def __str__(self):
|
def show(self):
print(str(self))
def sample_handler(controller, msg, pkt):
"""
Sample message handler
This is the prototype for functions registered with the controller
class for packet reception
@param controller The controller calling the handler
@param msg The parsed message object
@param pkt The raw packet that was received on the socket. This is
in case the packet contains extra unparsed data.
@returns Boolean value indicating if the packet was handled. If
not handled, the packet is placed in the queue for pollers to received
"""
pass
| string = "Controller:\n"
string += " state " + self.dbg_state + "\n"
string += " switch_addr " + str(self.switch_addr) + "\n"
string += " pending pkts " + str(len(self.packets)) + "\n"
string += " total pkts " + str(self.packets_total) + "\n"
string += " expired pkts " + str(self.packets_expired) + "\n"
string += " handled pkts " + str(self.packets_handled) + "\n"
string += " poll discards " + str(self.poll_discards) + "\n"
string += " parse errors " + str(self.parse_errors) + "\n"
string += " sock errrors " + str(self.socket_errors) + "\n"
string += " max pkts " + str(self.max_pkts) + "\n"
string += " target switch " + str(self.switch) + "\n"
string += " host " + str(self.host) + "\n"
string += " port " + str(self.port) + "\n"
string += " keep_alive " + str(self.keep_alive) + "\n"
string += " pkt_in_run " + str(self.pkt_in_run) + "\n"
string += " pkt_in_dropped " + str(self.pkt_in_dropped) + "\n"
return string | identifier_body |
tunnel.py | import sys
import os
import socket
import time
import struct
import select
import logging
from threading import Thread
from threading import Lock
from threading import Condition
from lowpan import util
import lowpan
##@todo Find a better home for these identifiers (controller)
RCV_SIZE_DEFAULT = 32768
LISTEN_QUEUE_SIZE = 1
class VirtualTunnel(Thread):
"""
Class abstracting the control interface to the switch.
For receiving messages, two mechanism will be implemented. First,
query the interface with poll. Second, register to have a
function called by message type. The callback is passed the
message type as well as the raw packet (or message object)
One of the main purposes of this object is to translate between network
and host byte order. 'Above' this object, things should be in host
byte order.
@todo Consider using SocketServer for listening socket
@todo Test transaction code
@var rcv_size The receive size to use for receive calls
@var max_pkts The max size of the receive queue
@var keep_alive If true, listen for echo requests and respond w/
echo replies
@var initial_hello If true, will send a hello message immediately
upon connecting to the switch
@var switch If not None, do an active connection to the switch
@var host The host to use for connect
@var port The port to connect on
@var packets_total Total number of packets received
@var packets_expired Number of packets popped from queue as queue full
@var packets_handled Number of packets handled by something
@var dbg_state Debug indication of state
"""
def __init__(self, bdg_unix_addr = None ,tun_unix_addr = 'uds_tunnel', host='192.168.2.200', port=1024, max_pkts=1024):
Thread.__init__(self)
# Socket related
self.rcv_size = RCV_SIZE_DEFAULT
self.listen_socket = None
self.switch_socket = None
self.switch_addr = None
self.connect_cv = Condition()
self.message_cv = Condition()
self.tx_lock = Lock()
# Used to wake up the event loop from another thread
self.waker = util.EventDescriptor()
# Counters
self.socket_errors = 0
self.parse_errors = 0
self.packets_total = 0
self.packets_expired = 0
self.packets_handled = 0
self.poll_discards = 0
# State
self.sync = Lock()
self.handlers = {}
self.keep_alive = False
self.active = True
self.initial_hello = True
# OpenFlow message/packet queue
# Protected by the packets_cv lock / condition variable
self.packets = []
self.packets_cv = Condition()
self.packet_in_count = 0
# Settings
self.max_pkts = max_pkts
self.bdg_unix_addr = bdg_unix_addr
self.tun_unix_addr = tun_unix_addr
self.host = host
self.port = port
self.dbg_state = "init"
self.logger = logging.getLogger("VirtualTunnel")
self.filter_packet_in = False # Drop "excessive" packet ins
self.pkt_in_run = 0 # Count on run of packet ins
self.pkt_in_filter_limit = 50 # Count on run of packet ins
self.pkt_in_dropped = 0 # Total dropped packet ins
self.transact_to = 15 # Transact timeout default value; add to config
# Transaction and message type waiting variables
# xid_cv: Condition variable (semaphore) for packet waiters
# xid: Transaction ID being waited on
# xid_response: Transaction response message
self.xid_cv = Condition()
self.xid = None
self.xid_response = None
self.debug = False
self.buffered_input = ""
# Create listen socket
self.logger.info("Create/listen at " + self.host + ":" +
str(self.port))
ai = socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC,
socket.SOCK_DGRAM, 0, socket.AI_PASSIVE)
# Use first returned addrinfo
(family, socktype, proto, name, sockaddr) = ai[0]
self.listen_socket = socket.socket(family, socktype)
self.listen_socket.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
self.listen_socket.bind(sockaddr)
self.switch_socket = self.listen_socket
# Make sure the socket does not already exist
try:
os.unlink(self.tun_unix_addr)
except OSError:
if os.path.exists(self.tun_unix_addr):
raise
self.bridge_socket = socket.socket(socket.AF_UNIX,socket.SOCK_DGRAM)
# Bind the socket to the port
self.logger.info("Create/listen at " + str(self.tun_unix_addr))
self.bridge_socket.bind(self.tun_unix_addr)
def filter_packet(self, rawmsg, hdr):
"""
Check if packet should be filtered
Currently filters packet in messages
@return Boolean, True if packet should be dropped
"""
# XXX didn't actually check for packet-in...
return False
# Add check for packet in and rate limit
if self.filter_packet_in:
# If we were dropping packets, report number dropped
# TODO dont drop expected packet ins
if self.pkt_in_run > self.pkt_in_filter_limit:
self.logger.debug("Dropped %d packet ins (%d total)"
% ((self.pkt_in_run -
self.pkt_in_filter_limit),
self.pkt_in_dropped))
self.pkt_in_run = 0
return False
def _pkt_handle(self, pkt):
"""
Check for all packet handling conditions
Parse and verify message
Check if XID matches something waiting
Check if message is being expected for a poll operation
Check if keep alive is on and message is an echo request
Check if any registered handler wants the packet
Enqueue if none of those conditions is met
an echo request in case keep_alive is true, followed by
registered message handlers.
@param pkt The raw packet (string) which may contain multiple OF msgs
"""
# snag any left over data from last read()
# Parse the header to get type
offset, payload_len, subtype, nxp_sniffer = lowpan.message.parse_header(pkt[0])
# Extract the raw message bytes
rawmsg = pkt[0][offset : offset + payload_len]
if self.debug:
print(pkt[1])
print(util.hex_dump_buffer(rawmsg))
# Now check for message handlers; preference is given to
# handlers for a specific packet
handled = False
# Send to bridge socket
if self.bdg_unix_addr:
self.bridge_socket.sendto(rawmsg,self.bdg_unix_addr)
handled = True
if subtype in self.handlers.keys():
handled = self.handlers[subtype](self, nxp_sniffer, rawmsg)
if not handled and ("all" in self.handlers.keys()):
handled = self.handlers["all"](self, nxp_sniffer, rawmsg)
if not handled: # Not handled, enqueue
with self.packets_cv:
if len(self.packets) >= self.max_pkts:
self.packets.pop(0)
self.packets_expired += 1
self.packets.append((nxp_sniffer, rawmsg))
self.packets_cv.notify_all()
self.packets_total += 1
else:
self.packets_handled += 1
self.logger.debug("Message handled by callback")
def _socket_ready_handle(self, s):
"""
Handle an input-ready socket
@param s The socket object that is ready
@returns 0 on success, -1 on error
"""
if s and s == self.switch_socket:
for idx in range(3): # debug: try a couple of times
try:
pkt = self.switch_socket.recvfrom(self.rcv_size)
except:
self.logger.warning("Error on switch read")
return -1
if not self.active:
return 0
if len(pkt) == 0:
self.logger.warning("Zero-length switch read, %d" % idx)
else:
break
if len(pkt) == 0: # Still no packet
self.logger.warning("Zero-length switch read; closing cxn")
self.logger.info(str(self))
return -1
self._pkt_handle(pkt)
elif s and s == self.waker:
self.waker.wait()
else:
self.logger.error("Unknown socket ready: " + str(s))
return -1
return 0
def active_connect(self):
"""
Actively connect to a switch IP addr
"""
try:
self.logger.info("Trying active connection to %s" % self.switch)
soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
soc.connect((self.switch, self.port))
self.logger.info("Connected to " + self.switch + " on " +
str(self.port))
soc.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, True)
self.switch_addr = (self.switch, self.port)
return soc
except (StandardError, socket.error) as e:
self.logger.error("Could not connect to %s at %d:: %s" %
(self.switch, self.port, str(e)))
return None
def wakeup(self):
"""
Wake up the event loop, presumably from another thread.
"""
self.waker.notify()
def sockets(self):
"""
Return list of sockets to select on.
"""
socs = [self.listen_socket, self.bridge_socket, self.waker]
return [x for x in socs if x]
def run(self):
"""
Activity function for class
Assumes connection to switch already exists. Listens on
switch_socket for messages until an error (or zero len pkt)
occurs.
When there is a message on the socket, check for handlers; queue the
packet if no one handles the packet.
See note for controller describing the limitation of a single
connection for now.
"""
self.dbg_state = "running"
while self.active:
try:
sel_in, sel_out, sel_err = \
select.select(self.sockets(), [], self.sockets(), 1)
except:
print( sys.exc_info())
self.logger.error("Select error, disconnecting")
self.disconnect()
for s in sel_err:
self.logger.error("Got socket error on: " + str(s) + ", disconnecting")
self.disconnect()
for s in sel_in:
if self._socket_ready_handle(s) == -1:
self.disconnect()
# End of main loop
self.dbg_state = "closing"
self.logger.info("Exiting controller thread")
self.shutdown()
def connect(self, timeout=-1):
"""
Connect to the switch
@param timeout Block for up to timeout seconds. Pass -1 for the default.
@return Boolean, True if connected
"""
pass
def disconnect(self, timeout=-1):
"""
If connected to a switch, disconnect.
"""
if self.switch_socket:
self.switch_socket.close()
self.switch_socket = None
self.switch_addr = None
with self.packets_cv:
self.packets = []
with self.connect_cv:
self.connect_cv.notifyAll()
if self.bridge_socket:
self.bridge_socket.close()
def wait_disconnected(self, timeout=-1):
"""
@param timeout Block for up to timeout seconds. Pass -1 for the default.
@return Boolean, True if disconnected
"""
with self.connect_cv:
util.timed_wait(self.connect_cv,
lambda: True if not self.switch_socket else None,
timeout=timeout)
return self.switch_socket is None
def kill(self):
"""
Force the controller thread to quit
"""
self.active = False
self.wakeup()
self.join()
def shutdown(self):
"""
Shutdown the controller closing all sockets
@todo Might want to synchronize shutdown with self.sync...
"""
self.active = False
try:
self.listen_socket.shutdown(socket.SHUT_RDWR)
except:
self.logger.info("Ignoring listen soc shutdown error")
self.listen_socket = None
with self.connect_cv:
self.connect_cv.notifyAll()
self.wakeup()
self.dbg_state = "down"
def register(self, msg_type, handler):
"""
Register a callback to receive a specific message type.
Only one handler may be registered for a given message type.
WARNING: A lock is held during the handler call back, so
the handler should not make any blocking calls
@param msg_type The type of message to receive. May be DEFAULT
for all non-handled packets. The special type, the string "all"
will send all packets to the handler.
@param handler The function to call when a message of the given
type is received.
"""
# Should check type is valid
if not handler and msg_type in self.handlers.keys():
del self.handlers[msg_type]
return
self.handlers[msg_type] = handler
def poll(self, exp_msg=None, timeout=-1):
"""
Wait for the next OF message received from the switch.
@param exp_msg If set, return only when this type of message
is received (unless timeout occurs).
@param timeout Maximum number of seconds to wait for the message.
Pass -1 for the default timeout.
@retval A pair (msg, pkt) where msg is a message object and pkt
the string representing the packet as received from the socket.
This allows additional parsing by the receiver if necessary.
The data members in the message are in host endian order.
If an error occurs, (None, None) is returned
"""
if exp_msg is None:
self.logger.warn("DEPRECATED polling for any message class")
klass = None
else:
raise ValueError("Unexpected exp_msg argument %r" % exp_msg)
# Take the packet from the queue
def grab():
for i, (msg, pkt) in enumerate(self.packets):
if klass is None or isinstance(msg, klass):
self.logger.debug("Got %s message", msg.__class__.__name__)
return self.packets.pop(i)
# Not found
return None
with self.packets_cv:
ret = util.timed_wait(self.packets_cv, grab, timeout=timeout)
if ret != None:
(msg, pkt) = ret
return (msg, pkt)
else:
return (None, None)
def transact(self, msg, timeout=-1):
"""
Run a message transaction with the switch
Send the message in msg and wait for a reply with a matching
transaction id. Transactions have the highest priority in
received message handling.
@param msg The message object to send; must not be a string
@param timeout The timeout in seconds; if -1 use default.
"""
if msg.xid == None:
msg.xid = util.gen_xid()
self.logger.debug("Running transaction %d" % msg.xid)
with self.xid_cv:
if self.xid:
self.logger.error("Can only run one transaction at a time")
return (None, None)
self.xid = msg.xid
self.xid_response = None
self.message_send(msg)
self.logger.debug("Waiting for transaction %d" % msg.xid)
util.timed_wait(self.xid_cv, lambda: self.xid_response, timeout=timeout)
if self.xid_response:
(resp, pkt) = self.xid_response
self.xid_response = None
else:
|
if resp is None:
self.logger.warning("No response for xid " + str(self.xid))
return (resp, pkt)
def message_send(self, msg):
"""
Send the message to the switch
@param msg A string or OpenFlow message object to be forwarded to
the switch.
"""
if not self.switch_socket:
# Sending a string indicates the message is ready to go
raise Exception("no socket")
if msg.xid == None:
msg.xid = util.gen_xid()
outpkt = msg.pack()
self.logger.debug("Msg out: version %d class %s len %d xid %d",
msg.version, type(msg).__name__, len(outpkt), msg.xid)
with self.tx_lock:
if self.switch_socket.sendall(outpkt) is not None:
raise AssertionError("failed to send message to switch")
return 0 # for backwards compatibility
def clear_queue(self):
"""
Clear the input queue and report the number of messages
that were in it
"""
enqueued_pkt_count = len(self.packets)
with self.packets_cv:
self.packets = []
return enqueued_pkt_count
def __str__(self):
string = "Controller:\n"
string += " state " + self.dbg_state + "\n"
string += " switch_addr " + str(self.switch_addr) + "\n"
string += " pending pkts " + str(len(self.packets)) + "\n"
string += " total pkts " + str(self.packets_total) + "\n"
string += " expired pkts " + str(self.packets_expired) + "\n"
string += " handled pkts " + str(self.packets_handled) + "\n"
string += " poll discards " + str(self.poll_discards) + "\n"
string += " parse errors " + str(self.parse_errors) + "\n"
string += " sock errrors " + str(self.socket_errors) + "\n"
string += " max pkts " + str(self.max_pkts) + "\n"
string += " target switch " + str(self.switch) + "\n"
string += " host " + str(self.host) + "\n"
string += " port " + str(self.port) + "\n"
string += " keep_alive " + str(self.keep_alive) + "\n"
string += " pkt_in_run " + str(self.pkt_in_run) + "\n"
string += " pkt_in_dropped " + str(self.pkt_in_dropped) + "\n"
return string
def show(self):
print(str(self))
def sample_handler(controller, msg, pkt):
"""
Sample message handler
This is the prototype for functions registered with the controller
class for packet reception
@param controller The controller calling the handler
@param msg The parsed message object
@param pkt The raw packet that was received on the socket. This is
in case the packet contains extra unparsed data.
@returns Boolean value indicating if the packet was handled. If
not handled, the packet is placed in the queue for pollers to received
"""
pass
| (resp, pkt) = (None, None) | conditional_block |
tunnel.py | import sys
import os
import socket
import time
import struct
import select
import logging
from threading import Thread
from threading import Lock
from threading import Condition
from lowpan import util
import lowpan
##@todo Find a better home for these identifiers (controller)
RCV_SIZE_DEFAULT = 32768
LISTEN_QUEUE_SIZE = 1
| Class abstracting the control interface to the switch.
For receiving messages, two mechanism will be implemented. First,
query the interface with poll. Second, register to have a
function called by message type. The callback is passed the
message type as well as the raw packet (or message object)
One of the main purposes of this object is to translate between network
and host byte order. 'Above' this object, things should be in host
byte order.
@todo Consider using SocketServer for listening socket
@todo Test transaction code
@var rcv_size The receive size to use for receive calls
@var max_pkts The max size of the receive queue
@var keep_alive If true, listen for echo requests and respond w/
echo replies
@var initial_hello If true, will send a hello message immediately
upon connecting to the switch
@var switch If not None, do an active connection to the switch
@var host The host to use for connect
@var port The port to connect on
@var packets_total Total number of packets received
@var packets_expired Number of packets popped from queue as queue full
@var packets_handled Number of packets handled by something
@var dbg_state Debug indication of state
"""
def __init__(self, bdg_unix_addr = None ,tun_unix_addr = 'uds_tunnel', host='192.168.2.200', port=1024, max_pkts=1024):
Thread.__init__(self)
# Socket related
self.rcv_size = RCV_SIZE_DEFAULT
self.listen_socket = None
self.switch_socket = None
self.switch_addr = None
self.connect_cv = Condition()
self.message_cv = Condition()
self.tx_lock = Lock()
# Used to wake up the event loop from another thread
self.waker = util.EventDescriptor()
# Counters
self.socket_errors = 0
self.parse_errors = 0
self.packets_total = 0
self.packets_expired = 0
self.packets_handled = 0
self.poll_discards = 0
# State
self.sync = Lock()
self.handlers = {}
self.keep_alive = False
self.active = True
self.initial_hello = True
# OpenFlow message/packet queue
# Protected by the packets_cv lock / condition variable
self.packets = []
self.packets_cv = Condition()
self.packet_in_count = 0
# Settings
self.max_pkts = max_pkts
self.bdg_unix_addr = bdg_unix_addr
self.tun_unix_addr = tun_unix_addr
self.host = host
self.port = port
self.dbg_state = "init"
self.logger = logging.getLogger("VirtualTunnel")
self.filter_packet_in = False # Drop "excessive" packet ins
self.pkt_in_run = 0 # Count on run of packet ins
self.pkt_in_filter_limit = 50 # Count on run of packet ins
self.pkt_in_dropped = 0 # Total dropped packet ins
self.transact_to = 15 # Transact timeout default value; add to config
# Transaction and message type waiting variables
# xid_cv: Condition variable (semaphore) for packet waiters
# xid: Transaction ID being waited on
# xid_response: Transaction response message
self.xid_cv = Condition()
self.xid = None
self.xid_response = None
self.debug = False
self.buffered_input = ""
# Create listen socket
self.logger.info("Create/listen at " + self.host + ":" +
str(self.port))
ai = socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC,
socket.SOCK_DGRAM, 0, socket.AI_PASSIVE)
# Use first returned addrinfo
(family, socktype, proto, name, sockaddr) = ai[0]
self.listen_socket = socket.socket(family, socktype)
self.listen_socket.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
self.listen_socket.bind(sockaddr)
self.switch_socket = self.listen_socket
# Make sure the socket does not already exist
try:
os.unlink(self.tun_unix_addr)
except OSError:
if os.path.exists(self.tun_unix_addr):
raise
self.bridge_socket = socket.socket(socket.AF_UNIX,socket.SOCK_DGRAM)
# Bind the socket to the port
self.logger.info("Create/listen at " + str(self.tun_unix_addr))
self.bridge_socket.bind(self.tun_unix_addr)
def filter_packet(self, rawmsg, hdr):
"""
Check if packet should be filtered
Currently filters packet in messages
@return Boolean, True if packet should be dropped
"""
# XXX didn't actually check for packet-in...
return False
# Add check for packet in and rate limit
if self.filter_packet_in:
# If we were dropping packets, report number dropped
# TODO dont drop expected packet ins
if self.pkt_in_run > self.pkt_in_filter_limit:
self.logger.debug("Dropped %d packet ins (%d total)"
% ((self.pkt_in_run -
self.pkt_in_filter_limit),
self.pkt_in_dropped))
self.pkt_in_run = 0
return False
def _pkt_handle(self, pkt):
"""
Check for all packet handling conditions
Parse and verify message
Check if XID matches something waiting
Check if message is being expected for a poll operation
Check if keep alive is on and message is an echo request
Check if any registered handler wants the packet
Enqueue if none of those conditions is met
an echo request in case keep_alive is true, followed by
registered message handlers.
@param pkt The raw packet (string) which may contain multiple OF msgs
"""
# snag any left over data from last read()
# Parse the header to get type
offset, payload_len, subtype, nxp_sniffer = lowpan.message.parse_header(pkt[0])
# Extract the raw message bytes
rawmsg = pkt[0][offset : offset + payload_len]
if self.debug:
print(pkt[1])
print(util.hex_dump_buffer(rawmsg))
# Now check for message handlers; preference is given to
# handlers for a specific packet
handled = False
# Send to bridge socket
if self.bdg_unix_addr:
self.bridge_socket.sendto(rawmsg,self.bdg_unix_addr)
handled = True
if subtype in self.handlers.keys():
handled = self.handlers[subtype](self, nxp_sniffer, rawmsg)
if not handled and ("all" in self.handlers.keys()):
handled = self.handlers["all"](self, nxp_sniffer, rawmsg)
if not handled: # Not handled, enqueue
with self.packets_cv:
if len(self.packets) >= self.max_pkts:
self.packets.pop(0)
self.packets_expired += 1
self.packets.append((nxp_sniffer, rawmsg))
self.packets_cv.notify_all()
self.packets_total += 1
else:
self.packets_handled += 1
self.logger.debug("Message handled by callback")
def _socket_ready_handle(self, s):
"""
Handle an input-ready socket
@param s The socket object that is ready
@returns 0 on success, -1 on error
"""
if s and s == self.switch_socket:
for idx in range(3): # debug: try a couple of times
try:
pkt = self.switch_socket.recvfrom(self.rcv_size)
except:
self.logger.warning("Error on switch read")
return -1
if not self.active:
return 0
if len(pkt) == 0:
self.logger.warning("Zero-length switch read, %d" % idx)
else:
break
if len(pkt) == 0: # Still no packet
self.logger.warning("Zero-length switch read; closing cxn")
self.logger.info(str(self))
return -1
self._pkt_handle(pkt)
elif s and s == self.waker:
self.waker.wait()
else:
self.logger.error("Unknown socket ready: " + str(s))
return -1
return 0
def active_connect(self):
"""
Actively connect to a switch IP addr
"""
try:
self.logger.info("Trying active connection to %s" % self.switch)
soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
soc.connect((self.switch, self.port))
self.logger.info("Connected to " + self.switch + " on " +
str(self.port))
soc.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, True)
self.switch_addr = (self.switch, self.port)
return soc
except (StandardError, socket.error) as e:
self.logger.error("Could not connect to %s at %d:: %s" %
(self.switch, self.port, str(e)))
return None
def wakeup(self):
"""
Wake up the event loop, presumably from another thread.
"""
self.waker.notify()
def sockets(self):
"""
Return list of sockets to select on.
"""
socs = [self.listen_socket, self.bridge_socket, self.waker]
return [x for x in socs if x]
def run(self):
"""
Activity function for class
Assumes connection to switch already exists. Listens on
switch_socket for messages until an error (or zero len pkt)
occurs.
When there is a message on the socket, check for handlers; queue the
packet if no one handles the packet.
See note for controller describing the limitation of a single
connection for now.
"""
self.dbg_state = "running"
while self.active:
try:
sel_in, sel_out, sel_err = \
select.select(self.sockets(), [], self.sockets(), 1)
except:
print( sys.exc_info())
self.logger.error("Select error, disconnecting")
self.disconnect()
for s in sel_err:
self.logger.error("Got socket error on: " + str(s) + ", disconnecting")
self.disconnect()
for s in sel_in:
if self._socket_ready_handle(s) == -1:
self.disconnect()
# End of main loop
self.dbg_state = "closing"
self.logger.info("Exiting controller thread")
self.shutdown()
def connect(self, timeout=-1):
"""
Connect to the switch
@param timeout Block for up to timeout seconds. Pass -1 for the default.
@return Boolean, True if connected
"""
pass
def disconnect(self, timeout=-1):
"""
If connected to a switch, disconnect.
"""
if self.switch_socket:
self.switch_socket.close()
self.switch_socket = None
self.switch_addr = None
with self.packets_cv:
self.packets = []
with self.connect_cv:
self.connect_cv.notifyAll()
if self.bridge_socket:
self.bridge_socket.close()
def wait_disconnected(self, timeout=-1):
"""
@param timeout Block for up to timeout seconds. Pass -1 for the default.
@return Boolean, True if disconnected
"""
with self.connect_cv:
util.timed_wait(self.connect_cv,
lambda: True if not self.switch_socket else None,
timeout=timeout)
return self.switch_socket is None
def kill(self):
"""
Force the controller thread to quit
"""
self.active = False
self.wakeup()
self.join()
def shutdown(self):
"""
Shutdown the controller closing all sockets
@todo Might want to synchronize shutdown with self.sync...
"""
self.active = False
try:
self.listen_socket.shutdown(socket.SHUT_RDWR)
except:
self.logger.info("Ignoring listen soc shutdown error")
self.listen_socket = None
with self.connect_cv:
self.connect_cv.notifyAll()
self.wakeup()
self.dbg_state = "down"
def register(self, msg_type, handler):
"""
Register a callback to receive a specific message type.
Only one handler may be registered for a given message type.
WARNING: A lock is held during the handler call back, so
the handler should not make any blocking calls
@param msg_type The type of message to receive. May be DEFAULT
for all non-handled packets. The special type, the string "all"
will send all packets to the handler.
@param handler The function to call when a message of the given
type is received.
"""
# Should check type is valid
if not handler and msg_type in self.handlers.keys():
del self.handlers[msg_type]
return
self.handlers[msg_type] = handler
def poll(self, exp_msg=None, timeout=-1):
"""
Wait for the next OF message received from the switch.
@param exp_msg If set, return only when this type of message
is received (unless timeout occurs).
@param timeout Maximum number of seconds to wait for the message.
Pass -1 for the default timeout.
@retval A pair (msg, pkt) where msg is a message object and pkt
the string representing the packet as received from the socket.
This allows additional parsing by the receiver if necessary.
The data members in the message are in host endian order.
If an error occurs, (None, None) is returned
"""
if exp_msg is None:
self.logger.warn("DEPRECATED polling for any message class")
klass = None
else:
raise ValueError("Unexpected exp_msg argument %r" % exp_msg)
# Take the packet from the queue
def grab():
for i, (msg, pkt) in enumerate(self.packets):
if klass is None or isinstance(msg, klass):
self.logger.debug("Got %s message", msg.__class__.__name__)
return self.packets.pop(i)
# Not found
return None
with self.packets_cv:
ret = util.timed_wait(self.packets_cv, grab, timeout=timeout)
if ret != None:
(msg, pkt) = ret
return (msg, pkt)
else:
return (None, None)
def transact(self, msg, timeout=-1):
"""
Run a message transaction with the switch
Send the message in msg and wait for a reply with a matching
transaction id. Transactions have the highest priority in
received message handling.
@param msg The message object to send; must not be a string
@param timeout The timeout in seconds; if -1 use default.
"""
if msg.xid == None:
msg.xid = util.gen_xid()
self.logger.debug("Running transaction %d" % msg.xid)
with self.xid_cv:
if self.xid:
self.logger.error("Can only run one transaction at a time")
return (None, None)
self.xid = msg.xid
self.xid_response = None
self.message_send(msg)
self.logger.debug("Waiting for transaction %d" % msg.xid)
util.timed_wait(self.xid_cv, lambda: self.xid_response, timeout=timeout)
if self.xid_response:
(resp, pkt) = self.xid_response
self.xid_response = None
else:
(resp, pkt) = (None, None)
if resp is None:
self.logger.warning("No response for xid " + str(self.xid))
return (resp, pkt)
def message_send(self, msg):
"""
Send the message to the switch
@param msg A string or OpenFlow message object to be forwarded to
the switch.
"""
if not self.switch_socket:
# Sending a string indicates the message is ready to go
raise Exception("no socket")
if msg.xid == None:
msg.xid = util.gen_xid()
outpkt = msg.pack()
self.logger.debug("Msg out: version %d class %s len %d xid %d",
msg.version, type(msg).__name__, len(outpkt), msg.xid)
with self.tx_lock:
if self.switch_socket.sendall(outpkt) is not None:
raise AssertionError("failed to send message to switch")
return 0 # for backwards compatibility
def clear_queue(self):
"""
Clear the input queue and report the number of messages
that were in it
"""
enqueued_pkt_count = len(self.packets)
with self.packets_cv:
self.packets = []
return enqueued_pkt_count
def __str__(self):
string = "Controller:\n"
string += " state " + self.dbg_state + "\n"
string += " switch_addr " + str(self.switch_addr) + "\n"
string += " pending pkts " + str(len(self.packets)) + "\n"
string += " total pkts " + str(self.packets_total) + "\n"
string += " expired pkts " + str(self.packets_expired) + "\n"
string += " handled pkts " + str(self.packets_handled) + "\n"
string += " poll discards " + str(self.poll_discards) + "\n"
string += " parse errors " + str(self.parse_errors) + "\n"
string += " sock errrors " + str(self.socket_errors) + "\n"
string += " max pkts " + str(self.max_pkts) + "\n"
string += " target switch " + str(self.switch) + "\n"
string += " host " + str(self.host) + "\n"
string += " port " + str(self.port) + "\n"
string += " keep_alive " + str(self.keep_alive) + "\n"
string += " pkt_in_run " + str(self.pkt_in_run) + "\n"
string += " pkt_in_dropped " + str(self.pkt_in_dropped) + "\n"
return string
def show(self):
print(str(self))
def sample_handler(controller, msg, pkt):
"""
Sample message handler
This is the prototype for functions registered with the controller
class for packet reception
@param controller The controller calling the handler
@param msg The parsed message object
@param pkt The raw packet that was received on the socket. This is
in case the packet contains extra unparsed data.
@returns Boolean value indicating if the packet was handled. If
not handled, the packet is placed in the queue for pollers to received
"""
pass | class VirtualTunnel(Thread):
""" | random_line_split |
data.py | #!/usr/bin/env python2.7
import re
import string
from collections import defaultdict, namedtuple
from functools import partial
from itertools import count
from progressbar import ProgressBar
from hashlib import sha1
from math import log10
import os
import sys
LOGZERO = -sys.maxint - 1
exp = partial(pow, 10)
log = lambda x: LOGZERO if x == 0 else log10(x)
ALPHABET = [chr(ord('a') + i) for i in xrange(ord('z') - ord('a') + 1)] + [chr(ord('A') + i) for i in xrange(ord('Z') - ord('A') + 1)]
EM_EPSILON = 10**-6
EPSILON = 10**-8
def tokenize(s):
def is_word(x):
return len(x) and x[0] in ALPHABET
def convert(x):
return re.sub('[^a-zA-Z]', '', x).lower()
return filter(is_word, map(convert, re.findall(r"[\w'.]+", s)))
def add_log(x, y):
x,y = max(x,y), min(x,y)
if y <= LOGZERO:
return x
negdiff = y-x
return x + log(1 + exp(negdiff))
class LanguageModel(object):
def __init__(self, n):
self.smoothing = None
self.interpolate = False
self.lmbd = 0
self.models = defaultdict(dict)
self.voc = []
self.n = n
self.prob_no_information = LOGZERO
def set_model(self, n, model):
self.models[n] = model
def setn(self, n):
self.n = n
def set_voc(self, voc):
self.voc = voc
self.voc_size = len(voc)
def set_prob(self, n, words, log_prob):
w = tuple(words)
self.models[n][w] = log_prob
def set_smoothing(self, s, lmbd=1.0):
self.smoothing = s
if self.smoothing == 'ls':
self.lmbd = lmbd
elif self.smoothing == 'wb':
self.interpolate = True
def get_smoothing(self):
return self.smoothing, self.lmbd
def get_prob(self, words):
n = len(words)
words = tuple(w if w[0] == '<' or w in self.voc else '<UNK>' for w in words)
d = self.models[n]
if words in d:
return d[words]
if len(words) > 1 and words[:-1] not in self.models[n-1]:
# Previous words were not seen
if self.prob_no_information != LOGZERO:
# Use probability when there's no information
return self.prob_no_information
else:
# Backoff
return self.get_prob(words[1:])
# Check whether we did some smoothing which raised nonzero probs to some value
other = words[:-1] + ('<OTHER>',)
if other in d:
return d[other]
# Shouldn't reach here!
raise Exception(words)
def __getitem__(self, item):
return self.models[item]
def __len__(self):
return len(self.models)
def dump(self, output_file):
with open(output_file, 'wb') as f:
f.write('\\data\\\n')
for n in self.models:
f.write('ngram %d=%d\n' % (n, len(self.models[n])))
f.write('\n')
f.write('\\smoothing\\\n')
f.write('%s\t%.32f\n' % (self.smoothing, self.lmbd))
f.write('%.32f\n' % self.prob_no_information)
f.write('\n')
for n in self.models:
f.write('\\%d-grams:\n' % n)
for words, prob in self.models[n].iteritems():
f.write('%.32f\t%s\n' % (prob ," ".join(words)))
@staticmethod
def load(f):
# \data\
f.readline()
ngram = 0
while True:
l = f.readline().strip()
if not l:
break
ngram = max(ngram, int(l.split('ngram ')[1].split('=')[0]))
assert ngram != 0, "Can't find ngram in file!"
lm = LanguageModel(ngram)
# \smoothing\
f.readline()
smooth_line = f.readline().strip()
lm.prob_no_information = float(f.readline().strip())
f.readline()
smoothing, lmbd = smooth_line.split()
lm.set_smoothing(smoothing, float(lmbd))
# N-grams
current_ngram = 0
voc = set()
while True:
l = f.readline().strip()
if not l:
break
elif l.startswith('\\'): # descriptor
current_ngram = int(l[1])
else: # data line
assert current_ngram != 0, 'Invalid n-gram'
log_prob, words = l.split('\t', 1)
log_prob = float(log_prob)
words = tuple(words.split(' '))
lm.set_prob(current_ngram, words, log_prob)
if current_ngram == 1:
voc.add(words[0])
lm.set_voc(voc)
return lm
def _count_grams(self, lines_tokens):
''' Count ngrams in a given list of tokenized lines '''
# T(w) in Witten-Bell smoothing
# types_after[w] = set of all types that occur after w
types_after = defaultdict(set)
# N(w) in Witten-Bell smoothing
# num_tokens_after[w] = number of tokens that occur after w
num_tokens_after = defaultdict(int)
tokens = 0
grams = [defaultdict(int) for _ in xrange(self.n+1)]
voc = set()
for i, words in enumerate(lines_tokens):
for word in words:
voc.add(word)
num_words = len(words)
tokens += num_words
for l in xrange(1, self.n+1):
for j in xrange (l, num_words+1):
gram = tuple(words[j-l:j])
if l > 1:
# Account T(w) and N(w) in WB smoothing
prev = gram[:-1]
types_after[prev].add(gram[-1])
num_tokens_after[prev] += 1
grams[l][gram] += 1
grams[0] = dict()
grams[0][tuple()] = tokens
return tokens, grams, voc, types_after, num_tokens_after
def train_model(self, lines, silent=False):
# Print to log if needed
if silent:
def info(s):
pass
else:
def info(s):
print s
if self.interpolate:
n_held_out = int(len(lines)*.1)
held_out = lines[:n_held_out]
data = lines[n_held_out:]
else:
data = lines
info('Tokenizing...')
data_tokens = [['<s>'] + tokenize(l) + ['</s>'] for l in data]
if self.interpolate:
held_out_tokens = [['<s>'] + tokenize(l) + ['</s>'] for l in held_out]
# Build the lexicon as words that appear more than once, should be at least 99% of number of tokens
info('Building lexicon...')
lexicon = set()
unk = set()
counts = defaultdict(int)
for l in data_tokens:
for w in l:
if w[0] == '<':
continue
counts[w] += 1
if counts[w] == 1:
unk.add(w)
elif counts[w] == 2:
lexicon.add(w)
unk.remove(w)
del counts
while len(lexicon) < 0.99*(len(lexicon) + len(unk)):
for w in unk:
unk.remove(w)
lexicon.add(w)
break
del unk
info('Replacing OOV words with <UNK>...')
for l in data_tokens:
for i in xrange(len(l)):
if l[i][0] != '<' and l[i] not in lexicon:
l[i] = '<UNK>'
del lexicon
info('Counting ngrams...')
tokens, grams, voc, types_after, num_tokens_after = self._count_grams(data_tokens)
self.set_voc(voc)
num_types_after = defaultdict(int, ((x, len(types_after[x])) for x in types_after))
num_types_after[tuple()] = self.voc_size
num_tokens_after[tuple()] = tokens
info('Calculating probabilities...')
self.set_prob(1, ('<OTHER>',), LOGZERO)
for l in xrange(1, self.n+1):
for gram, gram_count in grams[l].iteritems():
prev = gram[:-1]
log_prob = self._calculate_log_prob(l, grams, gram_count, grams[l-1][prev],
num_types_after[prev], num_tokens_after[prev])
self.set_prob(l, gram, log_prob)
# Calculate probabilities for unseen ngrams (after smoothing they get a nonzero value...)
for l in xrange(2, self.n+1):
for base_gram, base_gram_count in grams[l-1].iteritems():
log_prob = self._calculate_log_prob(l, grams, 0, base_gram_count, num_types_after[base_gram],
num_tokens_after[base_gram])
self.set_prob(l, base_gram + ('<OTHER>',), log_prob)
# Pr(W_n | W_{n-1}) when C(W_n)=0, C(W_{n-1})=0
self.prob_no_information = self._calculate_log_prob(2, grams, 0, 0, 0, 0)
if not self.interpolate:
return
info('Interpolating...')
_, held_out_grams, _, held_out_types_after, held_out_num_tokens_after = \
self._count_grams(held_out_tokens)
for base_gram in grams[self.n-1]:
# Use EM to calculate lambda-values which provide max log-likelihood on held-out data
# Based on: https://www.cs.cmu.edu/~roni/11761/Presentations/degenerateEM.pdf
if held_out_num_tokens_after[base_gram] == 0:
# This base gram is so rare that it doesn't appear in the held-out data -- interpolation
# won't matter much in this case anyway!
continue
log_lmbds = [log(1) - log(self.n)]*self.n
prev_loglikelihood = self._calculate_lmbds_loglikelihood(base_gram, log_lmbds, held_out_grams,
held_out_types_after, held_out_num_tokens_after)
for t in count(1):
# E-step
# log_denoms[w] = lg(denominator for word w)
log_denoms = dict()
for w in held_out_types_after[base_gram]:
gram = base_gram + (w,)
log_denoms[w] = self._calculate_interpolated_prob(gram, log_lmbds)
# M-step
for j in xrange(self.n):
new_log_lmbd = LOGZERO
for w in held_out_types_after[base_gram]:
gram = base_gram + (w,)
val = log_lmbds[j] + self.get_prob(gram[-j-1:]) \
+ log(held_out_grams[self.n][gram]) - log_denoms[w]
new_log_lmbd = add_log(new_log_lmbd, val)
log_lmbds[j] = new_log_lmbd - log(held_out_num_tokens_after[base_gram])
# Check for convergence
loglikelihood = self._calculate_lmbds_loglikelihood(base_gram, log_lmbds, held_out_grams,
held_out_types_after, held_out_num_tokens_after)
assert loglikelihood >= prev_loglikelihood
if loglikelihood - prev_loglikelihood <= EM_EPSILON:
break
prev_loglikelihood = loglikelihood
# Calculate the new interpolated probabilities
total = LOGZERO
for w in types_after[base_gram]:
gram = base_gram + (w,)
new_prob = self._calculate_interpolated_prob(gram, log_lmbds)
self.set_prob(self.n, gram, new_prob)
total = add_log(total, new_prob)
# All other unseen probabilities - (1-Sum_w(Pr(w|base))) / Z(base)
new_other_prob = log(1.0 - exp(total)) - log(self.voc_size - num_types_after[base_gram])
self.set_prob(self.n, base_gram + ('<OTHER>',), new_other_prob)
# Verify probabilities sum to 1
info('Testing...')
l = self.n
for base_gram in grams[l-1].keys():
if base_gram[-1] == '</s>':
continue
total = LOGZERO
for w in types_after[base_gram]:
gram = base_gram + (w,)
total = add_log(total, self.get_prob(gram))
total = add_log(total, self.get_prob(base_gram + ('<OTHER>',)) + \
log(self.voc_size - num_types_after[base_gram]))
if abs(total-0.0) > EPSILON:
raise Exception('Bad total for %s: %.32f' % (base_gram, exp(total)))
def _calculate_log_prob(self, l, grams, gram_count, prev_gram_count, num_types_after, num_tokens_after):
# No need to smooth 1-grams obviously...
if l == 1 or self.smoothing == 'none' or (self.smoothing == 'ls' and self.lmbd == 0):
if prev_gram_count == 0:
# No prior information, assume uniform distribution
log_prob = -log(self.voc_size)
elif gram_count == 0:
log_prob = LOGZERO
else:
log_prob = log(gram_count) - log(prev_gram_count)
elif self.smoothing == 'ls':
log_prob = log(gram_count + self.lmbd) - log(prev_gram_count + self.lmbd * self.voc_size)
elif self.smoothing == 'wb':
z = self.voc_size - num_types_after
if gram_count == 0:
if num_types_after == 0:
log_prob = LOGZERO
else:
log_prob = log(num_types_after) - (log(z) + log(num_tokens_after + num_types_after))
else:
log_prob = log(gram_count) - log(num_tokens_after + num_types_after)
else:
raise Exception('Invalid smoothing %s' % self.smoothing)
return log_prob
def _calculate_interpolated_prob(self, gram, log_lmbds):
return reduce(add_log, (log_lmbds[k] + self.get_prob(gram[-k-1:]) for k in xrange(self.n)))
def _calculate_lmbds_loglikelihood(self, base, log_lmbds, grams, types_after, num_tokens_after):
if num_tokens_after[base] == 0:
return 0
log_likelihood = 0
for w in types_after[base]:
gram = base + (w,)
val = self._calculate_interpolated_prob(gram, log_lmbds)
log_likelihood += grams[self.n][gram] * val
log_likelihood /= num_tokens_after[base]
return log_likelihood
def load_test_file(n, lines):
n_grams = []
for line in lines:
try:
l = ['<s>'] + tokenize(line) + ['</s>']
except:
continue
n_grams.extend((l[max(0, x-n+1):x+1] for x in xrange(len(l))))
return n_grams
def normalize(a):
| s = float(sum(a))
for i in a:
a[i] /= s | identifier_body | |
data.py | #!/usr/bin/env python2.7
import re
import string
from collections import defaultdict, namedtuple
from functools import partial
from itertools import count
from progressbar import ProgressBar
from hashlib import sha1
from math import log10
import os
import sys
LOGZERO = -sys.maxint - 1
exp = partial(pow, 10)
log = lambda x: LOGZERO if x == 0 else log10(x)
ALPHABET = [chr(ord('a') + i) for i in xrange(ord('z') - ord('a') + 1)] + [chr(ord('A') + i) for i in xrange(ord('Z') - ord('A') + 1)]
EM_EPSILON = 10**-6
EPSILON = 10**-8
def tokenize(s):
def is_word(x):
return len(x) and x[0] in ALPHABET
def convert(x):
return re.sub('[^a-zA-Z]', '', x).lower()
return filter(is_word, map(convert, re.findall(r"[\w'.]+", s)))
def add_log(x, y):
x,y = max(x,y), min(x,y)
if y <= LOGZERO:
return x
negdiff = y-x
return x + log(1 + exp(negdiff))
class LanguageModel(object):
def __init__(self, n):
self.smoothing = None
self.interpolate = False
self.lmbd = 0
self.models = defaultdict(dict)
self.voc = []
self.n = n
self.prob_no_information = LOGZERO
def set_model(self, n, model):
self.models[n] = model
def setn(self, n):
self.n = n
def set_voc(self, voc):
self.voc = voc
self.voc_size = len(voc)
def set_prob(self, n, words, log_prob):
w = tuple(words)
self.models[n][w] = log_prob
def set_smoothing(self, s, lmbd=1.0):
self.smoothing = s
if self.smoothing == 'ls':
self.lmbd = lmbd
elif self.smoothing == 'wb':
self.interpolate = True
def get_smoothing(self):
return self.smoothing, self.lmbd
def get_prob(self, words):
n = len(words)
words = tuple(w if w[0] == '<' or w in self.voc else '<UNK>' for w in words)
d = self.models[n]
if words in d:
return d[words]
if len(words) > 1 and words[:-1] not in self.models[n-1]:
# Previous words were not seen
if self.prob_no_information != LOGZERO:
# Use probability when there's no information
return self.prob_no_information
else:
# Backoff
return self.get_prob(words[1:])
# Check whether we did some smoothing which raised nonzero probs to some value
other = words[:-1] + ('<OTHER>',)
if other in d:
return d[other]
# Shouldn't reach here!
raise Exception(words)
def __getitem__(self, item):
return self.models[item]
def __len__(self):
return len(self.models)
def dump(self, output_file):
with open(output_file, 'wb') as f:
f.write('\\data\\\n')
for n in self.models:
f.write('ngram %d=%d\n' % (n, len(self.models[n])))
f.write('\n')
f.write('\\smoothing\\\n')
f.write('%s\t%.32f\n' % (self.smoothing, self.lmbd))
f.write('%.32f\n' % self.prob_no_information)
f.write('\n')
for n in self.models:
f.write('\\%d-grams:\n' % n)
for words, prob in self.models[n].iteritems():
f.write('%.32f\t%s\n' % (prob ," ".join(words)))
@staticmethod
def load(f):
# \data\
f.readline()
ngram = 0
while True:
l = f.readline().strip()
if not l:
break
ngram = max(ngram, int(l.split('ngram ')[1].split('=')[0]))
assert ngram != 0, "Can't find ngram in file!"
lm = LanguageModel(ngram)
# \smoothing\
f.readline()
smooth_line = f.readline().strip()
lm.prob_no_information = float(f.readline().strip())
f.readline()
smoothing, lmbd = smooth_line.split()
lm.set_smoothing(smoothing, float(lmbd))
# N-grams
current_ngram = 0
voc = set()
while True:
l = f.readline().strip()
if not l:
break
elif l.startswith('\\'): # descriptor
current_ngram = int(l[1])
else: # data line
assert current_ngram != 0, 'Invalid n-gram'
log_prob, words = l.split('\t', 1)
log_prob = float(log_prob)
words = tuple(words.split(' '))
lm.set_prob(current_ngram, words, log_prob)
if current_ngram == 1:
voc.add(words[0])
lm.set_voc(voc)
return lm
def _count_grams(self, lines_tokens):
''' Count ngrams in a given list of tokenized lines '''
# T(w) in Witten-Bell smoothing
# types_after[w] = set of all types that occur after w
types_after = defaultdict(set)
# N(w) in Witten-Bell smoothing
# num_tokens_after[w] = number of tokens that occur after w
num_tokens_after = defaultdict(int)
tokens = 0
grams = [defaultdict(int) for _ in xrange(self.n+1)]
voc = set()
for i, words in enumerate(lines_tokens):
for word in words:
voc.add(word)
num_words = len(words)
tokens += num_words
for l in xrange(1, self.n+1):
for j in xrange (l, num_words+1):
gram = tuple(words[j-l:j])
if l > 1:
# Account T(w) and N(w) in WB smoothing
prev = gram[:-1]
types_after[prev].add(gram[-1])
num_tokens_after[prev] += 1
grams[l][gram] += 1
grams[0] = dict()
grams[0][tuple()] = tokens
return tokens, grams, voc, types_after, num_tokens_after
def train_model(self, lines, silent=False):
# Print to log if needed
if silent:
def info(s):
pass
else:
def info(s):
print s
if self.interpolate:
n_held_out = int(len(lines)*.1)
held_out = lines[:n_held_out]
data = lines[n_held_out:]
else:
data = lines
info('Tokenizing...')
data_tokens = [['<s>'] + tokenize(l) + ['</s>'] for l in data]
if self.interpolate:
held_out_tokens = [['<s>'] + tokenize(l) + ['</s>'] for l in held_out]
# Build the lexicon as words that appear more than once, should be at least 99% of number of tokens
info('Building lexicon...')
lexicon = set()
unk = set()
counts = defaultdict(int)
for l in data_tokens:
for w in l:
if w[0] == '<':
continue
counts[w] += 1
if counts[w] == 1:
unk.add(w)
elif counts[w] == 2:
lexicon.add(w)
unk.remove(w)
del counts
while len(lexicon) < 0.99*(len(lexicon) + len(unk)):
for w in unk:
unk.remove(w)
lexicon.add(w)
break
del unk
info('Replacing OOV words with <UNK>...')
for l in data_tokens:
for i in xrange(len(l)):
if l[i][0] != '<' and l[i] not in lexicon:
l[i] = '<UNK>'
del lexicon
info('Counting ngrams...')
tokens, grams, voc, types_after, num_tokens_after = self._count_grams(data_tokens)
self.set_voc(voc)
num_types_after = defaultdict(int, ((x, len(types_after[x])) for x in types_after))
num_types_after[tuple()] = self.voc_size
num_tokens_after[tuple()] = tokens
info('Calculating probabilities...')
self.set_prob(1, ('<OTHER>',), LOGZERO)
for l in xrange(1, self.n+1):
for gram, gram_count in grams[l].iteritems():
prev = gram[:-1]
log_prob = self._calculate_log_prob(l, grams, gram_count, grams[l-1][prev],
num_types_after[prev], num_tokens_after[prev])
self.set_prob(l, gram, log_prob)
# Calculate probabilities for unseen ngrams (after smoothing they get a nonzero value...)
for l in xrange(2, self.n+1):
for base_gram, base_gram_count in grams[l-1].iteritems():
log_prob = self._calculate_log_prob(l, grams, 0, base_gram_count, num_types_after[base_gram],
num_tokens_after[base_gram])
self.set_prob(l, base_gram + ('<OTHER>',), log_prob)
# Pr(W_n | W_{n-1}) when C(W_n)=0, C(W_{n-1})=0
self.prob_no_information = self._calculate_log_prob(2, grams, 0, 0, 0, 0)
if not self.interpolate:
return
info('Interpolating...')
_, held_out_grams, _, held_out_types_after, held_out_num_tokens_after = \
self._count_grams(held_out_tokens)
for base_gram in grams[self.n-1]:
# Use EM to calculate lambda-values which provide max log-likelihood on held-out data
# Based on: https://www.cs.cmu.edu/~roni/11761/Presentations/degenerateEM.pdf
if held_out_num_tokens_after[base_gram] == 0:
# This base gram is so rare that it doesn't appear in the held-out data -- interpolation
# won't matter much in this case anyway!
continue
log_lmbds = [log(1) - log(self.n)]*self.n
prev_loglikelihood = self._calculate_lmbds_loglikelihood(base_gram, log_lmbds, held_out_grams,
held_out_types_after, held_out_num_tokens_after)
for t in count(1):
# E-step
# log_denoms[w] = lg(denominator for word w)
log_denoms = dict()
for w in held_out_types_after[base_gram]:
gram = base_gram + (w,)
log_denoms[w] = self._calculate_interpolated_prob(gram, log_lmbds)
# M-step
for j in xrange(self.n):
new_log_lmbd = LOGZERO
for w in held_out_types_after[base_gram]:
gram = base_gram + (w,)
val = log_lmbds[j] + self.get_prob(gram[-j-1:]) \
+ log(held_out_grams[self.n][gram]) - log_denoms[w]
new_log_lmbd = add_log(new_log_lmbd, val)
log_lmbds[j] = new_log_lmbd - log(held_out_num_tokens_after[base_gram])
# Check for convergence
loglikelihood = self._calculate_lmbds_loglikelihood(base_gram, log_lmbds, held_out_grams,
held_out_types_after, held_out_num_tokens_after)
assert loglikelihood >= prev_loglikelihood
if loglikelihood - prev_loglikelihood <= EM_EPSILON:
break
prev_loglikelihood = loglikelihood
# Calculate the new interpolated probabilities
total = LOGZERO
for w in types_after[base_gram]:
gram = base_gram + (w,)
new_prob = self._calculate_interpolated_prob(gram, log_lmbds)
self.set_prob(self.n, gram, new_prob)
total = add_log(total, new_prob)
# All other unseen probabilities - (1-Sum_w(Pr(w|base))) / Z(base)
new_other_prob = log(1.0 - exp(total)) - log(self.voc_size - num_types_after[base_gram])
self.set_prob(self.n, base_gram + ('<OTHER>',), new_other_prob)
# Verify probabilities sum to 1
info('Testing...')
l = self.n
for base_gram in grams[l-1].keys():
if base_gram[-1] == '</s>':
continue
total = LOGZERO
for w in types_after[base_gram]:
gram = base_gram + (w,)
total = add_log(total, self.get_prob(gram))
total = add_log(total, self.get_prob(base_gram + ('<OTHER>',)) + \
log(self.voc_size - num_types_after[base_gram]))
if abs(total-0.0) > EPSILON:
raise Exception('Bad total for %s: %.32f' % (base_gram, exp(total)))
def _calculate_log_prob(self, l, grams, gram_count, prev_gram_count, num_types_after, num_tokens_after):
# No need to smooth 1-grams obviously...
if l == 1 or self.smoothing == 'none' or (self.smoothing == 'ls' and self.lmbd == 0):
if prev_gram_count == 0:
# No prior information, assume uniform distribution
log_prob = -log(self.voc_size)
elif gram_count == 0:
log_prob = LOGZERO | log_prob = log(gram_count) - log(prev_gram_count)
elif self.smoothing == 'ls':
log_prob = log(gram_count + self.lmbd) - log(prev_gram_count + self.lmbd * self.voc_size)
elif self.smoothing == 'wb':
z = self.voc_size - num_types_after
if gram_count == 0:
if num_types_after == 0:
log_prob = LOGZERO
else:
log_prob = log(num_types_after) - (log(z) + log(num_tokens_after + num_types_after))
else:
log_prob = log(gram_count) - log(num_tokens_after + num_types_after)
else:
raise Exception('Invalid smoothing %s' % self.smoothing)
return log_prob
def _calculate_interpolated_prob(self, gram, log_lmbds):
return reduce(add_log, (log_lmbds[k] + self.get_prob(gram[-k-1:]) for k in xrange(self.n)))
def _calculate_lmbds_loglikelihood(self, base, log_lmbds, grams, types_after, num_tokens_after):
if num_tokens_after[base] == 0:
return 0
log_likelihood = 0
for w in types_after[base]:
gram = base + (w,)
val = self._calculate_interpolated_prob(gram, log_lmbds)
log_likelihood += grams[self.n][gram] * val
log_likelihood /= num_tokens_after[base]
return log_likelihood
def load_test_file(n, lines):
n_grams = []
for line in lines:
try:
l = ['<s>'] + tokenize(line) + ['</s>']
except:
continue
n_grams.extend((l[max(0, x-n+1):x+1] for x in xrange(len(l))))
return n_grams
def normalize(a):
s = float(sum(a))
for i in a:
a[i] /= s | else: | random_line_split |
data.py | #!/usr/bin/env python2.7
import re
import string
from collections import defaultdict, namedtuple
from functools import partial
from itertools import count
from progressbar import ProgressBar
from hashlib import sha1
from math import log10
import os
import sys
LOGZERO = -sys.maxint - 1
exp = partial(pow, 10)
log = lambda x: LOGZERO if x == 0 else log10(x)
ALPHABET = [chr(ord('a') + i) for i in xrange(ord('z') - ord('a') + 1)] + [chr(ord('A') + i) for i in xrange(ord('Z') - ord('A') + 1)]
EM_EPSILON = 10**-6
EPSILON = 10**-8
def tokenize(s):
def is_word(x):
return len(x) and x[0] in ALPHABET
def convert(x):
return re.sub('[^a-zA-Z]', '', x).lower()
return filter(is_word, map(convert, re.findall(r"[\w'.]+", s)))
def | (x, y):
x,y = max(x,y), min(x,y)
if y <= LOGZERO:
return x
negdiff = y-x
return x + log(1 + exp(negdiff))
class LanguageModel(object):
def __init__(self, n):
self.smoothing = None
self.interpolate = False
self.lmbd = 0
self.models = defaultdict(dict)
self.voc = []
self.n = n
self.prob_no_information = LOGZERO
def set_model(self, n, model):
self.models[n] = model
def setn(self, n):
self.n = n
def set_voc(self, voc):
self.voc = voc
self.voc_size = len(voc)
def set_prob(self, n, words, log_prob):
w = tuple(words)
self.models[n][w] = log_prob
def set_smoothing(self, s, lmbd=1.0):
self.smoothing = s
if self.smoothing == 'ls':
self.lmbd = lmbd
elif self.smoothing == 'wb':
self.interpolate = True
def get_smoothing(self):
return self.smoothing, self.lmbd
def get_prob(self, words):
n = len(words)
words = tuple(w if w[0] == '<' or w in self.voc else '<UNK>' for w in words)
d = self.models[n]
if words in d:
return d[words]
if len(words) > 1 and words[:-1] not in self.models[n-1]:
# Previous words were not seen
if self.prob_no_information != LOGZERO:
# Use probability when there's no information
return self.prob_no_information
else:
# Backoff
return self.get_prob(words[1:])
# Check whether we did some smoothing which raised nonzero probs to some value
other = words[:-1] + ('<OTHER>',)
if other in d:
return d[other]
# Shouldn't reach here!
raise Exception(words)
def __getitem__(self, item):
return self.models[item]
def __len__(self):
return len(self.models)
def dump(self, output_file):
with open(output_file, 'wb') as f:
f.write('\\data\\\n')
for n in self.models:
f.write('ngram %d=%d\n' % (n, len(self.models[n])))
f.write('\n')
f.write('\\smoothing\\\n')
f.write('%s\t%.32f\n' % (self.smoothing, self.lmbd))
f.write('%.32f\n' % self.prob_no_information)
f.write('\n')
for n in self.models:
f.write('\\%d-grams:\n' % n)
for words, prob in self.models[n].iteritems():
f.write('%.32f\t%s\n' % (prob ," ".join(words)))
@staticmethod
def load(f):
# \data\
f.readline()
ngram = 0
while True:
l = f.readline().strip()
if not l:
break
ngram = max(ngram, int(l.split('ngram ')[1].split('=')[0]))
assert ngram != 0, "Can't find ngram in file!"
lm = LanguageModel(ngram)
# \smoothing\
f.readline()
smooth_line = f.readline().strip()
lm.prob_no_information = float(f.readline().strip())
f.readline()
smoothing, lmbd = smooth_line.split()
lm.set_smoothing(smoothing, float(lmbd))
# N-grams
current_ngram = 0
voc = set()
while True:
l = f.readline().strip()
if not l:
break
elif l.startswith('\\'): # descriptor
current_ngram = int(l[1])
else: # data line
assert current_ngram != 0, 'Invalid n-gram'
log_prob, words = l.split('\t', 1)
log_prob = float(log_prob)
words = tuple(words.split(' '))
lm.set_prob(current_ngram, words, log_prob)
if current_ngram == 1:
voc.add(words[0])
lm.set_voc(voc)
return lm
def _count_grams(self, lines_tokens):
''' Count ngrams in a given list of tokenized lines '''
# T(w) in Witten-Bell smoothing
# types_after[w] = set of all types that occur after w
types_after = defaultdict(set)
# N(w) in Witten-Bell smoothing
# num_tokens_after[w] = number of tokens that occur after w
num_tokens_after = defaultdict(int)
tokens = 0
grams = [defaultdict(int) for _ in xrange(self.n+1)]
voc = set()
for i, words in enumerate(lines_tokens):
for word in words:
voc.add(word)
num_words = len(words)
tokens += num_words
for l in xrange(1, self.n+1):
for j in xrange (l, num_words+1):
gram = tuple(words[j-l:j])
if l > 1:
# Account T(w) and N(w) in WB smoothing
prev = gram[:-1]
types_after[prev].add(gram[-1])
num_tokens_after[prev] += 1
grams[l][gram] += 1
grams[0] = dict()
grams[0][tuple()] = tokens
return tokens, grams, voc, types_after, num_tokens_after
def train_model(self, lines, silent=False):
# Print to log if needed
if silent:
def info(s):
pass
else:
def info(s):
print s
if self.interpolate:
n_held_out = int(len(lines)*.1)
held_out = lines[:n_held_out]
data = lines[n_held_out:]
else:
data = lines
info('Tokenizing...')
data_tokens = [['<s>'] + tokenize(l) + ['</s>'] for l in data]
if self.interpolate:
held_out_tokens = [['<s>'] + tokenize(l) + ['</s>'] for l in held_out]
# Build the lexicon as words that appear more than once, should be at least 99% of number of tokens
info('Building lexicon...')
lexicon = set()
unk = set()
counts = defaultdict(int)
for l in data_tokens:
for w in l:
if w[0] == '<':
continue
counts[w] += 1
if counts[w] == 1:
unk.add(w)
elif counts[w] == 2:
lexicon.add(w)
unk.remove(w)
del counts
while len(lexicon) < 0.99*(len(lexicon) + len(unk)):
for w in unk:
unk.remove(w)
lexicon.add(w)
break
del unk
info('Replacing OOV words with <UNK>...')
for l in data_tokens:
for i in xrange(len(l)):
if l[i][0] != '<' and l[i] not in lexicon:
l[i] = '<UNK>'
del lexicon
info('Counting ngrams...')
tokens, grams, voc, types_after, num_tokens_after = self._count_grams(data_tokens)
self.set_voc(voc)
num_types_after = defaultdict(int, ((x, len(types_after[x])) for x in types_after))
num_types_after[tuple()] = self.voc_size
num_tokens_after[tuple()] = tokens
info('Calculating probabilities...')
self.set_prob(1, ('<OTHER>',), LOGZERO)
for l in xrange(1, self.n+1):
for gram, gram_count in grams[l].iteritems():
prev = gram[:-1]
log_prob = self._calculate_log_prob(l, grams, gram_count, grams[l-1][prev],
num_types_after[prev], num_tokens_after[prev])
self.set_prob(l, gram, log_prob)
# Calculate probabilities for unseen ngrams (after smoothing they get a nonzero value...)
for l in xrange(2, self.n+1):
for base_gram, base_gram_count in grams[l-1].iteritems():
log_prob = self._calculate_log_prob(l, grams, 0, base_gram_count, num_types_after[base_gram],
num_tokens_after[base_gram])
self.set_prob(l, base_gram + ('<OTHER>',), log_prob)
# Pr(W_n | W_{n-1}) when C(W_n)=0, C(W_{n-1})=0
self.prob_no_information = self._calculate_log_prob(2, grams, 0, 0, 0, 0)
if not self.interpolate:
return
info('Interpolating...')
_, held_out_grams, _, held_out_types_after, held_out_num_tokens_after = \
self._count_grams(held_out_tokens)
for base_gram in grams[self.n-1]:
# Use EM to calculate lambda-values which provide max log-likelihood on held-out data
# Based on: https://www.cs.cmu.edu/~roni/11761/Presentations/degenerateEM.pdf
if held_out_num_tokens_after[base_gram] == 0:
# This base gram is so rare that it doesn't appear in the held-out data -- interpolation
# won't matter much in this case anyway!
continue
log_lmbds = [log(1) - log(self.n)]*self.n
prev_loglikelihood = self._calculate_lmbds_loglikelihood(base_gram, log_lmbds, held_out_grams,
held_out_types_after, held_out_num_tokens_after)
for t in count(1):
# E-step
# log_denoms[w] = lg(denominator for word w)
log_denoms = dict()
for w in held_out_types_after[base_gram]:
gram = base_gram + (w,)
log_denoms[w] = self._calculate_interpolated_prob(gram, log_lmbds)
# M-step
for j in xrange(self.n):
new_log_lmbd = LOGZERO
for w in held_out_types_after[base_gram]:
gram = base_gram + (w,)
val = log_lmbds[j] + self.get_prob(gram[-j-1:]) \
+ log(held_out_grams[self.n][gram]) - log_denoms[w]
new_log_lmbd = add_log(new_log_lmbd, val)
log_lmbds[j] = new_log_lmbd - log(held_out_num_tokens_after[base_gram])
# Check for convergence
loglikelihood = self._calculate_lmbds_loglikelihood(base_gram, log_lmbds, held_out_grams,
held_out_types_after, held_out_num_tokens_after)
assert loglikelihood >= prev_loglikelihood
if loglikelihood - prev_loglikelihood <= EM_EPSILON:
break
prev_loglikelihood = loglikelihood
# Calculate the new interpolated probabilities
total = LOGZERO
for w in types_after[base_gram]:
gram = base_gram + (w,)
new_prob = self._calculate_interpolated_prob(gram, log_lmbds)
self.set_prob(self.n, gram, new_prob)
total = add_log(total, new_prob)
# All other unseen probabilities - (1-Sum_w(Pr(w|base))) / Z(base)
new_other_prob = log(1.0 - exp(total)) - log(self.voc_size - num_types_after[base_gram])
self.set_prob(self.n, base_gram + ('<OTHER>',), new_other_prob)
# Verify probabilities sum to 1
info('Testing...')
l = self.n
for base_gram in grams[l-1].keys():
if base_gram[-1] == '</s>':
continue
total = LOGZERO
for w in types_after[base_gram]:
gram = base_gram + (w,)
total = add_log(total, self.get_prob(gram))
total = add_log(total, self.get_prob(base_gram + ('<OTHER>',)) + \
log(self.voc_size - num_types_after[base_gram]))
if abs(total-0.0) > EPSILON:
raise Exception('Bad total for %s: %.32f' % (base_gram, exp(total)))
def _calculate_log_prob(self, l, grams, gram_count, prev_gram_count, num_types_after, num_tokens_after):
# No need to smooth 1-grams obviously...
if l == 1 or self.smoothing == 'none' or (self.smoothing == 'ls' and self.lmbd == 0):
if prev_gram_count == 0:
# No prior information, assume uniform distribution
log_prob = -log(self.voc_size)
elif gram_count == 0:
log_prob = LOGZERO
else:
log_prob = log(gram_count) - log(prev_gram_count)
elif self.smoothing == 'ls':
log_prob = log(gram_count + self.lmbd) - log(prev_gram_count + self.lmbd * self.voc_size)
elif self.smoothing == 'wb':
z = self.voc_size - num_types_after
if gram_count == 0:
if num_types_after == 0:
log_prob = LOGZERO
else:
log_prob = log(num_types_after) - (log(z) + log(num_tokens_after + num_types_after))
else:
log_prob = log(gram_count) - log(num_tokens_after + num_types_after)
else:
raise Exception('Invalid smoothing %s' % self.smoothing)
return log_prob
def _calculate_interpolated_prob(self, gram, log_lmbds):
return reduce(add_log, (log_lmbds[k] + self.get_prob(gram[-k-1:]) for k in xrange(self.n)))
def _calculate_lmbds_loglikelihood(self, base, log_lmbds, grams, types_after, num_tokens_after):
if num_tokens_after[base] == 0:
return 0
log_likelihood = 0
for w in types_after[base]:
gram = base + (w,)
val = self._calculate_interpolated_prob(gram, log_lmbds)
log_likelihood += grams[self.n][gram] * val
log_likelihood /= num_tokens_after[base]
return log_likelihood
def load_test_file(n, lines):
n_grams = []
for line in lines:
try:
l = ['<s>'] + tokenize(line) + ['</s>']
except:
continue
n_grams.extend((l[max(0, x-n+1):x+1] for x in xrange(len(l))))
return n_grams
def normalize(a):
s = float(sum(a))
for i in a:
a[i] /= s
| add_log | identifier_name |
data.py | #!/usr/bin/env python2.7
import re
import string
from collections import defaultdict, namedtuple
from functools import partial
from itertools import count
from progressbar import ProgressBar
from hashlib import sha1
from math import log10
import os
import sys
LOGZERO = -sys.maxint - 1
exp = partial(pow, 10)
log = lambda x: LOGZERO if x == 0 else log10(x)
ALPHABET = [chr(ord('a') + i) for i in xrange(ord('z') - ord('a') + 1)] + [chr(ord('A') + i) for i in xrange(ord('Z') - ord('A') + 1)]
EM_EPSILON = 10**-6
EPSILON = 10**-8
def tokenize(s):
def is_word(x):
return len(x) and x[0] in ALPHABET
def convert(x):
return re.sub('[^a-zA-Z]', '', x).lower()
return filter(is_word, map(convert, re.findall(r"[\w'.]+", s)))
def add_log(x, y):
x,y = max(x,y), min(x,y)
if y <= LOGZERO:
return x
negdiff = y-x
return x + log(1 + exp(negdiff))
class LanguageModel(object):
def __init__(self, n):
self.smoothing = None
self.interpolate = False
self.lmbd = 0
self.models = defaultdict(dict)
self.voc = []
self.n = n
self.prob_no_information = LOGZERO
def set_model(self, n, model):
self.models[n] = model
def setn(self, n):
self.n = n
def set_voc(self, voc):
self.voc = voc
self.voc_size = len(voc)
def set_prob(self, n, words, log_prob):
w = tuple(words)
self.models[n][w] = log_prob
def set_smoothing(self, s, lmbd=1.0):
self.smoothing = s
if self.smoothing == 'ls':
self.lmbd = lmbd
elif self.smoothing == 'wb':
self.interpolate = True
def get_smoothing(self):
return self.smoothing, self.lmbd
def get_prob(self, words):
n = len(words)
words = tuple(w if w[0] == '<' or w in self.voc else '<UNK>' for w in words)
d = self.models[n]
if words in d:
return d[words]
if len(words) > 1 and words[:-1] not in self.models[n-1]:
# Previous words were not seen
if self.prob_no_information != LOGZERO:
# Use probability when there's no information
return self.prob_no_information
else:
# Backoff
return self.get_prob(words[1:])
# Check whether we did some smoothing which raised nonzero probs to some value
other = words[:-1] + ('<OTHER>',)
if other in d:
return d[other]
# Shouldn't reach here!
raise Exception(words)
def __getitem__(self, item):
return self.models[item]
def __len__(self):
return len(self.models)
def dump(self, output_file):
with open(output_file, 'wb') as f:
f.write('\\data\\\n')
for n in self.models:
f.write('ngram %d=%d\n' % (n, len(self.models[n])))
f.write('\n')
f.write('\\smoothing\\\n')
f.write('%s\t%.32f\n' % (self.smoothing, self.lmbd))
f.write('%.32f\n' % self.prob_no_information)
f.write('\n')
for n in self.models:
f.write('\\%d-grams:\n' % n)
for words, prob in self.models[n].iteritems():
f.write('%.32f\t%s\n' % (prob ," ".join(words)))
@staticmethod
def load(f):
# \data\
f.readline()
ngram = 0
while True:
l = f.readline().strip()
if not l:
break
ngram = max(ngram, int(l.split('ngram ')[1].split('=')[0]))
assert ngram != 0, "Can't find ngram in file!"
lm = LanguageModel(ngram)
# \smoothing\
f.readline()
smooth_line = f.readline().strip()
lm.prob_no_information = float(f.readline().strip())
f.readline()
smoothing, lmbd = smooth_line.split()
lm.set_smoothing(smoothing, float(lmbd))
# N-grams
current_ngram = 0
voc = set()
while True:
l = f.readline().strip()
if not l:
break
elif l.startswith('\\'): # descriptor
current_ngram = int(l[1])
else: # data line
assert current_ngram != 0, 'Invalid n-gram'
log_prob, words = l.split('\t', 1)
log_prob = float(log_prob)
words = tuple(words.split(' '))
lm.set_prob(current_ngram, words, log_prob)
if current_ngram == 1:
voc.add(words[0])
lm.set_voc(voc)
return lm
def _count_grams(self, lines_tokens):
''' Count ngrams in a given list of tokenized lines '''
# T(w) in Witten-Bell smoothing
# types_after[w] = set of all types that occur after w
types_after = defaultdict(set)
# N(w) in Witten-Bell smoothing
# num_tokens_after[w] = number of tokens that occur after w
num_tokens_after = defaultdict(int)
tokens = 0
grams = [defaultdict(int) for _ in xrange(self.n+1)]
voc = set()
for i, words in enumerate(lines_tokens):
for word in words:
voc.add(word)
num_words = len(words)
tokens += num_words
for l in xrange(1, self.n+1):
for j in xrange (l, num_words+1):
gram = tuple(words[j-l:j])
if l > 1:
# Account T(w) and N(w) in WB smoothing
prev = gram[:-1]
types_after[prev].add(gram[-1])
num_tokens_after[prev] += 1
grams[l][gram] += 1
grams[0] = dict()
grams[0][tuple()] = tokens
return tokens, grams, voc, types_after, num_tokens_after
def train_model(self, lines, silent=False):
# Print to log if needed
if silent:
def info(s):
pass
else:
def info(s):
print s
if self.interpolate:
n_held_out = int(len(lines)*.1)
held_out = lines[:n_held_out]
data = lines[n_held_out:]
else:
data = lines
info('Tokenizing...')
data_tokens = [['<s>'] + tokenize(l) + ['</s>'] for l in data]
if self.interpolate:
held_out_tokens = [['<s>'] + tokenize(l) + ['</s>'] for l in held_out]
# Build the lexicon as words that appear more than once, should be at least 99% of number of tokens
info('Building lexicon...')
lexicon = set()
unk = set()
counts = defaultdict(int)
for l in data_tokens:
for w in l:
if w[0] == '<':
continue
counts[w] += 1
if counts[w] == 1:
unk.add(w)
elif counts[w] == 2:
lexicon.add(w)
unk.remove(w)
del counts
while len(lexicon) < 0.99*(len(lexicon) + len(unk)):
for w in unk:
unk.remove(w)
lexicon.add(w)
break
del unk
info('Replacing OOV words with <UNK>...')
for l in data_tokens:
for i in xrange(len(l)):
if l[i][0] != '<' and l[i] not in lexicon:
l[i] = '<UNK>'
del lexicon
info('Counting ngrams...')
tokens, grams, voc, types_after, num_tokens_after = self._count_grams(data_tokens)
self.set_voc(voc)
num_types_after = defaultdict(int, ((x, len(types_after[x])) for x in types_after))
num_types_after[tuple()] = self.voc_size
num_tokens_after[tuple()] = tokens
info('Calculating probabilities...')
self.set_prob(1, ('<OTHER>',), LOGZERO)
for l in xrange(1, self.n+1):
for gram, gram_count in grams[l].iteritems():
prev = gram[:-1]
log_prob = self._calculate_log_prob(l, grams, gram_count, grams[l-1][prev],
num_types_after[prev], num_tokens_after[prev])
self.set_prob(l, gram, log_prob)
# Calculate probabilities for unseen ngrams (after smoothing they get a nonzero value...)
for l in xrange(2, self.n+1):
for base_gram, base_gram_count in grams[l-1].iteritems():
log_prob = self._calculate_log_prob(l, grams, 0, base_gram_count, num_types_after[base_gram],
num_tokens_after[base_gram])
self.set_prob(l, base_gram + ('<OTHER>',), log_prob)
# Pr(W_n | W_{n-1}) when C(W_n)=0, C(W_{n-1})=0
self.prob_no_information = self._calculate_log_prob(2, grams, 0, 0, 0, 0)
if not self.interpolate:
return
info('Interpolating...')
_, held_out_grams, _, held_out_types_after, held_out_num_tokens_after = \
self._count_grams(held_out_tokens)
for base_gram in grams[self.n-1]:
# Use EM to calculate lambda-values which provide max log-likelihood on held-out data
# Based on: https://www.cs.cmu.edu/~roni/11761/Presentations/degenerateEM.pdf
if held_out_num_tokens_after[base_gram] == 0:
# This base gram is so rare that it doesn't appear in the held-out data -- interpolation
# won't matter much in this case anyway!
continue
log_lmbds = [log(1) - log(self.n)]*self.n
prev_loglikelihood = self._calculate_lmbds_loglikelihood(base_gram, log_lmbds, held_out_grams,
held_out_types_after, held_out_num_tokens_after)
for t in count(1):
# E-step
# log_denoms[w] = lg(denominator for word w)
log_denoms = dict()
for w in held_out_types_after[base_gram]:
gram = base_gram + (w,)
log_denoms[w] = self._calculate_interpolated_prob(gram, log_lmbds)
# M-step
for j in xrange(self.n):
new_log_lmbd = LOGZERO
for w in held_out_types_after[base_gram]:
gram = base_gram + (w,)
val = log_lmbds[j] + self.get_prob(gram[-j-1:]) \
+ log(held_out_grams[self.n][gram]) - log_denoms[w]
new_log_lmbd = add_log(new_log_lmbd, val)
log_lmbds[j] = new_log_lmbd - log(held_out_num_tokens_after[base_gram])
# Check for convergence
loglikelihood = self._calculate_lmbds_loglikelihood(base_gram, log_lmbds, held_out_grams,
held_out_types_after, held_out_num_tokens_after)
assert loglikelihood >= prev_loglikelihood
if loglikelihood - prev_loglikelihood <= EM_EPSILON:
break
prev_loglikelihood = loglikelihood
# Calculate the new interpolated probabilities
total = LOGZERO
for w in types_after[base_gram]:
gram = base_gram + (w,)
new_prob = self._calculate_interpolated_prob(gram, log_lmbds)
self.set_prob(self.n, gram, new_prob)
total = add_log(total, new_prob)
# All other unseen probabilities - (1-Sum_w(Pr(w|base))) / Z(base)
new_other_prob = log(1.0 - exp(total)) - log(self.voc_size - num_types_after[base_gram])
self.set_prob(self.n, base_gram + ('<OTHER>',), new_other_prob)
# Verify probabilities sum to 1
info('Testing...')
l = self.n
for base_gram in grams[l-1].keys():
if base_gram[-1] == '</s>':
continue
total = LOGZERO
for w in types_after[base_gram]:
gram = base_gram + (w,)
total = add_log(total, self.get_prob(gram))
total = add_log(total, self.get_prob(base_gram + ('<OTHER>',)) + \
log(self.voc_size - num_types_after[base_gram]))
if abs(total-0.0) > EPSILON:
raise Exception('Bad total for %s: %.32f' % (base_gram, exp(total)))
def _calculate_log_prob(self, l, grams, gram_count, prev_gram_count, num_types_after, num_tokens_after):
# No need to smooth 1-grams obviously...
if l == 1 or self.smoothing == 'none' or (self.smoothing == 'ls' and self.lmbd == 0):
if prev_gram_count == 0:
# No prior information, assume uniform distribution
log_prob = -log(self.voc_size)
elif gram_count == 0:
log_prob = LOGZERO
else:
log_prob = log(gram_count) - log(prev_gram_count)
elif self.smoothing == 'ls':
log_prob = log(gram_count + self.lmbd) - log(prev_gram_count + self.lmbd * self.voc_size)
elif self.smoothing == 'wb':
z = self.voc_size - num_types_after
if gram_count == 0:
|
else:
log_prob = log(gram_count) - log(num_tokens_after + num_types_after)
else:
raise Exception('Invalid smoothing %s' % self.smoothing)
return log_prob
def _calculate_interpolated_prob(self, gram, log_lmbds):
return reduce(add_log, (log_lmbds[k] + self.get_prob(gram[-k-1:]) for k in xrange(self.n)))
def _calculate_lmbds_loglikelihood(self, base, log_lmbds, grams, types_after, num_tokens_after):
if num_tokens_after[base] == 0:
return 0
log_likelihood = 0
for w in types_after[base]:
gram = base + (w,)
val = self._calculate_interpolated_prob(gram, log_lmbds)
log_likelihood += grams[self.n][gram] * val
log_likelihood /= num_tokens_after[base]
return log_likelihood
def load_test_file(n, lines):
n_grams = []
for line in lines:
try:
l = ['<s>'] + tokenize(line) + ['</s>']
except:
continue
n_grams.extend((l[max(0, x-n+1):x+1] for x in xrange(len(l))))
return n_grams
def normalize(a):
s = float(sum(a))
for i in a:
a[i] /= s
| if num_types_after == 0:
log_prob = LOGZERO
else:
log_prob = log(num_types_after) - (log(z) + log(num_tokens_after + num_types_after)) | conditional_block |
lptim.rs | //! Low-Power Timer (LPTIM) support.
use crate::gpio::{self, gpiob};
use crate::hal;
use crate::pac::LPTIM;
use crate::pwr::PWR;
use crate::rcc::{Enable, Rcc, Reset};
use cast::{u32, u64};
use core::convert::TryFrom;
use core::marker::PhantomData;
use embedded_time::duration::Microseconds;
use embedded_time::rate::Hertz;
use void::Void;
mod sealed {
pub trait Sealed {}
}
/// Low-Power Timer counting in one-shot mode.
pub enum OneShot {}
/// Low-Power Timer counting in periodic mode.
pub enum Periodic {}
/// Low-Power Timer in encoder mode.
pub enum Encoder {}
impl sealed::Sealed for OneShot {}
impl sealed::Sealed for Periodic {}
impl sealed::Sealed for Encoder {}
/// Marker trait for counter directions.
pub trait CountMode: sealed::Sealed {}
impl CountMode for OneShot {}
impl CountMode for Periodic {}
impl CountMode for Encoder {}
/// Clock source selection for the Low-Power Timer `LPTIM`.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum | {
/// Drive LPTIM with APB1 clock.
Apb1 = 0b00,
/// Drive LPTIM with Low-Speed Internal (LSI) clock.
///
/// The user has to ensure that the LSI clock is running, or the timer won't
/// start counting.
Lsi = 0b01,
/// Drive LPTIM with Internal 16 MHz clock.
Hsi16 = 0b10,
/// Drive LPTIM with Low-Speed External (LSE) clock at 32.768 kHz.
///
/// The user has to ensure that the LSE clock is running, or the timer won't
/// start counting.
Lse = 0b11,
}
/// Interrupt enable flags.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)]
pub struct Interrupts {
/// Encoder direction change to down.
pub enc_dir_down: bool,
/// Encoder direction change to up.
pub enc_dir_up: bool,
/// ARR register update successful.
pub autoreload_update_ok: bool,
/// CMP register update successful.
pub compare_update_ok: bool,
/// Valid edge on ext. trigger input.
pub ext_trig: bool,
/// ARR register matches current CNT value.
pub autoreload_match: bool,
/// CMP register matches current CNT value.
pub compare_match: bool,
}
/// Low-Power Timer (`LPTIM`).
///
/// The Low-Power Timer is a 16-bit timer with a prescaler of up to 128. It can run off of the APB1,
/// LSI, HSI16, or LSE clocks. With LSE, the slowest clock at 32.768 kHz, this results in a maximum
/// timeout of 256 seconds, or 4 minutes and 16 seconds.
///
/// The timer can be initialized either in one-shot mode or in periodic mode, using `init_oneshot`
/// or `init_periodic` respectively. In periodic mode, the embedded-hal `Periodic` marker trait is
/// implemented and the `CountDown` implementation uses `Hertz` as the time unit. In one-shot mode,
/// the `CountDown` implementation instead uses `Microseconds`, allowing for a multi-second timeout
/// to be configured (with the tradeoff being a larger code size due to use of 64-bit arithmetic).
pub struct LpTimer<M: CountMode> {
lptim: LPTIM,
input_freq: Hertz,
_mode: PhantomData<M>,
}
impl LpTimer<Periodic> {
/// Initializes the Low-Power Timer in periodic mode.
///
/// The timer needs to be started by calling `.start(freq)`.
pub fn init_periodic(lptim: LPTIM, pwr: &mut PWR, rcc: &mut Rcc, clk: ClockSrc) -> Self {
Self::init(lptim, pwr, rcc, clk)
}
}
impl LpTimer<OneShot> {
/// Initializes the Low-Power Timer in one-shot mode.
///
/// The timer needs to be started by calling `.start(freq)`.
pub fn init_oneshot(lptim: LPTIM, pwr: &mut PWR, rcc: &mut Rcc, clk: ClockSrc) -> Self {
Self::init(lptim, pwr, rcc, clk)
}
}
impl LpTimer<Encoder> {
    /// Initializes the Low-Power Timer in encoder mode.
    ///
    /// The `enable` method must be called to enable the encoder input.
    pub fn init_encoder(
        lptim: LPTIM,
        pwr: &mut PWR,
        rcc: &mut Rcc,
        clk: ClockSrc,
        (pb5, pb7): (gpiob::PB5<gpio::Analog>, gpiob::PB7<gpio::Analog>),
    ) -> Self {
        // Route the encoder input pins to alternate function 2 (presumably the
        // LPTIM1 inputs — verify against the device datasheet's AF table).
        pb5.set_alt_mode(gpio::AltMode::AF2);
        pb7.set_alt_mode(gpio::AltMode::AF2);
        Self::init(lptim, pwr, rcc, clk)
    }

    // TODO: Dedupe this fn with configure() function in `impl<M: CountMode> LpTimer<M>`
    fn configure_encoder(&mut self, arr: u16) {
        // Disable the timer. The prescaler can only be changed while it's disabled.
        self.lptim.cr.write(|w| w.enable().clear_bit());
        // Configure in encoder mode.
        self.lptim.cfgr.write(|w| {
            w
                // Make sure prescaler is disabled. Encoder mode forbids prescaling.
                .presc()
                .div1()
                // Put timer into encoder mode.
                .enc()
                .set_bit()
                // Choose internal clock source - external sources not supported in encoder mode.
                .cksel()
                .clear_bit()
                // Start counting from software trigger.
                .trigen()
                .sw()
                // Count on both clock edges.
                .ckpol()
                .both_edges()
        });
        // Enable timer.
        self.lptim.cr.write(|w| w.enable().set_bit());
        // "After setting the ENABLE bit, a delay of two counter clock is needed before the LPTIM is
        // actually enabled."
        // The slowest LPTIM clock source is LSE at 32768 Hz, the fastest CPU clock is ~80 MHz. At
        // these conditions, one cycle of the LPTIM clock takes 2500 CPU cycles, so sleep for 5000.
        cortex_m::asm::delay(5000);
        // ARR can only be changed while the timer is *en*abled
        self.lptim.arr.write(|w| w.arr().bits(arr));
    }

    /// Enable the timer and begin counting encoder pulses.
    ///
    /// The provided value is stored in the ARR (Auto Reload Register). The timer's internal counter
    /// will wrap when this value is reached.
    pub fn enable(&mut self, arr: u16) {
        self.configure_encoder(arr);
        // Enable timer, enable continuous mode
        self.lptim
            .cr
            .write(|w| w.enable().set_bit().cntstrt().set_bit());
    }

    /// Disable the timer.
    pub fn disable(&mut self) {
        self.lptim.cr.write(|w| w.enable().clear_bit());
    }

    /// Get the current count of the encoder.
    ///
    /// The counter may be clocked asynchronously to the CPU; per the reference
    /// manual a single CNT read can return a corrupted value in that case, so
    /// read repeatedly until two consecutive reads agree.
    pub fn count(&self) -> u16 {
        loop {
            let first = self.lptim.cnt.read().bits();
            let second = self.lptim.cnt.read().bits();
            if first == second {
                return (second & 0xffff) as u16;
            }
        }
    }

    /// Clear all LPTIM interrupt flags (writes all seven flag bits of ICR).
    pub fn clear_flags(&self) {
        self.lptim.icr.write(|w| unsafe { w.bits(0x7f) });
    }
}
impl<M: CountMode> LpTimer<M> {
/// Shared initialization: turns on the requested clock source (busy-waiting
/// until it is ready), routes it to LPTIM1 via `RCC.CCIPR`, and enables and
/// resets the peripheral.
fn init(lptim: LPTIM, pwr: &mut PWR, rcc: &mut Rcc, clk: ClockSrc) -> Self {
// `pwr` is not used. It is used as a marker that guarantees that `PWR.CR` is set so this
// function can set the `RCC.LSEON` bit, which is otherwise write protected.
let _ = pwr;
// Enable selected clock and determine its frequency
let input_freq = match clk {
ClockSrc::Apb1 => rcc.clocks.apb1_clk(), // always enabled
ClockSrc::Lsi => {
// Turn on LSI
rcc.rb.csr.modify(|_, w| w.lsion().set_bit());
// Wait for LSI to be ready
while rcc.rb.csr.read().lsirdy().bit_is_clear() {}
// Nominal LSI frequency; the real RC oscillator can deviate from
// this noticeably — TODO confirm tolerance for timing-critical uses.
Hertz(37_000)
}
ClockSrc::Hsi16 => {
// Turn on HSI16
rcc.rb.cr.modify(|_, w| w.hsi16on().set_bit());
// Wait for HSI16 to be ready
while rcc.rb.cr.read().hsi16rdyf().bit_is_clear() {}
Hertz(16_000_000)
}
ClockSrc::Lse => {
// Turn on LSE
rcc.rb.csr.modify(|_, w| w.lseon().set_bit());
// Wait for LSE to be ready
while rcc.rb.csr.read().lserdy().bit_is_clear() {}
Hertz(32_768)
}
};
// Select and enable clock. Right now we only support the internal RCC clocks, but LPTIM can
// also run as a counter with a dedicated external input.
rcc.rb.ccipr.modify(|_, w| w.lptim1sel().bits(clk as u8));
LPTIM::enable(rcc);
LPTIM::reset(rcc);
Self {
lptim,
input_freq,
_mode: PhantomData,
}
}
/// Disables the timer and configures it so that starting it will make it fire at the given
/// frequency.
fn configure(&mut self, conf: TimeConf) {
// Disable the timer. The prescaler can only be changed while it's disabled.
self.lptim.cr.write(|w| w.enable().clear_bit());
// Program the encoded prescaler and set the TIMOUT bit.
self.lptim
.cfgr
.write(|w| w.presc().bits(conf.psc_encoded).timout().set_bit());
self.lptim.cr.write(|w| w.enable().set_bit());
// "After setting the ENABLE bit, a delay of two counter clock is needed before the LPTIM is
// actually enabled."
// The slowest LPTIM clock source is LSE at 32768 Hz, the fastest CPU clock is ~80 MHz. At
// these conditions, one cycle of the LPTIM clock takes 2500 CPU cycles, so sleep for 5000.
cortex_m::asm::delay(5000);
// ARR can only be changed while the timer is *en*abled
self.lptim.arr.write(|w| w.arr().bits(conf.arr));
}
/// Disables and destructs the timer, returning the raw `LPTIM` peripheral.
pub fn free(self) -> LPTIM {
self.lptim.cr.reset();
self.lptim
}
/// Disables the timer and enables the given interrupts.
///
/// Note: the timer is left disabled; the caller must start it again afterwards.
pub fn enable_interrupts(&mut self, interrupts: Interrupts) {
// IER can only be modified when the timer is disabled
self.lptim.cr.reset();
self.lptim.ier.modify(|_, w| {
if interrupts.enc_dir_down {
w.downie().enabled();
}
if interrupts.enc_dir_up {
w.upie().enabled();
}
if interrupts.autoreload_update_ok {
w.arrokie().enabled();
}
if interrupts.compare_update_ok {
w.cmpokie().enabled();
}
if interrupts.ext_trig {
w.exttrigie().enabled();
}
if interrupts.autoreload_match {
w.arrmie().enabled();
}
if interrupts.compare_match {
w.cmpmie().enabled();
}
w
})
}
/// Disables the timer and disables the given interrupts.
///
/// Note: the timer is left disabled; the caller must start it again afterwards.
pub fn disable_interrupts(&mut self, interrupts: Interrupts) {
// IER can only be modified when the timer is disabled
self.lptim.cr.reset();
self.lptim.ier.modify(|_, w| {
if interrupts.enc_dir_down {
w.downie().disabled();
}
if interrupts.enc_dir_up {
w.upie().disabled();
}
if interrupts.autoreload_update_ok {
w.arrokie().disabled();
}
if interrupts.compare_update_ok {
w.cmpokie().disabled();
}
if interrupts.ext_trig {
w.exttrigie().disabled();
}
if interrupts.autoreload_match {
w.arrmie().disabled();
}
if interrupts.compare_match {
w.cmpmie().disabled();
}
w
})
}
}
impl hal::timer::CountDown for LpTimer<Periodic> {
/// In periodic mode the countdown is configured by target overflow frequency.
type Time = Hertz;
fn start<T>(&mut self, freq: T)
where
T: Into<Hertz>,
{
self.configure(TimeConf::calculate_freq(self.input_freq, freq.into()));
// Start LPTIM in continuous mode.
self.lptim
.cr
.write(|w| w.enable().set_bit().cntstrt().set_bit());
}
/// Non-blocking wait: returns `WouldBlock` until the autoreload (ARR) match
/// flag is set, then clears the flag and returns `Ok(())`.
fn wait(&mut self) -> nb::Result<(), Void> {
if self.lptim.isr.read().arrm().bit_is_clear() {
Err(nb::Error::WouldBlock)
} else {
self.lptim.icr.write(|w| w.arrmcf().set_bit());
Ok(())
}
}
}
/// In periodic mode the hardware reloads automatically, so `wait` may be
/// called repeatedly after a single `start`.
impl hal::timer::Periodic for LpTimer<Periodic> {}
impl hal::timer::CountDown for LpTimer<OneShot> {
/// In one-shot mode the countdown is configured by period, allowing
/// multi-second timeouts (computed with 64-bit arithmetic).
type Time = Microseconds;
fn start<T>(&mut self, period: T)
where
T: Into<Microseconds>,
{
self.configure(TimeConf::calculate_period(self.input_freq, period.into()));
// Start LPTIM in one-shot mode.
self.lptim
.cr
.write(|w| w.enable().set_bit().sngstrt().set_bit());
}
/// Non-blocking wait: returns `WouldBlock` until the autoreload (ARR) match
/// flag is set, then clears the flag and returns `Ok(())`.
fn wait(&mut self) -> nb::Result<(), Void> {
if self.lptim.isr.read().arrm().bit_is_clear() {
Err(nb::Error::WouldBlock)
} else {
self.lptim.icr.write(|w| w.arrmcf().set_bit());
Ok(())
}
}
}
/// Computed LPTIM timing configuration: register-encoded prescaler plus
/// auto-reload value.
#[derive(Copy, Clone)]
struct TimeConf {
/// Prescaler encoded as N where the division factor is `2^N`.
psc_encoded: u8,
/// Auto-reload value (ARR); the counter overflows after this many ticks.
arr: u16,
}
impl TimeConf {
    const ARR_MAX: u16 = u16::max_value();

    /// Derives a prescaler/auto-reload pair that makes the counter overflow at
    /// a rate of `output_freq` when clocked at `input_freq`.
    ///
    /// Panics when the ratio is too large to realize with the available
    /// prescaler settings (i.e. the requested frequency is too slow).
    fn calculate_freq(input_freq: Hertz, output_freq: Hertz) -> Self {
        // The hardware divides the input clock by `psc` (a power of two in
        // 1..=128) and overflows every ARR ticks:
        //
        //     output_freq = input_freq / (psc * ARR)
        //
        // Assume the largest possible ARR first to find the smallest workable
        // prescaler: divide rounding up, then round up again to a power of two.
        // Anything above 128 cannot be realized by the peripheral.
        let ticks = input_freq.0 / output_freq.0;
        let psc = (ticks + (u32(Self::ARR_MAX) - 1)) / u32(Self::ARR_MAX);
        let psc = psc.next_power_of_two();
        assert!(psc <= 128);

        // With the prescaler fixed as above, the exact ARR is guaranteed to
        // fit in 16 bits, so the `try_from` cannot fail.
        let arr = u16::try_from(ticks / psc).unwrap();

        // The PRESC register field encodes the prescaler as its base-2 log.
        Self {
            psc_encoded: psc.trailing_zeros() as u8,
            arr,
        }
    }

    /// Derives a prescaler/auto-reload pair that makes the counter overflow
    /// once every `output_period` when clocked at `input_freq`.
    ///
    /// Panics when the period is too short (prescaler would be zero) or too
    /// long (prescaler would exceed 128) for the peripheral.
    fn calculate_period(input_freq: Hertz, output_period: Microseconds) -> Self {
        // The period may span several seconds (overflow rate below 1 Hz), so
        // work with the total tick count:
        //
        //     ticks = input_freq * output_period
        //
        // `output_period` is in µs; performing the /1_000_000 on the 64-bit
        // product keeps rounding error small — dividing either factor first
        // would lose far too much precision.
        let fi_po = u32(u64(input_freq.0) * u64(output_period.0) / 1_000_000).unwrap();

        // Same strategy as `calculate_freq`: assume the maximum ARR, round the
        // prescaler up to a power of two, and reject impossible settings.
        let psc = (fi_po + (u32(Self::ARR_MAX) - 1)) / u32(Self::ARR_MAX);
        assert!(psc > 0); // period shorter than a single input clock tick
        let psc = psc.next_power_of_two();
        assert!(psc <= 128); // period longer than the hardware can divide down to

        // Guaranteed to fit: `psc` was chosen against the maximum ARR value.
        let arr = (fi_po / psc) as u16;
        Self {
            psc_encoded: psc.trailing_zeros() as u8,
            arr,
        }
    }
}
#[cfg(test)]
mod tests {
use super::*;
/// Test-only methods.
impl TimeConf {
/// Decoded prescaler division factor (`2^psc_encoded`).
fn psc(&self) -> u8 {
1 << self.psc_encoded
}
/// Calculates the output frequency if the timer is configured according to `self` and is run at
/// `input_freq`.
fn output_freq(&self, input_freq: Hertz) -> Hertz {
Hertz(input_freq.0 / u32(self.psc()) / u32(self.arr))
}
/// Calculates the overflow period in µs for the given input frequency.
fn output_period(&self, input_freq: Hertz) -> Microseconds {
Microseconds(
u32(u64(self.psc()) * u64(self.arr) * 1_000_000 / u64(input_freq.0)).unwrap(),
)
}
}
#[test]
fn calc_from_freq() {
// no psc necessary (so psc=1)
let c = TimeConf::calculate_freq(32_768.hz(), 1.hz());
assert_eq!(c.psc(), 1);
assert_eq!(c.arr, 32_768);
assert_eq!(c.output_freq(32_768.hz()), 1.hz());
// barely works with psc=1
let c = TimeConf::calculate_freq(65535.hz(), 1.hz());
assert_eq!(c.psc(), 1);
assert_eq!(c.arr, 65535);
assert_eq!(c.output_freq(65535.hz()), 1.hz());
// barely needs psc=2
let c = TimeConf::calculate_freq(65536.hz(), 1.hz());
assert_eq!(c.psc(), 2);
assert_eq!(c.arr, 32768);
assert_eq!(c.output_freq(65536.hz()), 1.hz());
// maximum possible ratio, needs psc=128 and max ARR
let c = TimeConf::calculate_freq((65535 * 128).hz(), 1.hz());
assert_eq!(c.psc(), 128);
assert_eq!(c.arr, 65535);
assert_eq!(c.output_freq((65535 * 128).hz()), 1.hz());
}
#[test]
#[should_panic(expected = "assertion failed: psc <= 128")]
fn freq_ratio_too_large() {
TimeConf::calculate_freq((65535 * 128 + 1).hz(), 1.hz());
}
#[test]
fn calc_from_period() {
// 1:1 ratio
let c = TimeConf::calculate_period(1_000.hz(), 1_000.us());
assert_eq!(c.psc(), 1);
assert_eq!(c.arr, 1);
assert_eq!(c.output_freq(1_000.hz()), 1_000.hz());
// real-world test: go from 32.768 kHz to 10 s
let c = TimeConf::calculate_period(32_768.hz(), 10_000_000.us());
assert_eq!(c.psc(), 8);
assert_eq!(c.arr, 40960);
// 0 Hz is just the integer truncation of a sub-1 Hz overflow rate.
assert_eq!(c.output_freq(32_768.hz()), 0.hz());
assert_eq!(c.output_period(32_768.hz()), 10_000_000.us());
}
#[test]
#[should_panic(expected = "assertion failed: psc > 0")]
fn period_too_short() {
TimeConf::calculate_period(1_000.hz(), 999.us());
}
}
| ClockSrc | identifier_name |
lptim.rs | //! Low-Power Timer (LPTIM) support.
use crate::gpio::{self, gpiob};
use crate::hal;
use crate::pac::LPTIM;
use crate::pwr::PWR;
use crate::rcc::{Enable, Rcc, Reset};
use cast::{u32, u64};
use core::convert::TryFrom;
use core::marker::PhantomData;
use embedded_time::duration::Microseconds;
use embedded_time::rate::Hertz;
use void::Void;
/// Private module used to seal `CountMode` against downstream implementations.
mod sealed {
pub trait Sealed {}
}
/// Low-Power Timer counting in one-shot mode.
pub enum OneShot {}
/// Low-Power Timer counting in periodic mode.
pub enum Periodic {}
/// Low-Power Timer in encoder mode.
pub enum Encoder {}
impl sealed::Sealed for OneShot {}
impl sealed::Sealed for Periodic {}
impl sealed::Sealed for Encoder {}
/// Marker trait for counter directions.
///
/// Sealed: only `OneShot`, `Periodic` and `Encoder` implement it.
pub trait CountMode: sealed::Sealed {}
impl CountMode for OneShot {}
impl CountMode for Periodic {}
impl CountMode for Encoder {}
/// Clock source selection for the Low-Power Timer `LPTIM`.
///
/// The discriminant values are the bit patterns written to the
/// `RCC.CCIPR.LPTIM1SEL` field during init.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum ClockSrc {
/// Drive LPTIM with APB1 clock.
Apb1 = 0b00,
/// Drive LPTIM with Low-Speed Internal (LSI) clock.
///
/// The user has to ensure that the LSI clock is running, or the timer won't
/// start counting.
Lsi = 0b01,
/// Drive LPTIM with Internal 16 MHz clock.
Hsi16 = 0b10,
/// Drive LPTIM with Low-Speed External (LSE) clock at 32.768 kHz.
///
/// The user has to ensure that the LSE clock is running, or the timer won't
/// start counting.
Lse = 0b11,
}
/// Interrupt enable flags.
///
/// Each field corresponds to one enable bit in the LPTIM `IER` register; see
/// `enable_interrupts` / `disable_interrupts`.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)]
pub struct Interrupts {
/// Encoder direction change to down.
pub enc_dir_down: bool,
/// Encoder direction change to up.
pub enc_dir_up: bool,
/// ARR register update successful.
pub autoreload_update_ok: bool,
/// CMP register update successful.
pub compare_update_ok: bool,
/// Valid edge on ext. trigger input.
pub ext_trig: bool,
/// ARR register matches current CNT value.
pub autoreload_match: bool,
/// CMP register matches current CNT value.
pub compare_match: bool,
}
/// Low-Power Timer (`LPTIM`).
///
/// The Low-Power Timer is a 16-bit timer with a prescaler of up to 128. It can run off of the APB1,
/// LSI, HSI16, or LSE clocks. With LSE, the slowest clock at 32.768 kHz, this results in a maximum
/// timeout of 256 seconds, or 4 minutes and 16 seconds.
///
/// The timer can be initialized either in one-shot mode or in periodic mode, using `init_oneshot`
/// or `init_periodic` respectively. In periodic mode, the embedded-hal `Periodic` marker trait is
/// implemented and the `CountDown` implementation uses `Hertz` as the time unit. In one-shot mode,
/// the `CountDown` implementation instead uses `Microseconds`, allowing for a multi-second timeout
/// to be configured (with the tradeoff being a larger code size due to use of 64-bit arithmetic).
pub struct LpTimer<M: CountMode> {
/// Owned LPTIM peripheral instance.
lptim: LPTIM,
/// Frequency of the clock source selected at init time.
input_freq: Hertz,
/// Zero-sized marker selecting the counting mode.
_mode: PhantomData<M>,
}
impl LpTimer<Periodic> {
/// Initializes the Low-Power Timer in periodic mode.
///
/// The timer needs to be started by calling `.start(freq)` (the `CountDown`
/// impl for periodic mode takes a `Hertz` frequency).
pub fn init_periodic(lptim: LPTIM, pwr: &mut PWR, rcc: &mut Rcc, clk: ClockSrc) -> Self {
Self::init(lptim, pwr, rcc, clk)
}
}
impl LpTimer<OneShot> {
/// Initializes the Low-Power Timer in one-shot mode.
///
/// The timer needs to be started by calling `.start(freq)`.
pub fn init_oneshot(lptim: LPTIM, pwr: &mut PWR, rcc: &mut Rcc, clk: ClockSrc) -> Self |
}
impl LpTimer<Encoder> {
/// Initializes the Low-Power Timer in encoder mode.
///
/// The `enable` method must be called to enable the encoder input.
pub fn init_encoder(
lptim: LPTIM,
pwr: &mut PWR,
rcc: &mut Rcc,
clk: ClockSrc,
(pb5, pb7): (gpiob::PB5<gpio::Analog>, gpiob::PB7<gpio::Analog>),
) -> Self {
// Route the encoder input pins to alternate function 2 (presumably the
// LPTIM1 inputs — verify against the device datasheet's AF table).
pb5.set_alt_mode(gpio::AltMode::AF2);
pb7.set_alt_mode(gpio::AltMode::AF2);
Self::init(lptim, pwr, rcc, clk)
}
// TODO: Dedupe this fn with configure() function in `impl<M: CountMode> LpTimer<M>`
fn configure_encoder(&mut self, arr: u16) {
// Disable the timer. The prescaler can only be changed while it's disabled.
self.lptim.cr.write(|w| w.enable().clear_bit());
// Configure in encoder mode
self.lptim.cfgr.write(|w| {
w
// Make sure prescaler is disabled. Encoder mode forbids prescaling.
.presc()
.div1()
// Put timer into encoder mode
.enc()
.set_bit()
// Choose internal clock source - external sources not supported in encoder mode.
.cksel()
.clear_bit()
// Start counting from software trigger
.trigen()
.sw()
// Clock polarity
.ckpol()
.both_edges()
});
// Enable timer
self.lptim.cr.write(|w| w.enable().set_bit());
// "After setting the ENABLE bit, a delay of two counter clock is needed before the LPTIM is
// actually enabled."
// The slowest LPTIM clock source is LSE at 32768 Hz, the fastest CPU clock is ~80 MHz. At
// these conditions, one cycle of the LPTIM clock takes 2500 CPU cycles, so sleep for 5000.
cortex_m::asm::delay(5000);
// ARR can only be changed while the timer is *en*abled
self.lptim.arr.write(|w| w.arr().bits(arr));
}
/// Enable the timer and begin counting encoder pulses.
///
/// The provided value is stored in the ARR (Auto Reload Register). The timer's internal counter
/// will wrap when this value is reached.
pub fn enable(&mut self, arr: u16) {
self.configure_encoder(arr);
// Enable timer, enable continuous mode
self.lptim
.cr
.write(|w| w.enable().set_bit().cntstrt().set_bit());
}
/// Disable the timer.
pub fn disable(&mut self) {
self.lptim.cr.write(|w| w.enable().clear_bit());
}
/// Get the current count of the encoder.
///
/// NOTE(review): when LPTIM runs from a clock asynchronous to the CPU, the
/// reference manual recommends reading CNT twice until two consecutive
/// reads match; a single read may return a corrupted value — confirm
/// against RM0377.
pub fn count(&self) -> u16 {
(self.lptim.cnt.read().bits() & 0xffff) as u16
}
/// Clear all LPTIM interrupt flags
///
/// Writes all seven flag bits of ICR at once.
pub fn clear_flags(&self) {
self.lptim.icr.write(|w| unsafe { w.bits(0x7f) });
}
}
impl<M: CountMode> LpTimer<M> {
/// Shared initialization: turns on the requested clock source (busy-waiting
/// until it is ready), routes it to LPTIM1 via `RCC.CCIPR`, and enables and
/// resets the peripheral.
fn init(lptim: LPTIM, pwr: &mut PWR, rcc: &mut Rcc, clk: ClockSrc) -> Self {
// `pwr` is not used. It is used as a marker that guarantees that `PWR.CR` is set so this
// function can set the `RCC.LSEON` bit, which is otherwise write protected.
let _ = pwr;
// Enable selected clock and determine its frequency
let input_freq = match clk {
ClockSrc::Apb1 => rcc.clocks.apb1_clk(), // always enabled
ClockSrc::Lsi => {
// Turn on LSI
rcc.rb.csr.modify(|_, w| w.lsion().set_bit());
// Wait for LSI to be ready
while rcc.rb.csr.read().lsirdy().bit_is_clear() {}
// Nominal LSI frequency; the real RC oscillator can deviate from
// this noticeably — TODO confirm tolerance for timing-critical uses.
Hertz(37_000)
}
ClockSrc::Hsi16 => {
// Turn on HSI16
rcc.rb.cr.modify(|_, w| w.hsi16on().set_bit());
// Wait for HSI16 to be ready
while rcc.rb.cr.read().hsi16rdyf().bit_is_clear() {}
Hertz(16_000_000)
}
ClockSrc::Lse => {
// Turn on LSE
rcc.rb.csr.modify(|_, w| w.lseon().set_bit());
// Wait for LSE to be ready
while rcc.rb.csr.read().lserdy().bit_is_clear() {}
Hertz(32_768)
}
};
// Select and enable clock. Right now we only support the internal RCC clocks, but LPTIM can
// also run as a counter with a dedicated external input.
rcc.rb.ccipr.modify(|_, w| w.lptim1sel().bits(clk as u8));
LPTIM::enable(rcc);
LPTIM::reset(rcc);
Self {
lptim,
input_freq,
_mode: PhantomData,
}
}
/// Disables the timer and configures it so that starting it will make it fire at the given
/// frequency.
fn configure(&mut self, conf: TimeConf) {
// Disable the timer. The prescaler can only be changed while it's disabled.
self.lptim.cr.write(|w| w.enable().clear_bit());
// Program the encoded prescaler and set the TIMOUT bit.
self.lptim
.cfgr
.write(|w| w.presc().bits(conf.psc_encoded).timout().set_bit());
self.lptim.cr.write(|w| w.enable().set_bit());
// "After setting the ENABLE bit, a delay of two counter clock is needed before the LPTIM is
// actually enabled."
// The slowest LPTIM clock source is LSE at 32768 Hz, the fastest CPU clock is ~80 MHz. At
// these conditions, one cycle of the LPTIM clock takes 2500 CPU cycles, so sleep for 5000.
cortex_m::asm::delay(5000);
// ARR can only be changed while the timer is *en*abled
self.lptim.arr.write(|w| w.arr().bits(conf.arr));
}
/// Disables and destructs the timer, returning the raw `LPTIM` peripheral.
pub fn free(self) -> LPTIM {
self.lptim.cr.reset();
self.lptim
}
/// Disables the timer and enables the given interrupts.
///
/// Note: the timer is left disabled; the caller must start it again afterwards.
pub fn enable_interrupts(&mut self, interrupts: Interrupts) {
// IER can only be modified when the timer is disabled
self.lptim.cr.reset();
self.lptim.ier.modify(|_, w| {
if interrupts.enc_dir_down {
w.downie().enabled();
}
if interrupts.enc_dir_up {
w.upie().enabled();
}
if interrupts.autoreload_update_ok {
w.arrokie().enabled();
}
if interrupts.compare_update_ok {
w.cmpokie().enabled();
}
if interrupts.ext_trig {
w.exttrigie().enabled();
}
if interrupts.autoreload_match {
w.arrmie().enabled();
}
if interrupts.compare_match {
w.cmpmie().enabled();
}
w
})
}
/// Disables the timer and disables the given interrupts.
///
/// Note: the timer is left disabled; the caller must start it again afterwards.
pub fn disable_interrupts(&mut self, interrupts: Interrupts) {
// IER can only be modified when the timer is disabled
self.lptim.cr.reset();
self.lptim.ier.modify(|_, w| {
if interrupts.enc_dir_down {
w.downie().disabled();
}
if interrupts.enc_dir_up {
w.upie().disabled();
}
if interrupts.autoreload_update_ok {
w.arrokie().disabled();
}
if interrupts.compare_update_ok {
w.cmpokie().disabled();
}
if interrupts.ext_trig {
w.exttrigie().disabled();
}
if interrupts.autoreload_match {
w.arrmie().disabled();
}
if interrupts.compare_match {
w.cmpmie().disabled();
}
w
})
}
}
impl hal::timer::CountDown for LpTimer<Periodic> {
/// In periodic mode the countdown is configured by target overflow frequency.
type Time = Hertz;
fn start<T>(&mut self, freq: T)
where
T: Into<Hertz>,
{
self.configure(TimeConf::calculate_freq(self.input_freq, freq.into()));
// Start LPTIM in continuous mode.
self.lptim
.cr
.write(|w| w.enable().set_bit().cntstrt().set_bit());
}
/// Non-blocking wait: returns `WouldBlock` until the autoreload (ARR) match
/// flag is set, then clears the flag and returns `Ok(())`.
fn wait(&mut self) -> nb::Result<(), Void> {
if self.lptim.isr.read().arrm().bit_is_clear() {
Err(nb::Error::WouldBlock)
} else {
self.lptim.icr.write(|w| w.arrmcf().set_bit());
Ok(())
}
}
}
/// In periodic mode the hardware reloads automatically, so `wait` may be
/// called repeatedly after a single `start`.
impl hal::timer::Periodic for LpTimer<Periodic> {}
impl hal::timer::CountDown for LpTimer<OneShot> {
/// In one-shot mode the countdown is configured by period, allowing
/// multi-second timeouts (computed with 64-bit arithmetic).
type Time = Microseconds;
fn start<T>(&mut self, period: T)
where
T: Into<Microseconds>,
{
self.configure(TimeConf::calculate_period(self.input_freq, period.into()));
// Start LPTIM in one-shot mode.
self.lptim
.cr
.write(|w| w.enable().set_bit().sngstrt().set_bit());
}
/// Non-blocking wait: returns `WouldBlock` until the autoreload (ARR) match
/// flag is set, then clears the flag and returns `Ok(())`.
fn wait(&mut self) -> nb::Result<(), Void> {
if self.lptim.isr.read().arrm().bit_is_clear() {
Err(nb::Error::WouldBlock)
} else {
self.lptim.icr.write(|w| w.arrmcf().set_bit());
Ok(())
}
}
}
/// Computed LPTIM timing configuration: register-encoded prescaler plus
/// auto-reload value.
#[derive(Copy, Clone)]
struct TimeConf {
/// Prescaler encoded as N where the division factor is `2^N`.
psc_encoded: u8,
/// Auto-reload value (ARR); the counter overflows after this many ticks.
arr: u16,
}
impl TimeConf {
/// Largest auto-reload value representable by the 16-bit ARR register.
const ARR_MAX: u16 = u16::max_value();
/// Calculates prescaler and autoreload value for producing overflows at a rate of
/// `output_freq`.
///
/// Panics if the input/output ratio cannot be realized (prescaler > 128).
fn calculate_freq(input_freq: Hertz, output_freq: Hertz) -> Self {
// Fi = Frequency of input clock
// Fo = Output frequency (frequency of timer overflows, using ARR)
// psc = prescaler (must be power of two in range 1..=128)
// We know Fi and Fo, and want to know psc and ARR.
//
// The timer works like this:
// Fo = (Fi / psc) / ARR
//
// Therefore:
// Fo * ARR = Fi / psc
// Fo * ARR * psc = Fi
// ARR = (Fi / Fo) / psc
// psc = (Fi / Fo) / ARR
//
// We first calculate `psc` by assuming the largest `ARR` value, and round the result to the
// next power of two. If that's > 128, the chosen frequency is too slow for the timer and
// we panic. Otherwise we use that `psc` to calculate the real `ARR`.
// Add `ARR_MAX - 1` to round the result upwards
let psc = ((input_freq.0 / output_freq.0) + (u32(Self::ARR_MAX) - 1)) / u32(Self::ARR_MAX);
let psc = psc.next_power_of_two(); // always >= 1
assert!(psc <= 128);
// This calculation must be in u16 range because we assume the max. ARR value above ^
let arr = u16::try_from((input_freq.0 / output_freq.0) / psc).unwrap();
// PSC encoding is N where `psc = 2^N`
let psc_encoded = psc.trailing_zeros() as u8;
Self { psc_encoded, arr }
}
/// Calculates prescaler and autoreload value for producing overflows after every
/// `output_period`.
///
/// Panics if the period is too short (prescaler 0) or too long (prescaler > 128).
fn calculate_period(input_freq: Hertz, output_period: Microseconds) -> Self {
// Here, the `output_period` can be very long, resulting in an output frequency of < 1 Hz.
// Fi = Frequency of input clock
// Fo = Output frequency (frequency of timer overflows, using ARR)
// Po = 1 / Fo = Output Period
// psc = prescaler (must be power of two in range 1..=128)
// We know Fi and Fo, and want to know psc and ARR.
//
// The timer works like this:
// Fo = 1 / Po = (Fi / psc) / ARR
//
// Therefore:
// ARR / Po = Fi / psc
// (ARR * psc) / Po = Fi
// ARR * psc = Fi * Po
// ARR = (Fi * Po) / psc
// psc = (Fi * Po) / ARR
//
// We first calculate `psc` by assuming the largest `ARR` value, and round the result to the
// next power of two. If that's > 128, the chosen period is too long for the timer and we
// panic. Otherwise we use that `psc` to calculate the real `ARR`.
// First, calculate the product `Fi * Po`. Since `output_period` is in µs, we have to divide
// it by 1_000_000 to get seconds, without losing much precision. We can divide either of
// the multiplicants, or the resulting product. Dividing the resulting product results in
// the least amount of rouding error, but might require 64-bit multiplication and division,
// which is very expensive. Dividing either of the multiplicands by 1_000_000 can easily
// result in significant rounding error that makes this API useless.
let fi_po = u32(u64(input_freq.0) * u64(output_period.0) / 1_000_000).unwrap();
// Add `ARR_MAX - 1` to round the result upwards
let psc = (fi_po + (u32(Self::ARR_MAX) - 1)) / u32(Self::ARR_MAX);
assert!(psc > 0); // if 0, the output period is too short to be produced from input_freq
let psc = psc.next_power_of_two(); // always >= 1
assert!(psc <= 128); // if > 128, the output period is too long to be produced from input_freq
// This calculation must be in u16 range because we assume the max. ARR value above ^
let arr = (fi_po / psc) as u16;
// PSC encoding is N where `psc = 2^N`
let psc_encoded = psc.trailing_zeros() as u8;
Self { psc_encoded, arr }
}
}
#[cfg(test)]
mod tests {
use super::*;
/// Test-only methods.
impl TimeConf {
/// Decoded prescaler division factor (`2^psc_encoded`).
fn psc(&self) -> u8 {
1 << self.psc_encoded
}
/// Calculates the output frequency if the timer is configured according to `self` and is run at
/// `input_freq`.
fn output_freq(&self, input_freq: Hertz) -> Hertz {
Hertz(input_freq.0 / u32(self.psc()) / u32(self.arr))
}
/// Calculates the overflow period in µs for the given input frequency.
fn output_period(&self, input_freq: Hertz) -> Microseconds {
Microseconds(
u32(u64(self.psc()) * u64(self.arr) * 1_000_000 / u64(input_freq.0)).unwrap(),
)
}
}
#[test]
fn calc_from_freq() {
// no psc necessary (so psc=1)
let c = TimeConf::calculate_freq(32_768.hz(), 1.hz());
assert_eq!(c.psc(), 1);
assert_eq!(c.arr, 32_768);
assert_eq!(c.output_freq(32_768.hz()), 1.hz());
// barely works with psc=1
let c = TimeConf::calculate_freq(65535.hz(), 1.hz());
assert_eq!(c.psc(), 1);
assert_eq!(c.arr, 65535);
assert_eq!(c.output_freq(65535.hz()), 1.hz());
// barely needs psc=2
let c = TimeConf::calculate_freq(65536.hz(), 1.hz());
assert_eq!(c.psc(), 2);
assert_eq!(c.arr, 32768);
assert_eq!(c.output_freq(65536.hz()), 1.hz());
// maximum possible ratio, needs psc=128 and max ARR
let c = TimeConf::calculate_freq((65535 * 128).hz(), 1.hz());
assert_eq!(c.psc(), 128);
assert_eq!(c.arr, 65535);
assert_eq!(c.output_freq((65535 * 128).hz()), 1.hz());
}
#[test]
#[should_panic(expected = "assertion failed: psc <= 128")]
fn freq_ratio_too_large() {
TimeConf::calculate_freq((65535 * 128 + 1).hz(), 1.hz());
}
#[test]
fn calc_from_period() {
// 1:1 ratio
let c = TimeConf::calculate_period(1_000.hz(), 1_000.us());
assert_eq!(c.psc(), 1);
assert_eq!(c.arr, 1);
assert_eq!(c.output_freq(1_000.hz()), 1_000.hz());
// real-world test: go from 32.768 kHz to 10 s
let c = TimeConf::calculate_period(32_768.hz(), 10_000_000.us());
assert_eq!(c.psc(), 8);
assert_eq!(c.arr, 40960);
// 0 Hz is just the integer truncation of a sub-1 Hz overflow rate.
assert_eq!(c.output_freq(32_768.hz()), 0.hz());
assert_eq!(c.output_period(32_768.hz()), 10_000_000.us());
}
#[test]
#[should_panic(expected = "assertion failed: psc > 0")]
fn period_too_short() {
TimeConf::calculate_period(1_000.hz(), 999.us());
}
}
| {
Self::init(lptim, pwr, rcc, clk)
} | identifier_body |
lptim.rs | //! Low-Power Timer (LPTIM) support.
use crate::gpio::{self, gpiob};
use crate::hal;
use crate::pac::LPTIM;
use crate::pwr::PWR;
use crate::rcc::{Enable, Rcc, Reset};
use cast::{u32, u64};
use core::convert::TryFrom;
use core::marker::PhantomData;
use embedded_time::duration::Microseconds;
use embedded_time::rate::Hertz;
use void::Void;
/// Private module used to seal `CountMode` against downstream implementations.
mod sealed {
pub trait Sealed {}
}
/// Low-Power Timer counting in one-shot mode.
pub enum OneShot {}
/// Low-Power Timer counting in periodic mode.
pub enum Periodic {}
/// Low-Power Timer in encoder mode.
pub enum Encoder {}
impl sealed::Sealed for OneShot {}
impl sealed::Sealed for Periodic {}
impl sealed::Sealed for Encoder {}
/// Marker trait for counter directions.
///
/// Sealed: only `OneShot`, `Periodic` and `Encoder` implement it.
pub trait CountMode: sealed::Sealed {}
impl CountMode for OneShot {}
impl CountMode for Periodic {}
impl CountMode for Encoder {}
/// Clock source selection for the Low-Power Timer `LPTIM`.
///
/// The discriminant values are the bit patterns written to the
/// `RCC.CCIPR.LPTIM1SEL` field during init.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum ClockSrc {
/// Drive LPTIM with APB1 clock.
Apb1 = 0b00,
/// Drive LPTIM with Low-Speed Internal (LSI) clock.
///
/// The user has to ensure that the LSI clock is running, or the timer won't
/// start counting.
Lsi = 0b01,
/// Drive LPTIM with Internal 16 MHz clock.
Hsi16 = 0b10,
/// Drive LPTIM with Low-Speed External (LSE) clock at 32.768 kHz.
///
/// The user has to ensure that the LSE clock is running, or the timer won't
/// start counting.
Lse = 0b11,
}
/// Interrupt enable flags.
///
/// Each field corresponds to one enable bit in the LPTIM `IER` register.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)]
pub struct Interrupts {
/// Encoder direction change to down.
pub enc_dir_down: bool,
/// Encoder direction change to up.
pub enc_dir_up: bool,
/// ARR register update successful.
pub autoreload_update_ok: bool,
/// CMP register update successful.
pub compare_update_ok: bool,
/// Valid edge on ext. trigger input.
pub ext_trig: bool,
/// ARR register matches current CNT value.
pub autoreload_match: bool,
/// CMP register matches current CNT value.
pub compare_match: bool,
}
/// Low-Power Timer (`LPTIM`).
///
/// The Low-Power Timer is a 16-bit timer with a prescaler of up to 128. It can run off of the APB1,
/// LSI, HSI16, or LSE clocks. With LSE, the slowest clock at 32.768 kHz, this results in a maximum
/// timeout of 256 seconds, or 4 minutes and 16 seconds.
///
/// The timer can be initialized either in one-shot mode or in periodic mode, using `init_oneshot`
/// or `init_periodic` respectively. In periodic mode, the embedded-hal `Periodic` marker trait is
/// implemented and the `CountDown` implementation uses `Hertz` as the time unit. In one-shot mode,
/// the `CountDown` implementation instead uses `Microseconds`, allowing for a multi-second timeout
/// to be configured (with the tradeoff being a larger code size due to use of 64-bit arithmetic).
pub struct LpTimer<M: CountMode> {
/// Owned LPTIM peripheral instance.
lptim: LPTIM,
/// Frequency of the clock source selected at init time.
input_freq: Hertz,
/// Zero-sized marker selecting the counting mode.
_mode: PhantomData<M>,
}
impl LpTimer<Periodic> {
/// Initializes the Low-Power Timer in periodic mode.
///
/// The timer needs to be started by calling `.start(freq)` (the `CountDown`
/// impl for periodic mode takes a `Hertz` frequency).
pub fn init_periodic(lptim: LPTIM, pwr: &mut PWR, rcc: &mut Rcc, clk: ClockSrc) -> Self {
Self::init(lptim, pwr, rcc, clk)
}
}
impl LpTimer<OneShot> {
/// Initializes the Low-Power Timer in one-shot mode.
///
/// The timer needs to be started by calling `.start(period)`; in one-shot
/// mode the `CountDown` impl takes a `Microseconds` period.
pub fn init_oneshot(lptim: LPTIM, pwr: &mut PWR, rcc: &mut Rcc, clk: ClockSrc) -> Self {
Self::init(lptim, pwr, rcc, clk)
}
}
impl LpTimer<Encoder> {
/// Initializes the Low-Power Timer in encoder mode.
///
/// The `enable` method must be called to enable the encoder input.
pub fn init_encoder(
lptim: LPTIM,
pwr: &mut PWR,
rcc: &mut Rcc,
clk: ClockSrc,
(pb5, pb7): (gpiob::PB5<gpio::Analog>, gpiob::PB7<gpio::Analog>),
) -> Self {
// Route the encoder input pins to alternate function 2 (presumably the
// LPTIM1 inputs — verify against the device datasheet's AF table).
pb5.set_alt_mode(gpio::AltMode::AF2);
pb7.set_alt_mode(gpio::AltMode::AF2);
Self::init(lptim, pwr, rcc, clk)
}
// TODO: Dedupe this fn with configure() function in `impl<M: CountMode> LpTimer<M>`
fn configure_encoder(&mut self, arr: u16) {
// Disable the timer. The prescaler can only be changed while it's disabled.
self.lptim.cr.write(|w| w.enable().clear_bit());
// Configure in encoder mode
self.lptim.cfgr.write(|w| {
w
// Make sure prescaler is disabled. Encoder mode forbids prescaling.
.presc()
.div1()
// Put timer into encoder mode
.enc()
.set_bit()
// Choose internal clock source - external sources not supported in encoder mode.
.cksel()
.clear_bit()
// Start counting from software trigger
.trigen()
.sw()
// Clock polarity
.ckpol()
.both_edges()
});
// Enable timer
self.lptim.cr.write(|w| w.enable().set_bit());
// "After setting the ENABLE bit, a delay of two counter clock is needed before the LPTIM is
// actually enabled."
// The slowest LPTIM clock source is LSE at 32768 Hz, the fastest CPU clock is ~80 MHz. At
// these conditions, one cycle of the LPTIM clock takes 2500 CPU cycles, so sleep for 5000.
cortex_m::asm::delay(5000);
// ARR can only be changed while the timer is *en*abled
self.lptim.arr.write(|w| w.arr().bits(arr));
}
/// Enable the timer and begin counting encoder pulses.
///
/// The provided value is stored in the ARR (Auto Reload Register). The timer's internal counter
/// will wrap when this value is reached.
pub fn enable(&mut self, arr: u16) {
self.configure_encoder(arr);
// Enable timer, enable continuous mode
self.lptim
.cr
.write(|w| w.enable().set_bit().cntstrt().set_bit());
}
/// Disable the timer.
pub fn disable(&mut self) {
self.lptim.cr.write(|w| w.enable().clear_bit());
}
/// Get the current count of the encoder.
///
/// NOTE(review): when LPTIM runs from a clock asynchronous to the CPU, the
/// reference manual recommends reading CNT twice until two consecutive
/// reads match; a single read may return a corrupted value — confirm
/// against RM0377.
pub fn count(&self) -> u16 {
(self.lptim.cnt.read().bits() & 0xffff) as u16
}
/// Clear all LPTIM interrupt flags
///
/// Writes all seven flag bits of ICR at once.
pub fn clear_flags(&self) {
self.lptim.icr.write(|w| unsafe { w.bits(0x7f) });
}
}
impl<M: CountMode> LpTimer<M> {
fn init(lptim: LPTIM, pwr: &mut PWR, rcc: &mut Rcc, clk: ClockSrc) -> Self {
// `pwr` is not used. It is used as a marker that guarantees that `PWR.CR` is set so this
// function can set the `RCC.LSEON` bit, which is otherwise write protected.
let _ = pwr;
// Enable selected clock and determine its frequency
let input_freq = match clk {
ClockSrc::Apb1 => rcc.clocks.apb1_clk(), // always enabled
ClockSrc::Lsi => {
// Turn on LSI
rcc.rb.csr.modify(|_, w| w.lsion().set_bit());
// Wait for LSI to be ready
while rcc.rb.csr.read().lsirdy().bit_is_clear() {}
Hertz(37_000)
}
ClockSrc::Hsi16 => {
// Turn on HSI16
rcc.rb.cr.modify(|_, w| w.hsi16on().set_bit());
// Wait for HSI16 to be ready
while rcc.rb.cr.read().hsi16rdyf().bit_is_clear() {}
Hertz(16_000_000)
}
ClockSrc::Lse => {
// Turn on LSE
rcc.rb.csr.modify(|_, w| w.lseon().set_bit());
// Wait for LSE to be ready
while rcc.rb.csr.read().lserdy().bit_is_clear() {}
Hertz(32_768)
}
};
// Select and enable clock. Right now we only support the internal RCC clocks, but LPTIM can
// also run as a counter with a dedicated external input.
rcc.rb.ccipr.modify(|_, w| w.lptim1sel().bits(clk as u8));
LPTIM::enable(rcc);
LPTIM::reset(rcc);
Self {
lptim,
input_freq,
_mode: PhantomData,
}
}
/// Disables the timer and configures it so that starting it will make it fire at the given
/// frequency.
fn configure(&mut self, conf: TimeConf) {
// Disable the timer. The prescaler can only be changed while it's disabled.
self.lptim.cr.write(|w| w.enable().clear_bit());
self.lptim
.cfgr
.write(|w| w.presc().bits(conf.psc_encoded).timout().set_bit()); | // The slowest LPTIM clock source is LSE at 32768 Hz, the fastest CPU clock is ~80 MHz. At
// these conditions, one cycle of the LPTIM clock takes 2500 CPU cycles, so sleep for 5000.
cortex_m::asm::delay(5000);
// ARR can only be changed while the timer is *en*abled
self.lptim.arr.write(|w| w.arr().bits(conf.arr));
}
/// Disables and destructs the timer, returning the raw `LPTIM` peripheral.
pub fn free(self) -> LPTIM {
self.lptim.cr.reset();
self.lptim
}
/// Disables the timer and enables the given interrupts.
pub fn enable_interrupts(&mut self, interrupts: Interrupts) {
// IER can only be modified when the timer is disabled
self.lptim.cr.reset();
self.lptim.ier.modify(|_, w| {
if interrupts.enc_dir_down {
w.downie().enabled();
}
if interrupts.enc_dir_up {
w.upie().enabled();
}
if interrupts.autoreload_update_ok {
w.arrokie().enabled();
}
if interrupts.compare_update_ok {
w.cmpokie().enabled();
}
if interrupts.ext_trig {
w.exttrigie().enabled();
}
if interrupts.autoreload_match {
w.arrmie().enabled();
}
if interrupts.compare_match {
w.cmpmie().enabled();
}
w
})
}
/// Disables the timer and disables the given interrupts.
pub fn disable_interrupts(&mut self, interrupts: Interrupts) {
// IER can only be modified when the timer is disabled
self.lptim.cr.reset();
self.lptim.ier.modify(|_, w| {
if interrupts.enc_dir_down {
w.downie().disabled();
}
if interrupts.enc_dir_up {
w.upie().disabled();
}
if interrupts.autoreload_update_ok {
w.arrokie().disabled();
}
if interrupts.compare_update_ok {
w.cmpokie().disabled();
}
if interrupts.ext_trig {
w.exttrigie().disabled();
}
if interrupts.autoreload_match {
w.arrmie().disabled();
}
if interrupts.compare_match {
w.cmpmie().disabled();
}
w
})
}
}
impl hal::timer::CountDown for LpTimer<Periodic> {
type Time = Hertz;
fn start<T>(&mut self, freq: T)
where
T: Into<Hertz>,
{
self.configure(TimeConf::calculate_freq(self.input_freq, freq.into()));
// Start LPTIM in continuous mode.
self.lptim
.cr
.write(|w| w.enable().set_bit().cntstrt().set_bit());
}
fn wait(&mut self) -> nb::Result<(), Void> {
if self.lptim.isr.read().arrm().bit_is_clear() {
Err(nb::Error::WouldBlock)
} else {
self.lptim.icr.write(|w| w.arrmcf().set_bit());
Ok(())
}
}
}
impl hal::timer::Periodic for LpTimer<Periodic> {}
impl hal::timer::CountDown for LpTimer<OneShot> {
type Time = Microseconds;
fn start<T>(&mut self, period: T)
where
T: Into<Microseconds>,
{
self.configure(TimeConf::calculate_period(self.input_freq, period.into()));
// Start LPTIM in one-shot mode.
self.lptim
.cr
.write(|w| w.enable().set_bit().sngstrt().set_bit());
}
fn wait(&mut self) -> nb::Result<(), Void> {
if self.lptim.isr.read().arrm().bit_is_clear() {
Err(nb::Error::WouldBlock)
} else {
self.lptim.icr.write(|w| w.arrmcf().set_bit());
Ok(())
}
}
}
#[derive(Copy, Clone)]
struct TimeConf {
psc_encoded: u8,
arr: u16,
}
impl TimeConf {
const ARR_MAX: u16 = u16::max_value();
/// Calculates prescaler and autoreload value for producing overflows at a rate of
/// `output_freq`.
fn calculate_freq(input_freq: Hertz, output_freq: Hertz) -> Self {
// Fi = Frequency of input clock
// Fo = Output frequency (frequency of timer overflows, using ARR)
// psc = prescaler (must be power of two in range 1..=128)
// We know Fi and Fo, and want to know psc and ARR.
//
// The timer works like this:
// Fo = (Fi / psc) / ARR
//
// Therefore:
// Fo * ARR = Fi / psc
// Fo * ARR * psc = Fi
// ARR = (Fi / Fo) / psc
// psc = (Fi / Fo) / ARR
//
// We first calculate `psc` by assuming the largest `ARR` value, and round the result to the
// next power of two. If that's > 128, the chosen frequency is too slow for the timer and
// we panic. Otherwise we use that `psc` to calculate the real `ARR`.
// Add `ARR_MAX - 1` to round the result upwards
let psc = ((input_freq.0 / output_freq.0) + (u32(Self::ARR_MAX) - 1)) / u32(Self::ARR_MAX);
let psc = psc.next_power_of_two(); // always >= 1
assert!(psc <= 128);
// This calculation must be in u16 range because we assume the max. ARR value above ^
let arr = u16::try_from((input_freq.0 / output_freq.0) / psc).unwrap();
// PSC encoding is N where `psc = 2^N`
let psc_encoded = psc.trailing_zeros() as u8;
Self { psc_encoded, arr }
}
/// Calculates prescaler and autoreload value for producing overflows after every
/// `output_period`.
fn calculate_period(input_freq: Hertz, output_period: Microseconds) -> Self {
// Here, the `output_period` can be very long, resulting in an output frequency of < 1 Hz.
// Fi = Frequency of input clock
// Fo = Output frequency (frequency of timer overflows, using ARR)
// Po = 1 / Fo = Output Period
// psc = prescaler (must be power of two in range 1..=128)
// We know Fi and Fo, and want to know psc and ARR.
//
// The timer works like this:
// Fo = 1 / Po = (Fi / psc) / ARR
//
// Therefore:
// ARR / Po = Fi / psc
// (ARR * psc) / Po = Fi
// ARR * psc = Fi * Po
// ARR = (Fi * Po) / psc
// psc = (Fi * Po) / ARR
//
// We first calculate `psc` by assuming the largest `ARR` value, and round the result to the
// next power of two. If that's > 128, the chosen period is too long for the timer and we
// panic. Otherwise we use that `psc` to calculate the real `ARR`.
// First, calculate the product `Fi * Po`. Since `output_period` is in µs, we have to divide
// it by 1_000_000 to get seconds, without losing much precision. We can divide either of
// the multiplicants, or the resulting product. Dividing the resulting product results in
// the least amount of rouding error, but might require 64-bit multiplication and division,
// which is very expensive. Dividing either of the multiplicands by 1_000_000 can easily
// result in significant rounding error that makes this API useless.
let fi_po = u32(u64(input_freq.0) * u64(output_period.0) / 1_000_000).unwrap();
// Add `ARR_MAX - 1` to round the result upwards
let psc = (fi_po + (u32(Self::ARR_MAX) - 1)) / u32(Self::ARR_MAX);
assert!(psc > 0); // if 0, the output period is too short to be produced from input_freq
let psc = psc.next_power_of_two(); // always >= 1
assert!(psc <= 128); // if > 128, the output period is too long to be produced from input_freq
// This calculation must be in u16 range because we assume the max. ARR value above ^
let arr = (fi_po / psc) as u16;
// PSC encoding is N where `psc = 2^N`
let psc_encoded = psc.trailing_zeros() as u8;
Self { psc_encoded, arr }
}
}
#[cfg(test)]
mod tests {
use super::*;
/// Test-only methods.
impl TimeConf {
fn psc(&self) -> u8 {
1 << self.psc_encoded
}
/// Calculates the output frequency if the timer is configured according to `self` and is run at
/// `input_freq`.
fn output_freq(&self, input_freq: Hertz) -> Hertz {
Hertz(input_freq.0 / u32(self.psc()) / u32(self.arr))
}
fn output_period(&self, input_freq: Hertz) -> Microseconds {
Microseconds(
u32(u64(self.psc()) * u64(self.arr) * 1_000_000 / u64(input_freq.0)).unwrap(),
)
}
}
#[test]
fn calc_from_freq() {
// no psc necessary (so psc=1)
let c = TimeConf::calculate_freq(32_768.hz(), 1.hz());
assert_eq!(c.psc(), 1);
assert_eq!(c.arr, 32_768);
assert_eq!(c.output_freq(32_768.hz()), 1.hz());
// barely works with psc=1
let c = TimeConf::calculate_freq(65535.hz(), 1.hz());
assert_eq!(c.psc(), 1);
assert_eq!(c.arr, 65535);
assert_eq!(c.output_freq(65535.hz()), 1.hz());
// barely needs psc=2
let c = TimeConf::calculate_freq(65536.hz(), 1.hz());
assert_eq!(c.psc(), 2);
assert_eq!(c.arr, 32768);
assert_eq!(c.output_freq(65536.hz()), 1.hz());
// maximum possible ratio, needs psc=128 and max ARR
let c = TimeConf::calculate_freq((65535 * 128).hz(), 1.hz());
assert_eq!(c.psc(), 128);
assert_eq!(c.arr, 65535);
assert_eq!(c.output_freq((65535 * 128).hz()), 1.hz());
}
#[test]
#[should_panic(expected = "assertion failed: psc <= 128")]
fn freq_ratio_too_large() {
TimeConf::calculate_freq((65535 * 128 + 1).hz(), 1.hz());
}
#[test]
fn calc_from_period() {
// 1:1 ratio
let c = TimeConf::calculate_period(1_000.hz(), 1_000.us());
assert_eq!(c.psc(), 1);
assert_eq!(c.arr, 1);
assert_eq!(c.output_freq(1_000.hz()), 1_000.hz());
// real-world test: go from 32.768 kHz to 10 s
let c = TimeConf::calculate_period(32_768.hz(), 10_000_000.us());
assert_eq!(c.psc(), 8);
assert_eq!(c.arr, 40960);
assert_eq!(c.output_freq(32_768.hz()), 0.hz());
assert_eq!(c.output_period(32_768.hz()), 10_000_000.us());
}
#[test]
#[should_panic(expected = "assertion failed: psc > 0")]
fn period_too_short() {
TimeConf::calculate_period(1_000.hz(), 999.us());
}
} |
self.lptim.cr.write(|w| w.enable().set_bit());
// "After setting the ENABLE bit, a delay of two counter clock is needed before the LPTIM is
// actually enabled." | random_line_split |
thread.go | package runtime
import (
"errors"
"sync"
"unsafe"
)
// ThreadStatus is the type of a thread status
type ThreadStatus uint
// Available statuses for threads.
const (
ThreadOK ThreadStatus = 0 // Running thread
ThreadSuspended ThreadStatus = 1 // Thread has yielded and is waiting to be resumed
ThreadDead ThreadStatus = 3 // Thread has finished and cannot be resumed
)
// The depth of GoFunction calls in one thread is limited by this number in
// order to avoid irrecoverable Go stack overflows.
const maxGoFunctionCallDepth = 1000
// Data passed between Threads via their resume channel (Thread.resumeCh).
//
// Supported types for exception are ContextTerminationError (which means
// execution has run out of resources) and threadClose (which means the thread
// should be closed without resuming execution, new in Lua 5.4 via the
// coroutine.close() function). Other types will cause a panic.
type valuesError struct {
args []Value // arguments ot yield or resume
err error // execution error
exception interface{} // used when the thread should be closed right away
}
// A Thread is a lua thread.
//
// The mutex guarantees that if status == ThreadRunning, then caller
// is not nil.
//
type Thread struct {
*Runtime
mux sync.Mutex
status ThreadStatus
closeErr error // Error that caused the thread to stop
currentCont Cont // Currently running continuation
resumeCh chan valuesError
caller *Thread // Who resumed this thread
// Depth of GoFunction calls in the thread. This should not exceed
// maxGoFunctionCallDepth. The aim is to avoid Go stack overflows that
// cannot be recovered from (note that this does not limit recursion for Lua
// functions).
goFunctionCallDepth int
DebugHooks
closeStack // Stack of pending to-be-closed values
}
// NewThread creates a new thread out of a Runtime. Its initial
// status is suspended. Call Resume to run it.
func NewThread(r *Runtime) *Thread {
r.RequireSize(unsafe.Sizeof(Thread{}) + 100) // 100 is my guess at the size of a channel
return &Thread{
resumeCh: make(chan valuesError),
status: ThreadSuspended,
Runtime: r,
}
}
// CurrentCont returns the continuation currently running (or suspended) in the
// thread.
func (t *Thread) CurrentCont() Cont {
return t.currentCont
}
// IsMain returns true if the thread is the runtime's main thread.
func (t *Thread) IsMain() bool {
return t == t.mainThread
}
const maxErrorsInMessageHandler = 10
var errErrorInMessageHandler = StringValue("error in error handling")
// RunContinuation runs the continuation c in the thread. It keeps running until
// the next continuation is nil or an error occurs, in which case it returns the
// error.
func (t *Thread) RunContinuation(c Cont) (err error) {
var next Cont
var errContCount = 0
_ = t.triggerCall(t, c)
for c != nil {
if t != t.gcThread {
t.runPendingFinalizers()
}
t.currentCont = c
next, err = c.RunInThread(t)
if err != nil {
rtErr := ToError(err)
if rtErr.Handled() {
return rtErr
}
err = rtErr.AddContext(c, -1)
errContCount++
if t.messageHandler != nil {
if errContCount > maxErrorsInMessageHandler {
return newHandledError(errErrorInMessageHandler)
}
next = t.messageHandler.Continuation(t, newMessageHandlerCont(c))
} else {
next = newMessageHandlerCont(c)
}
next.Push(t.Runtime, ErrorValue(err))
}
c = next
}
return
}
// This is to be able to close a suspended coroutine without completing it, but
// still allow cleaning up the to-be-closed variables. If this is put on the
// resume channel of a running thread, yield will cause a panic in the goroutine
// and that will be caught in the defer() clause below.
type threadClose struct{}
//
// Coroutine management
//
// Start starts the thread in a goroutine, giving it the callable c to run. the
// t.Resume() method needs to be called to provide arguments to the callable.
func (t *Thread) Start(c Callable) {
t.RequireBytes(2 << 10) // A goroutine starts off with 2k stack
go func() {
var (
args []Value
err error
)
// If there was a panic due to an exceeded quota, we need to end the
// thread and propagate that panic to the calling thread
defer func() {
r := recover()
if r != nil {
switch r.(type) {
case ContextTerminationError:
case threadClose:
// This means we want to close the coroutine, so no panic!
r = nil
default:
panic(r)
}
}
t.end(args, err, r)
}()
args, err = t.getResumeValues()
if err == nil {
next := NewTerminationWith(t.CurrentCont(), 0, true)
err = t.call(c, args, next)
args = next.Etc()
}
}()
}
// Status returns the status of a thread (suspended, running or dead).
func (t *Thread) Status() ThreadStatus {
return t.status
}
// Resume execution of a suspended thread. Its status switches to
// running while its caller's status switches to suspended.
func (t *Thread) Resume(caller *Thread, args []Value) ([]Value, error) {
t.mux.Lock()
if t.status != ThreadSuspended {
t.mux.Unlock()
switch t.status {
case ThreadDead:
return nil, errors.New("cannot resume dead thread")
default:
return nil, errors.New("cannot resume running thread")
}
}
caller.mux.Lock()
if caller.status != ThreadOK |
t.caller = caller
t.status = ThreadOK
t.mux.Unlock()
caller.mux.Unlock()
t.sendResumeValues(args, nil, nil)
return caller.getResumeValues()
}
// Close a suspended thread. If successful, its status switches to dead. The
// boolean returned is true if it was possible to close the thread (i.e. it was
// suspended or already dead). The error is non-nil if there was an error in
// the cleanup process, or if the thread had already stopped with an error
// previously.
func (t *Thread) Close(caller *Thread) (bool, error) {
t.mux.Lock()
if t.status != ThreadSuspended {
t.mux.Unlock()
switch t.status {
case ThreadDead:
return true, t.closeErr
default:
return false, nil
}
}
caller.mux.Lock()
if caller.status != ThreadOK {
panic("Caller of thread to close is not running")
}
// The thread needs to go back to running to empty its close stack, before
// becoming dead.
t.caller = caller
t.status = ThreadOK
t.mux.Unlock()
caller.mux.Unlock()
t.sendResumeValues(nil, nil, threadClose{})
_, err := caller.getResumeValues()
return true, err
}
// Yield to the caller thread. The yielding thread's status switches to
// suspended. The caller's status must be OK.
func (t *Thread) Yield(args []Value) ([]Value, error) {
t.mux.Lock()
if t.status != ThreadOK {
panic("Thread to yield is not running")
}
caller := t.caller
if caller == nil {
t.mux.Unlock()
return nil, errors.New("cannot yield from main thread")
}
caller.mux.Lock()
if caller.status != ThreadOK {
panic("Caller of thread to yield is not OK")
}
t.status = ThreadSuspended
t.caller = nil
t.mux.Unlock()
caller.mux.Unlock()
caller.sendResumeValues(args, nil, nil)
return t.getResumeValues()
}
// This turns off the thread, cleaning up its close stack. The thread must be
// running.
func (t *Thread) end(args []Value, err error, exception interface{}) {
caller := t.caller
t.mux.Lock()
caller.mux.Lock()
defer t.mux.Unlock()
defer caller.mux.Unlock()
switch {
case t.status != ThreadOK:
panic("Called Thread.end on a non-running thread")
case caller.status != ThreadOK:
panic("Caller thread of ending thread is not OK")
}
close(t.resumeCh)
t.status = ThreadDead
t.caller = nil
err = t.cleanupCloseStack(nil, 0, err) // TODO: not nil
t.closeErr = err
caller.sendResumeValues(args, err, exception)
t.ReleaseBytes(2 << 10) // The goroutine will terminate after this
}
func (t *Thread) call(c Callable, args []Value, next Cont) error {
cont := c.Continuation(t, next)
t.Push(cont, args...)
return t.RunContinuation(cont)
}
func (t *Thread) getResumeValues() ([]Value, error) {
res := <-t.resumeCh
if res.exception != nil {
panic(res.exception)
}
return res.args, res.err
}
func (t *Thread) sendResumeValues(args []Value, err error, exception interface{}) {
t.resumeCh <- valuesError{args: args, err: err, exception: exception}
}
//
// Calling
//
// CallContext pushes a new runtime context on the thread's runtime and attempts
// to run f() in the thread. If the context runs out of resources while f() is
// running, all operations should abort and the CallContext should return
// immediately and not finalizing pending to-be-closed values.
//
// Otherwise (even if f() returns an error), pending to-be-closed values should
// be finalized.
//
// See quotas.md for details about this API.
func (t *Thread) CallContext(def RuntimeContextDef, f func() error) (ctx RuntimeContext, err error) {
t.PushContext(def)
c, h := t.CurrentCont(), t.closeStack.size()
defer func() {
ctx = t.PopContext()
if r := recover(); r != nil {
t.closeStack.truncate(h) // No resources to run that, so just discard it.
termErr, ok := r.(ContextTerminationError)
if !ok {
panic(r)
}
err = termErr
}
}()
err = t.cleanupCloseStack(c, h, f())
if t.GCPolicy() == IsolateGCPolicy {
t.runFinalizers(t.weakRefPool.ExtractAllMarkedFinalize())
}
if err != nil {
t.setStatus(StatusError)
}
return
}
//
// close stack operations
//
type closeStack struct {
stack []Value
}
func (s closeStack) size() int {
return len(s.stack)
}
func (s *closeStack) push(v Value) {
s.stack = append(s.stack, v)
}
func (s *closeStack) pop() (Value, bool) {
sz := len(s.stack)
if sz == 0 {
return NilValue, false
}
sz--
v := s.stack[sz]
s.stack = s.stack[:sz]
return v, true
}
func (s *closeStack) truncate(h int) {
sz := len(s.stack)
if sz > h {
s.stack = s.stack[:h]
}
}
// Truncate the close stack to size h, calling the __close metamethods in the
// context of the given continuation c and feeding them with the given error.
func (t *Thread) cleanupCloseStack(c Cont, h int, err error) error {
closeStack := &t.closeStack
for closeStack.size() > h {
v, _ := closeStack.pop()
if Truth(v) {
closeErr, ok := Metacall(t, v, "__close", []Value{v, ErrorValue(err)}, NewTerminationWith(c, 0, false))
if !ok {
return errors.New("to be closed value missing a __close metamethod")
}
if closeErr != nil {
err = closeErr
}
}
}
return err
}
//
// messageHandlerCont is a continuation that handles an error message (i.e.
// turns it to handled).
//
type messageHandlerCont struct {
c Cont
err Value
done bool
}
func newMessageHandlerCont(c Cont) *messageHandlerCont {
return &messageHandlerCont{c: c}
}
var _ Cont = (*messageHandlerCont)(nil)
func (c *messageHandlerCont) DebugInfo() *DebugInfo {
return c.c.DebugInfo()
}
func (c *messageHandlerCont) Next() Cont {
return c.c.Next()
}
func (c *messageHandlerCont) Parent() Cont {
return c.Next()
}
func (c *messageHandlerCont) Push(r *Runtime, v Value) {
if !c.done {
c.done = true
c.err = v
}
}
func (c *messageHandlerCont) PushEtc(r *Runtime, etc []Value) {
if c.done || len(etc) == 0 {
return
}
c.Push(r, etc[0])
}
func (c *messageHandlerCont) RunInThread(t *Thread) (Cont, error) {
return nil, newHandledError(c.err)
}
| {
panic("Caller of thread to resume is not running")
} | conditional_block |
thread.go | package runtime
import (
"errors"
"sync"
"unsafe"
)
// ThreadStatus is the type of a thread status
type ThreadStatus uint
// Available statuses for threads.
const (
ThreadOK ThreadStatus = 0 // Running thread
ThreadSuspended ThreadStatus = 1 // Thread has yielded and is waiting to be resumed
ThreadDead ThreadStatus = 3 // Thread has finished and cannot be resumed
)
// The depth of GoFunction calls in one thread is limited by this number in
// order to avoid irrecoverable Go stack overflows.
const maxGoFunctionCallDepth = 1000
// Data passed between Threads via their resume channel (Thread.resumeCh).
//
// Supported types for exception are ContextTerminationError (which means
// execution has run out of resources) and threadClose (which means the thread
// should be closed without resuming execution, new in Lua 5.4 via the
// coroutine.close() function). Other types will cause a panic.
type valuesError struct {
args []Value // arguments ot yield or resume
err error // execution error
exception interface{} // used when the thread should be closed right away
}
// A Thread is a lua thread.
//
// The mutex guarantees that if status == ThreadRunning, then caller
// is not nil.
//
type Thread struct {
*Runtime
mux sync.Mutex
status ThreadStatus
closeErr error // Error that caused the thread to stop
currentCont Cont // Currently running continuation
resumeCh chan valuesError
caller *Thread // Who resumed this thread
// Depth of GoFunction calls in the thread. This should not exceed
// maxGoFunctionCallDepth. The aim is to avoid Go stack overflows that
// cannot be recovered from (note that this does not limit recursion for Lua
// functions).
goFunctionCallDepth int
DebugHooks
closeStack // Stack of pending to-be-closed values
}
// NewThread creates a new thread out of a Runtime. Its initial
// status is suspended. Call Resume to run it.
func NewThread(r *Runtime) *Thread {
r.RequireSize(unsafe.Sizeof(Thread{}) + 100) // 100 is my guess at the size of a channel
return &Thread{
resumeCh: make(chan valuesError),
status: ThreadSuspended,
Runtime: r,
}
}
// CurrentCont returns the continuation currently running (or suspended) in the
// thread.
func (t *Thread) CurrentCont() Cont {
return t.currentCont
}
// IsMain returns true if the thread is the runtime's main thread.
func (t *Thread) IsMain() bool {
return t == t.mainThread
}
const maxErrorsInMessageHandler = 10
var errErrorInMessageHandler = StringValue("error in error handling")
// RunContinuation runs the continuation c in the thread. It keeps running until
// the next continuation is nil or an error occurs, in which case it returns the
// error.
func (t *Thread) RunContinuation(c Cont) (err error) {
var next Cont
var errContCount = 0
_ = t.triggerCall(t, c)
for c != nil {
if t != t.gcThread {
t.runPendingFinalizers()
}
t.currentCont = c
next, err = c.RunInThread(t)
if err != nil {
rtErr := ToError(err)
if rtErr.Handled() {
return rtErr
}
err = rtErr.AddContext(c, -1)
errContCount++
if t.messageHandler != nil {
if errContCount > maxErrorsInMessageHandler {
return newHandledError(errErrorInMessageHandler)
}
next = t.messageHandler.Continuation(t, newMessageHandlerCont(c))
} else {
next = newMessageHandlerCont(c)
}
next.Push(t.Runtime, ErrorValue(err))
}
c = next
}
return
}
// This is to be able to close a suspended coroutine without completing it, but
// still allow cleaning up the to-be-closed variables. If this is put on the
// resume channel of a running thread, yield will cause a panic in the goroutine
// and that will be caught in the defer() clause below.
type threadClose struct{}
//
// Coroutine management
//
// Start starts the thread in a goroutine, giving it the callable c to run. the
// t.Resume() method needs to be called to provide arguments to the callable.
func (t *Thread) Start(c Callable) {
t.RequireBytes(2 << 10) // A goroutine starts off with 2k stack
go func() {
var (
args []Value
err error
)
// If there was a panic due to an exceeded quota, we need to end the
// thread and propagate that panic to the calling thread
defer func() {
r := recover()
if r != nil {
switch r.(type) {
case ContextTerminationError:
case threadClose:
// This means we want to close the coroutine, so no panic!
r = nil
default:
panic(r)
}
}
t.end(args, err, r)
}()
args, err = t.getResumeValues()
if err == nil {
next := NewTerminationWith(t.CurrentCont(), 0, true)
err = t.call(c, args, next)
args = next.Etc()
}
}()
}
// Status returns the status of a thread (suspended, running or dead).
func (t *Thread) Status() ThreadStatus {
return t.status
}
// Resume execution of a suspended thread. Its status switches to
// running while its caller's status switches to suspended.
func (t *Thread) Resume(caller *Thread, args []Value) ([]Value, error) {
t.mux.Lock()
if t.status != ThreadSuspended {
t.mux.Unlock()
switch t.status {
case ThreadDead:
return nil, errors.New("cannot resume dead thread")
default:
return nil, errors.New("cannot resume running thread")
}
}
caller.mux.Lock()
if caller.status != ThreadOK {
panic("Caller of thread to resume is not running")
}
t.caller = caller
t.status = ThreadOK
t.mux.Unlock()
caller.mux.Unlock()
t.sendResumeValues(args, nil, nil)
return caller.getResumeValues()
}
// Close a suspended thread. If successful, its status switches to dead. The
// boolean returned is true if it was possible to close the thread (i.e. it was
// suspended or already dead). The error is non-nil if there was an error in
// the cleanup process, or if the thread had already stopped with an error
// previously.
func (t *Thread) Close(caller *Thread) (bool, error) {
t.mux.Lock()
if t.status != ThreadSuspended {
t.mux.Unlock()
switch t.status {
case ThreadDead:
return true, t.closeErr
default:
return false, nil
}
}
caller.mux.Lock()
if caller.status != ThreadOK {
panic("Caller of thread to close is not running")
}
// The thread needs to go back to running to empty its close stack, before
// becoming dead.
t.caller = caller
t.status = ThreadOK
t.mux.Unlock()
caller.mux.Unlock()
t.sendResumeValues(nil, nil, threadClose{})
_, err := caller.getResumeValues()
return true, err
}
// Yield to the caller thread. The yielding thread's status switches to
// suspended. The caller's status must be OK.
func (t *Thread) Yield(args []Value) ([]Value, error) {
t.mux.Lock()
if t.status != ThreadOK {
panic("Thread to yield is not running")
}
caller := t.caller
if caller == nil {
t.mux.Unlock()
return nil, errors.New("cannot yield from main thread")
}
caller.mux.Lock()
if caller.status != ThreadOK {
panic("Caller of thread to yield is not OK")
}
t.status = ThreadSuspended
t.caller = nil
t.mux.Unlock()
caller.mux.Unlock()
caller.sendResumeValues(args, nil, nil)
return t.getResumeValues()
}
// This turns off the thread, cleaning up its close stack. The thread must be
// running.
func (t *Thread) end(args []Value, err error, exception interface{}) {
caller := t.caller
t.mux.Lock()
caller.mux.Lock()
defer t.mux.Unlock()
defer caller.mux.Unlock()
switch {
case t.status != ThreadOK:
panic("Called Thread.end on a non-running thread")
case caller.status != ThreadOK:
panic("Caller thread of ending thread is not OK")
}
close(t.resumeCh)
t.status = ThreadDead
t.caller = nil
err = t.cleanupCloseStack(nil, 0, err) // TODO: not nil
t.closeErr = err
caller.sendResumeValues(args, err, exception)
t.ReleaseBytes(2 << 10) // The goroutine will terminate after this
}
func (t *Thread) call(c Callable, args []Value, next Cont) error {
cont := c.Continuation(t, next)
t.Push(cont, args...)
return t.RunContinuation(cont)
}
func (t *Thread) getResumeValues() ([]Value, error) {
res := <-t.resumeCh
if res.exception != nil {
panic(res.exception)
}
return res.args, res.err
}
func (t *Thread) sendResumeValues(args []Value, err error, exception interface{}) {
t.resumeCh <- valuesError{args: args, err: err, exception: exception}
}
//
// Calling
//
// CallContext pushes a new runtime context on the thread's runtime and attempts
// to run f() in the thread. If the context runs out of resources while f() is
// running, all operations should abort and the CallContext should return
// immediately and not finalizing pending to-be-closed values.
//
// Otherwise (even if f() returns an error), pending to-be-closed values should
// be finalized.
//
// See quotas.md for details about this API.
func (t *Thread) CallContext(def RuntimeContextDef, f func() error) (ctx RuntimeContext, err error) {
t.PushContext(def)
c, h := t.CurrentCont(), t.closeStack.size()
defer func() {
ctx = t.PopContext()
if r := recover(); r != nil {
t.closeStack.truncate(h) // No resources to run that, so just discard it.
termErr, ok := r.(ContextTerminationError)
if !ok {
panic(r)
}
err = termErr
}
}()
err = t.cleanupCloseStack(c, h, f())
if t.GCPolicy() == IsolateGCPolicy {
t.runFinalizers(t.weakRefPool.ExtractAllMarkedFinalize())
}
if err != nil {
t.setStatus(StatusError)
}
return
}
//
// close stack operations
//
type closeStack struct {
stack []Value
}
func (s closeStack) size() int {
return len(s.stack)
}
func (s *closeStack) push(v Value) {
s.stack = append(s.stack, v)
}
func (s *closeStack) pop() (Value, bool) {
sz := len(s.stack)
if sz == 0 {
return NilValue, false
}
sz--
v := s.stack[sz]
s.stack = s.stack[:sz]
return v, true
}
func (s *closeStack) truncate(h int) |
// Truncate the close stack to size h, calling the __close metamethods in the
// context of the given continuation c and feeding them with the given error.
func (t *Thread) cleanupCloseStack(c Cont, h int, err error) error {
closeStack := &t.closeStack
for closeStack.size() > h {
v, _ := closeStack.pop()
if Truth(v) {
closeErr, ok := Metacall(t, v, "__close", []Value{v, ErrorValue(err)}, NewTerminationWith(c, 0, false))
if !ok {
return errors.New("to be closed value missing a __close metamethod")
}
if closeErr != nil {
err = closeErr
}
}
}
return err
}
//
// messageHandlerCont is a continuation that handles an error message (i.e.
// turns it to handled).
//
type messageHandlerCont struct {
c Cont
err Value
done bool
}
func newMessageHandlerCont(c Cont) *messageHandlerCont {
return &messageHandlerCont{c: c}
}
var _ Cont = (*messageHandlerCont)(nil)
func (c *messageHandlerCont) DebugInfo() *DebugInfo {
return c.c.DebugInfo()
}
func (c *messageHandlerCont) Next() Cont {
return c.c.Next()
}
func (c *messageHandlerCont) Parent() Cont {
return c.Next()
}
func (c *messageHandlerCont) Push(r *Runtime, v Value) {
if !c.done {
c.done = true
c.err = v
}
}
func (c *messageHandlerCont) PushEtc(r *Runtime, etc []Value) {
if c.done || len(etc) == 0 {
return
}
c.Push(r, etc[0])
}
func (c *messageHandlerCont) RunInThread(t *Thread) (Cont, error) {
return nil, newHandledError(c.err)
}
| {
sz := len(s.stack)
if sz > h {
s.stack = s.stack[:h]
}
} | identifier_body |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.