file_name large_stringlengths 4 140 | prefix large_stringlengths 0 12.1k | suffix large_stringlengths 0 12k | middle large_stringlengths 0 7.51k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
main.js | optional.",
"image6": "img/slideline_placeholder_images_6.jpg",
"caption6": "This is the sixth caption. This is optional.",
"label6": "Label 6. This is optional.",
}
};
$(document).ready(function(){
// variables for values
var val;
var wholeVal;
var decVal;
var last = 0;
var lastStop = 0;
// varables for each element
var prevImage;
var currentImage;
var nextImage;
var prevLabel;
var currentLabel;
var nextLabel;
var prevCaption;
var currentCaption;
var nextCaption;
// variables for catching and storing values during slide
var oldVal;
var currentVal;
var newVal;
var dataSource = s9.initialParams['dataSource'];
function clearGhosts(start, end) {
$('.image').each(function() {
if (this.getAttribute('id') == 'image' + start || this.getAttribute('id') == 'image' + end) {
return;
}
$(this).css({opacity: 0});
});
}
//This function initializes the jQuery slider and shows the first slide
function buildSlider(){
$('#slide_images #image1').css("opacity", 1);
$('#slide_labels #label1').css("opacity", 1);
$('#slide_captions #caption1').css("opacity", 1);
currentImage = document.getElementById('image1');
currentLabel = document.getElementById('label1');
currentCaption = document.getElementById('caption1');
$("#slider").slider({
animate: true,
value: 1,
min: 1,
max: $('#slide_images .image').size(),
step: .01,
slide: function (event, ui) {
$('.ui-slider-handle').removeClass('pulsing');
sliderPos = (ui.value); //ex: 1.25
wholeSliderPos = Math.floor(sliderPos); //ex: 1
decVal = sliderPos - wholeSliderPos; // ex: 1.25 - 1 (=.25)
//console.log('sliding: ' + decVal);
var rangeStart = Math.floor(sliderPos);
var rangeEnd = Math.ceil(sliderPos);
if (lastStop > 0 && lastStop != rangeStart && lastStop != rangeEnd){
var old = $('#image' + lastStop);
old.css('opacity', 0);
}
prevImage = document.getElementById('image' + (wholeSliderPos - 1));
currentImage = document.getElementById('image' + wholeSliderPos);
nextImage = document.getElementById('image' + (wholeSliderPos + 1));
prevLabel = document.getElementById('label' + (wholeSliderPos - 1));
currentLabel = document.getElementById('label' + wholeSliderPos);
nextLabel = document.getElementById('label' + (wholeSliderPos + 1));
prevCaption = document.getElementById('caption' + (wholeSliderPos - 1));
currentCaption = document.getElementById('caption' + wholeSliderPos);
nextCaption = document.getElementById('caption' + (wholeSliderPos + 1));
if (ui.value > last) {
$(currentImage).css("opacity", 1 - decVal);
$(nextImage).css("opacity", decVal);
}
if (ui.value < last) {
$(currentImage).css("opacity", 1 - decVal);
$(nextImage).css("opacity", decVal);
}
if (Math.floor(last) != wholeSliderPos) {
clearGhosts(rangeStart, rangeEnd);
}
last = ui.value;
},
stop: function( event, ui ) {
$('.ui-slider-handle').removeClass('pulsing');
var wholeVal = Math.round(ui.value);
$( "#slider" ).slider( "value", wholeVal );
// console.log('stop: ' + wholeVal);
prevImage = document.getElementById('image' + (wholeVal - 1));
currentImage = document.getElementById('image' + wholeVal);
nextImage = document.getElementById('image' + (wholeVal + 1));
prevLabel = document.getElementById('label' + (wholeVal - 1));
currentLabel = document.getElementById('label' + wholeVal);
nextLabel = document.getElementById('label' + (wholeVal + 1));
prevCaption = document.getElementById('caption' + (wholeVal - 1));
currentCaption = document.getElementById('caption' + wholeVal);
nextCaption = document.getElementById('caption' + (wholeVal + 1));
$('.image').css("opacity", 0);
$('.label').css("opacity", 0);
$('.caption').css("opacity", 0);
$(currentImage).css("opacity", 1);
$(currentLabel).css("opacity", 1);
$(currentCaption).css("opacity", 1);
last = wholeVal;
lastStop = wholeVal;
}
});
}
//This function draws the tick marks/pips for the slider. It must be called after the slider's max is set.
function setSliderTicks(){
var $slider = $('#slider');
var max = $slider.slider("option", "max");
if (max > 1) {
var spacing = 100 / (max -1);
} else {
var spacing = 50;
}
$slider.find('.ui-slider-tick-mark').remove();
for (var i = 0; i < max ; i++) {
$('<span class="ui-slider-tick-mark"></span>').css('left', (spacing * i) + '%').appendTo($slider);
}
}
// Extract the text from the template .html() is the jquery helper method for that
var image_template = $('#image-temp').html();
var label_template = $('#label-temp').html();
var caption_template = $('#caption-temp').html();
// Compile that into an handlebars template
var imageTemplate = Handlebars.compile(image_template);
var labelTemplate = Handlebars.compile(label_template);
var captionTemplate = Handlebars.compile(caption_template);
// Retrieve the placeHolder where the Posts will be displayed
var imageHolder = $("#slide_images");
var labelHolder = $("#slide_labels");
var captionHolder = $("#slide_captions");
var imgArr = []
var slideNum = 0;
for(var key in s9.initialParams) {
if (key.slice(0, 5) == 'image'){
slideNum++;
imgArr.push(s9.initialParams[key]);
}
}
var slideInfo = {
'slideData': []
};
for(var i=0; i<slideNum; i++){
var temp = i+1;
// console.log("loading slide " + temp);
if (!s9.initialParams['image'+(i+1)]) {
continue;
}
slideInfo['slideData'].push({
image : s9.initialParams['image'+(i+1)],
caption : s9.initialParams['caption'+(i+1)],
label : s9.initialParams['label'+(i+1)],
index : temp
});
};
if (slideInfo.slideData.length == 0){
//Show default - placeholder images.
slideInfo.slideData = [{
image: 'img/placeholder-image1.svg',
caption: 'Caption 1',
label: 'Label 1',
index: 1,
},
{ image: 'img/placeholder-image2.svg',
caption: 'Caption 2',
label: 'Label 2',
index: 2,
},
{
image: 'img/placeholder-image3.svg',
caption: 'Caption 3',
label: 'Label 3',
index: 3,
}];
}
//Add each of the slide images, labels, and captions to the DOM
$.each(slideInfo.slideData,function(index,element){
// Generate the HTML for each post
var imagehtml = imageTemplate(element);
var labelhtml = labelTemplate(element);
var captionhtml = captionTemplate(element);
// Render the posts into the page
imageHolder.append(imagehtml);
if (element.label){
labelHolder.append(labelhtml);
}
if (element.caption) {
captionHolder.append(captionhtml);
}
});
//Size the image div to the tallest slide image
function imageSizing() {
$('#slide_images').css({height: 0});
var numImages = $('.image img').length;
var numDone = 0;
$('.image img').each(function() {
$(this).load(function () {
var w = $('#slide_images').outerWidth();
var ratio = w / this.naturalWidth;
var curHeight = $('#slide_images').height() || 0;
var displayH = this.naturalHeight * ratio;
if (displayH > curHeight){
$('#slide_images').css({height: displayH});
}
numDone++;
if (numDone == numImages) {
setSize();
numDone = 0;
}
$(this).off('load');
});
if (this.complete) | {
$(this).load();
} | conditional_block | |
main.js | var oldVal;
var currentVal;
var newVal;
var dataSource = s9.initialParams['dataSource'];
function clearGhosts(start, end) {
$('.image').each(function() {
if (this.getAttribute('id') == 'image' + start || this.getAttribute('id') == 'image' + end) {
return;
}
$(this).css({opacity: 0});
});
}
//This function initializes the jQuery slider and shows the first slide
function buildSlider(){
$('#slide_images #image1').css("opacity", 1);
$('#slide_labels #label1').css("opacity", 1);
$('#slide_captions #caption1').css("opacity", 1);
currentImage = document.getElementById('image1');
currentLabel = document.getElementById('label1');
currentCaption = document.getElementById('caption1');
$("#slider").slider({
animate: true,
value: 1,
min: 1,
max: $('#slide_images .image').size(),
step: .01,
slide: function (event, ui) {
$('.ui-slider-handle').removeClass('pulsing');
sliderPos = (ui.value); //ex: 1.25
wholeSliderPos = Math.floor(sliderPos); //ex: 1
decVal = sliderPos - wholeSliderPos; // ex: 1.25 - 1 (=.25)
//console.log('sliding: ' + decVal);
var rangeStart = Math.floor(sliderPos);
var rangeEnd = Math.ceil(sliderPos);
if (lastStop > 0 && lastStop != rangeStart && lastStop != rangeEnd){
var old = $('#image' + lastStop);
old.css('opacity', 0);
}
prevImage = document.getElementById('image' + (wholeSliderPos - 1));
currentImage = document.getElementById('image' + wholeSliderPos);
nextImage = document.getElementById('image' + (wholeSliderPos + 1));
prevLabel = document.getElementById('label' + (wholeSliderPos - 1));
currentLabel = document.getElementById('label' + wholeSliderPos);
nextLabel = document.getElementById('label' + (wholeSliderPos + 1));
prevCaption = document.getElementById('caption' + (wholeSliderPos - 1));
currentCaption = document.getElementById('caption' + wholeSliderPos);
nextCaption = document.getElementById('caption' + (wholeSliderPos + 1));
if (ui.value > last) {
$(currentImage).css("opacity", 1 - decVal);
$(nextImage).css("opacity", decVal);
}
if (ui.value < last) {
$(currentImage).css("opacity", 1 - decVal);
$(nextImage).css("opacity", decVal);
}
if (Math.floor(last) != wholeSliderPos) {
clearGhosts(rangeStart, rangeEnd);
}
last = ui.value;
},
stop: function( event, ui ) {
$('.ui-slider-handle').removeClass('pulsing');
var wholeVal = Math.round(ui.value);
$( "#slider" ).slider( "value", wholeVal );
// console.log('stop: ' + wholeVal);
prevImage = document.getElementById('image' + (wholeVal - 1));
currentImage = document.getElementById('image' + wholeVal);
nextImage = document.getElementById('image' + (wholeVal + 1));
prevLabel = document.getElementById('label' + (wholeVal - 1));
currentLabel = document.getElementById('label' + wholeVal);
nextLabel = document.getElementById('label' + (wholeVal + 1));
prevCaption = document.getElementById('caption' + (wholeVal - 1));
currentCaption = document.getElementById('caption' + wholeVal);
nextCaption = document.getElementById('caption' + (wholeVal + 1));
$('.image').css("opacity", 0);
$('.label').css("opacity", 0);
$('.caption').css("opacity", 0);
$(currentImage).css("opacity", 1);
$(currentLabel).css("opacity", 1);
$(currentCaption).css("opacity", 1);
last = wholeVal;
lastStop = wholeVal;
}
});
}
//This function draws the tick marks/pips for the slider. It must be called after the slider's max is set.
function setSliderTicks(){
var $slider = $('#slider');
var max = $slider.slider("option", "max");
if (max > 1) {
var spacing = 100 / (max -1);
} else {
var spacing = 50;
}
$slider.find('.ui-slider-tick-mark').remove();
for (var i = 0; i < max ; i++) {
$('<span class="ui-slider-tick-mark"></span>').css('left', (spacing * i) + '%').appendTo($slider);
}
}
// Extract the text from the template .html() is the jquery helper method for that
var image_template = $('#image-temp').html();
var label_template = $('#label-temp').html();
var caption_template = $('#caption-temp').html();
// Compile that into an handlebars template
var imageTemplate = Handlebars.compile(image_template);
var labelTemplate = Handlebars.compile(label_template);
var captionTemplate = Handlebars.compile(caption_template);
// Retrieve the placeHolder where the Posts will be displayed
var imageHolder = $("#slide_images");
var labelHolder = $("#slide_labels");
var captionHolder = $("#slide_captions");
var imgArr = []
var slideNum = 0;
for(var key in s9.initialParams) {
if (key.slice(0, 5) == 'image'){
slideNum++;
imgArr.push(s9.initialParams[key]);
}
}
var slideInfo = {
'slideData': []
};
for(var i=0; i<slideNum; i++){
var temp = i+1;
// console.log("loading slide " + temp);
if (!s9.initialParams['image'+(i+1)]) {
continue;
}
slideInfo['slideData'].push({
image : s9.initialParams['image'+(i+1)],
caption : s9.initialParams['caption'+(i+1)],
label : s9.initialParams['label'+(i+1)],
index : temp
});
};
if (slideInfo.slideData.length == 0){
//Show default - placeholder images.
slideInfo.slideData = [{
image: 'img/placeholder-image1.svg',
caption: 'Caption 1',
label: 'Label 1',
index: 1,
},
{ image: 'img/placeholder-image2.svg',
caption: 'Caption 2',
label: 'Label 2',
index: 2,
},
{
image: 'img/placeholder-image3.svg',
caption: 'Caption 3',
label: 'Label 3',
index: 3,
}];
}
//Add each of the slide images, labels, and captions to the DOM
$.each(slideInfo.slideData,function(index,element){
// Generate the HTML for each post
var imagehtml = imageTemplate(element);
var labelhtml = labelTemplate(element);
var captionhtml = captionTemplate(element);
// Render the posts into the page
imageHolder.append(imagehtml);
if (element.label){
labelHolder.append(labelhtml);
}
if (element.caption) {
captionHolder.append(captionhtml);
}
});
//Size the image div to the tallest slide image
function imageSizing() {
$('#slide_images').css({height: 0});
var numImages = $('.image img').length;
var numDone = 0;
$('.image img').each(function() {
$(this).load(function () {
var w = $('#slide_images').outerWidth();
var ratio = w / this.naturalWidth;
var curHeight = $('#slide_images').height() || 0;
var displayH = this.naturalHeight * ratio;
if (displayH > curHeight){
$('#slide_images').css({height: displayH});
}
numDone++;
if (numDone == numImages) {
setSize();
numDone = 0;
}
$(this).off('load');
});
if (this.complete) {
$(this).load();
}
});
}
if ($('.label').length == 0){
$('#slide_labels').hide();
$('#slide_captions').css({'border-top': 'none'});
} else {
labelSizing();
}
if ($('.caption').length == 0){
$('#slide_captions').hide();
} else {
captionSizing();
}
// build the slideline slider
buildSlider();
//add ticks to slideline slider
setSliderTicks();
//set heights of image container
imageSizing();
//This function sets the size of the caption & label areas to the tallest ones
function | captionSizing | identifier_name | |
main.js | is optional.",
"label5": "Label 5. This is optional.",
"image6": "img/slideline_placeholder_images_6.jpg",
"caption6": "This is the sixth caption. This is optional.",
"label6": "Label 6. This is optional.",
}
};
$(document).ready(function(){
// variables for values
var val;
var wholeVal;
var decVal;
var last = 0;
var lastStop = 0;
// varables for each element
var prevImage;
var currentImage;
var nextImage;
var prevLabel;
var currentLabel;
var nextLabel; |
// variables for catching and storing values during slide
var oldVal;
var currentVal;
var newVal;
var dataSource = s9.initialParams['dataSource'];
function clearGhosts(start, end) {
$('.image').each(function() {
if (this.getAttribute('id') == 'image' + start || this.getAttribute('id') == 'image' + end) {
return;
}
$(this).css({opacity: 0});
});
}
//This function initializes the jQuery slider and shows the first slide
function buildSlider(){
$('#slide_images #image1').css("opacity", 1);
$('#slide_labels #label1').css("opacity", 1);
$('#slide_captions #caption1').css("opacity", 1);
currentImage = document.getElementById('image1');
currentLabel = document.getElementById('label1');
currentCaption = document.getElementById('caption1');
$("#slider").slider({
animate: true,
value: 1,
min: 1,
max: $('#slide_images .image').size(),
step: .01,
slide: function (event, ui) {
$('.ui-slider-handle').removeClass('pulsing');
sliderPos = (ui.value); //ex: 1.25
wholeSliderPos = Math.floor(sliderPos); //ex: 1
decVal = sliderPos - wholeSliderPos; // ex: 1.25 - 1 (=.25)
//console.log('sliding: ' + decVal);
var rangeStart = Math.floor(sliderPos);
var rangeEnd = Math.ceil(sliderPos);
if (lastStop > 0 && lastStop != rangeStart && lastStop != rangeEnd){
var old = $('#image' + lastStop);
old.css('opacity', 0);
}
prevImage = document.getElementById('image' + (wholeSliderPos - 1));
currentImage = document.getElementById('image' + wholeSliderPos);
nextImage = document.getElementById('image' + (wholeSliderPos + 1));
prevLabel = document.getElementById('label' + (wholeSliderPos - 1));
currentLabel = document.getElementById('label' + wholeSliderPos);
nextLabel = document.getElementById('label' + (wholeSliderPos + 1));
prevCaption = document.getElementById('caption' + (wholeSliderPos - 1));
currentCaption = document.getElementById('caption' + wholeSliderPos);
nextCaption = document.getElementById('caption' + (wholeSliderPos + 1));
if (ui.value > last) {
$(currentImage).css("opacity", 1 - decVal);
$(nextImage).css("opacity", decVal);
}
if (ui.value < last) {
$(currentImage).css("opacity", 1 - decVal);
$(nextImage).css("opacity", decVal);
}
if (Math.floor(last) != wholeSliderPos) {
clearGhosts(rangeStart, rangeEnd);
}
last = ui.value;
},
stop: function( event, ui ) {
$('.ui-slider-handle').removeClass('pulsing');
var wholeVal = Math.round(ui.value);
$( "#slider" ).slider( "value", wholeVal );
// console.log('stop: ' + wholeVal);
prevImage = document.getElementById('image' + (wholeVal - 1));
currentImage = document.getElementById('image' + wholeVal);
nextImage = document.getElementById('image' + (wholeVal + 1));
prevLabel = document.getElementById('label' + (wholeVal - 1));
currentLabel = document.getElementById('label' + wholeVal);
nextLabel = document.getElementById('label' + (wholeVal + 1));
prevCaption = document.getElementById('caption' + (wholeVal - 1));
currentCaption = document.getElementById('caption' + wholeVal);
nextCaption = document.getElementById('caption' + (wholeVal + 1));
$('.image').css("opacity", 0);
$('.label').css("opacity", 0);
$('.caption').css("opacity", 0);
$(currentImage).css("opacity", 1);
$(currentLabel).css("opacity", 1);
$(currentCaption).css("opacity", 1);
last = wholeVal;
lastStop = wholeVal;
}
});
}
//This function draws the tick marks/pips for the slider. It must be called after the slider's max is set.
function setSliderTicks(){
var $slider = $('#slider');
var max = $slider.slider("option", "max");
if (max > 1) {
var spacing = 100 / (max -1);
} else {
var spacing = 50;
}
$slider.find('.ui-slider-tick-mark').remove();
for (var i = 0; i < max ; i++) {
$('<span class="ui-slider-tick-mark"></span>').css('left', (spacing * i) + '%').appendTo($slider);
}
}
// Extract the text from the template .html() is the jquery helper method for that
var image_template = $('#image-temp').html();
var label_template = $('#label-temp').html();
var caption_template = $('#caption-temp').html();
// Compile that into an handlebars template
var imageTemplate = Handlebars.compile(image_template);
var labelTemplate = Handlebars.compile(label_template);
var captionTemplate = Handlebars.compile(caption_template);
// Retrieve the placeHolder where the Posts will be displayed
var imageHolder = $("#slide_images");
var labelHolder = $("#slide_labels");
var captionHolder = $("#slide_captions");
var imgArr = []
var slideNum = 0;
for(var key in s9.initialParams) {
if (key.slice(0, 5) == 'image'){
slideNum++;
imgArr.push(s9.initialParams[key]);
}
}
var slideInfo = {
'slideData': []
};
for(var i=0; i<slideNum; i++){
var temp = i+1;
// console.log("loading slide " + temp);
if (!s9.initialParams['image'+(i+1)]) {
continue;
}
slideInfo['slideData'].push({
image : s9.initialParams['image'+(i+1)],
caption : s9.initialParams['caption'+(i+1)],
label : s9.initialParams['label'+(i+1)],
index : temp
});
};
if (slideInfo.slideData.length == 0){
//Show default - placeholder images.
slideInfo.slideData = [{
image: 'img/placeholder-image1.svg',
caption: 'Caption 1',
label: 'Label 1',
index: 1,
},
{ image: 'img/placeholder-image2.svg',
caption: 'Caption 2',
label: 'Label 2',
index: 2,
},
{
image: 'img/placeholder-image3.svg',
caption: 'Caption 3',
label: 'Label 3',
index: 3,
}];
}
//Add each of the slide images, labels, and captions to the DOM
$.each(slideInfo.slideData,function(index,element){
// Generate the HTML for each post
var imagehtml = imageTemplate(element);
var labelhtml = labelTemplate(element);
var captionhtml = captionTemplate(element);
// Render the posts into the page
imageHolder.append(imagehtml);
if (element.label){
labelHolder.append(labelhtml);
}
if (element.caption) {
captionHolder.append(captionhtml);
}
});
//Size the image div to the tallest slide image
function imageSizing() {
$('#slide_images').css({height: 0});
var numImages = $('.image img').length;
var numDone = 0;
$('.image img').each(function() {
$(this).load(function () {
var w = $('#slide_images').outerWidth();
var ratio = w / this.naturalWidth;
var curHeight = $('#slide_images').height() || 0;
var displayH = this.naturalHeight * ratio;
if (displayH > curHeight){
$('#slide_images').css({height: displayH});
}
numDone++;
if (numDone == numImages) {
setSize();
numDone = 0;
}
$(this).off('load');
});
| var prevCaption;
var currentCaption;
var nextCaption; | random_line_split |
main.js | is optional.",
"label5": "Label 5. This is optional.",
"image6": "img/slideline_placeholder_images_6.jpg",
"caption6": "This is the sixth caption. This is optional.",
"label6": "Label 6. This is optional.",
}
};
$(document).ready(function(){
// variables for values
var val;
var wholeVal;
var decVal;
var last = 0;
var lastStop = 0;
// varables for each element
var prevImage;
var currentImage;
var nextImage;
var prevLabel;
var currentLabel;
var nextLabel;
var prevCaption;
var currentCaption;
var nextCaption;
// variables for catching and storing values during slide
var oldVal;
var currentVal;
var newVal;
var dataSource = s9.initialParams['dataSource'];
function clearGhosts(start, end) |
//This function initializes the jQuery slider and shows the first slide
function buildSlider(){
$('#slide_images #image1').css("opacity", 1);
$('#slide_labels #label1').css("opacity", 1);
$('#slide_captions #caption1').css("opacity", 1);
currentImage = document.getElementById('image1');
currentLabel = document.getElementById('label1');
currentCaption = document.getElementById('caption1');
$("#slider").slider({
animate: true,
value: 1,
min: 1,
max: $('#slide_images .image').size(),
step: .01,
slide: function (event, ui) {
$('.ui-slider-handle').removeClass('pulsing');
sliderPos = (ui.value); //ex: 1.25
wholeSliderPos = Math.floor(sliderPos); //ex: 1
decVal = sliderPos - wholeSliderPos; // ex: 1.25 - 1 (=.25)
//console.log('sliding: ' + decVal);
var rangeStart = Math.floor(sliderPos);
var rangeEnd = Math.ceil(sliderPos);
if (lastStop > 0 && lastStop != rangeStart && lastStop != rangeEnd){
var old = $('#image' + lastStop);
old.css('opacity', 0);
}
prevImage = document.getElementById('image' + (wholeSliderPos - 1));
currentImage = document.getElementById('image' + wholeSliderPos);
nextImage = document.getElementById('image' + (wholeSliderPos + 1));
prevLabel = document.getElementById('label' + (wholeSliderPos - 1));
currentLabel = document.getElementById('label' + wholeSliderPos);
nextLabel = document.getElementById('label' + (wholeSliderPos + 1));
prevCaption = document.getElementById('caption' + (wholeSliderPos - 1));
currentCaption = document.getElementById('caption' + wholeSliderPos);
nextCaption = document.getElementById('caption' + (wholeSliderPos + 1));
if (ui.value > last) {
$(currentImage).css("opacity", 1 - decVal);
$(nextImage).css("opacity", decVal);
}
if (ui.value < last) {
$(currentImage).css("opacity", 1 - decVal);
$(nextImage).css("opacity", decVal);
}
if (Math.floor(last) != wholeSliderPos) {
clearGhosts(rangeStart, rangeEnd);
}
last = ui.value;
},
stop: function( event, ui ) {
$('.ui-slider-handle').removeClass('pulsing');
var wholeVal = Math.round(ui.value);
$( "#slider" ).slider( "value", wholeVal );
// console.log('stop: ' + wholeVal);
prevImage = document.getElementById('image' + (wholeVal - 1));
currentImage = document.getElementById('image' + wholeVal);
nextImage = document.getElementById('image' + (wholeVal + 1));
prevLabel = document.getElementById('label' + (wholeVal - 1));
currentLabel = document.getElementById('label' + wholeVal);
nextLabel = document.getElementById('label' + (wholeVal + 1));
prevCaption = document.getElementById('caption' + (wholeVal - 1));
currentCaption = document.getElementById('caption' + wholeVal);
nextCaption = document.getElementById('caption' + (wholeVal + 1));
$('.image').css("opacity", 0);
$('.label').css("opacity", 0);
$('.caption').css("opacity", 0);
$(currentImage).css("opacity", 1);
$(currentLabel).css("opacity", 1);
$(currentCaption).css("opacity", 1);
last = wholeVal;
lastStop = wholeVal;
}
});
}
//This function draws the tick marks/pips for the slider. It must be called after the slider's max is set.
function setSliderTicks(){
var $slider = $('#slider');
var max = $slider.slider("option", "max");
if (max > 1) {
var spacing = 100 / (max -1);
} else {
var spacing = 50;
}
$slider.find('.ui-slider-tick-mark').remove();
for (var i = 0; i < max ; i++) {
$('<span class="ui-slider-tick-mark"></span>').css('left', (spacing * i) + '%').appendTo($slider);
}
}
// Extract the text from the template .html() is the jquery helper method for that
var image_template = $('#image-temp').html();
var label_template = $('#label-temp').html();
var caption_template = $('#caption-temp').html();
// Compile that into an handlebars template
var imageTemplate = Handlebars.compile(image_template);
var labelTemplate = Handlebars.compile(label_template);
var captionTemplate = Handlebars.compile(caption_template);
// Retrieve the placeHolder where the Posts will be displayed
var imageHolder = $("#slide_images");
var labelHolder = $("#slide_labels");
var captionHolder = $("#slide_captions");
var imgArr = []
var slideNum = 0;
for(var key in s9.initialParams) {
if (key.slice(0, 5) == 'image'){
slideNum++;
imgArr.push(s9.initialParams[key]);
}
}
var slideInfo = {
'slideData': []
};
for(var i=0; i<slideNum; i++){
var temp = i+1;
// console.log("loading slide " + temp);
if (!s9.initialParams['image'+(i+1)]) {
continue;
}
slideInfo['slideData'].push({
image : s9.initialParams['image'+(i+1)],
caption : s9.initialParams['caption'+(i+1)],
label : s9.initialParams['label'+(i+1)],
index : temp
});
};
if (slideInfo.slideData.length == 0){
//Show default - placeholder images.
slideInfo.slideData = [{
image: 'img/placeholder-image1.svg',
caption: 'Caption 1',
label: 'Label 1',
index: 1,
},
{ image: 'img/placeholder-image2.svg',
caption: 'Caption 2',
label: 'Label 2',
index: 2,
},
{
image: 'img/placeholder-image3.svg',
caption: 'Caption 3',
label: 'Label 3',
index: 3,
}];
}
//Add each of the slide images, labels, and captions to the DOM
$.each(slideInfo.slideData,function(index,element){
// Generate the HTML for each post
var imagehtml = imageTemplate(element);
var labelhtml = labelTemplate(element);
var captionhtml = captionTemplate(element);
// Render the posts into the page
imageHolder.append(imagehtml);
if (element.label){
labelHolder.append(labelhtml);
}
if (element.caption) {
captionHolder.append(captionhtml);
}
});
//Size the image div to the tallest slide image
function imageSizing() {
$('#slide_images').css({height: 0});
var numImages = $('.image img').length;
var numDone = 0;
$('.image img').each(function() {
$(this).load(function () {
var w = $('#slide_images').outerWidth();
var ratio = w / this.naturalWidth;
var curHeight = $('#slide_images').height() || 0;
var displayH = this.naturalHeight * ratio;
if (displayH > curHeight){
$('#slide_images').css({height: displayH});
}
numDone++;
if (numDone == numImages) {
setSize();
numDone = 0;
}
$(this).off('load');
| {
$('.image').each(function() {
if (this.getAttribute('id') == 'image' + start || this.getAttribute('id') == 'image' + end) {
return;
}
$(this).css({opacity: 0});
});
} | identifier_body |
iconfont.js | 4.872 0 0 1 5.048 4.568l2.544 52.656a4.816 4.816 0 0 1-4.56 5.04h-0.232z" fill="#FDF1F6" ></path>' +
'' +
'<path d="M378.752 490.664l-0.232-0.008-52.656-2.552a4.808 4.808 0 0 1 0.472-9.616l52.64 2.56a4.8 4.8 0 0 1-0.224 9.616z" fill="#FDF1F6" ></path>' +
'' +
'<path d="M378.736 490.664a4.824 4.824 0 0 1-4.8-4.576l-2.544-52.656a4.808 4.808 0 0 1 4.56-5.04 4.848 4.848 0 0 1 5.032 4.568l2.56 52.664a4.792 4.792 0 0 1-4.576 5.032l-0.232 0.008z" fill="#FDF1F6" ></path>' +
'' +
'</symbol>' +
'' +
'<symbol id="icon-chakangengduo" viewBox="0 0 1027 1024">' +
'' +
'<path d="M512 0c-282.799601 0-512 229.200399-512 512s229.200399 512 512 512S1024 794.799601 1024 512 794.799601 0 512 0z m1.020937 989.798604c-263.912263 0-477.798604-213.886341-477.798604-477.798604s213.886341-477.798604 477.798604-477.798604 477.798604 213.886341 477.798604 477.798604-213.886341 477.798604-477.798604 477.798604z" fill="" ></path>' +
'' +
'<path d="M860.139581 512c0-4.594217-2.041874-9.188435-5.104686-12.251246l-191.936191-191.936192c-3.062812-3.062812-7.14656-4.594217-11.740778-4.594217-9.188435 0-16.845464 7.657029-16.845463 16.845464 0 4.594217 1.531406 8.677966 4.594217 11.740777l0.510469 0.510469 161.308075 161.308076h-620.219342c-9.188435 0-16.845464 7.657029-16.845463 16.845463s7.657029 16.845464 16.845463 16.845464h623.792622l-164.881355 164.881356c-3.062812 3.062812-5.104686 7.14656-5.104686 12.251246 0 9.188435 7.657029 16.845464 16.845463 16.845464 4.594217 0 8.677966-2.041874 11.740778-5.104686l180.195414-180.195414 12.251246-12.251246c2.552343-3.062812 4.594217-7.14656 4.594217-11.740778z" fill="" ></path>' +
'' +
'</symbol>' +
'' +
'<symbol id="icon-chakangengduo1" viewBox="0 0 1024 1024">' +
'' +
'<path d="M67.54304 958.23872l-24.68352-30.31552 519.63904-423.17824L39.67488 81.51552l24.6016-30.39232 560.25088 453.53472z" ></path>' +
'' +
'<path d="M435.01056 971.13088l-24.68352-30.31552 519.64416-423.17824-522.82368-423.23456 24.59648-30.3872 560.25088 453.53472z" ></path>' +
'' +
'</symbol>' +
'' +
'</svg>'
var script = function() {
var scripts = document.getElementsByTagName('script')
return scripts[scripts.length - 1]
}()
var shouldInjectCss = script.getAttribute("data-injectcss")
/**
* document ready
*/
var ready = function(fn) {
if (document.addEventListener) {
if (~["complete", "loaded", "interactive"].indexOf(document.readyState)) {
setTimeout(fn, 0)
} else {
var loadFn = function() {
document.removeEventListener("DOMContentLoaded", loadFn, false)
fn()
}
document.addEventListener("DOMContentLoaded", loadFn, false)
}
} else if (document.attachEvent) {
IEContentLoaded(window, fn)
}
function IEContentLoaded(w, fn) | {
var d = w.document,
done = false,
// only fire once
init = function() {
if (!done) {
done = true
fn()
}
}
// polling for no errors
var polling = function() {
try {
// throws errors until after ondocumentready
d.documentElement.doScroll('left')
} catch (e) {
setTimeout(polling, 50)
return
}
// no errors, fire | identifier_body | |
iconfont.js | .616l52.64 2.56a4.8 4.8 0 0 1-0.224 9.616z" fill="#FDF1F6" ></path>' +
'' +
'<path d="M378.736 490.664a4.824 4.824 0 0 1-4.8-4.576l-2.544-52.656a4.808 4.808 0 0 1 4.56-5.04 4.848 4.848 0 0 1 5.032 4.568l2.56 52.664a4.792 4.792 0 0 1-4.576 5.032l-0.232 0.008z" fill="#FDF1F6" ></path>' +
'' +
'</symbol>' +
'' +
'<symbol id="icon-chakangengduo" viewBox="0 0 1027 1024">' +
'' +
'<path d="M512 0c-282.799601 0-512 229.200399-512 512s229.200399 512 512 512S1024 794.799601 1024 512 794.799601 0 512 0z m1.020937 989.798604c-263.912263 0-477.798604-213.886341-477.798604-477.798604s213.886341-477.798604 477.798604-477.798604 477.798604 213.886341 477.798604 477.798604-213.886341 477.798604-477.798604 477.798604z" fill="" ></path>' +
'' +
'<path d="M860.139581 512c0-4.594217-2.041874-9.188435-5.104686-12.251246l-191.936191-191.936192c-3.062812-3.062812-7.14656-4.594217-11.740778-4.594217-9.188435 0-16.845464 7.657029-16.845463 16.845464 0 4.594217 1.531406 8.677966 4.594217 11.740777l0.510469 0.510469 161.308075 161.308076h-620.219342c-9.188435 0-16.845464 7.657029-16.845463 16.845463s7.657029 16.845464 16.845463 16.845464h623.792622l-164.881355 164.881356c-3.062812 3.062812-5.104686 7.14656-5.104686 12.251246 0 9.188435 7.657029 16.845464 16.845463 16.845464 4.594217 0 8.677966-2.041874 11.740778-5.104686l180.195414-180.195414 12.251246-12.251246c2.552343-3.062812 4.594217-7.14656 4.594217-11.740778z" fill="" ></path>' +
'' +
'</symbol>' +
'' +
'<symbol id="icon-chakangengduo1" viewBox="0 0 1024 1024">' +
'' +
'<path d="M67.54304 958.23872l-24.68352-30.31552 519.63904-423.17824L39.67488 81.51552l24.6016-30.39232 560.25088 453.53472z" ></path>' +
'' +
'<path d="M435.01056 971.13088l-24.68352-30.31552 519.64416-423.17824-522.82368-423.23456 24.59648-30.3872 560.25088 453.53472z" ></path>' +
'' +
'</symbol>' +
'' +
'</svg>'
var script = function() {
var scripts = document.getElementsByTagName('script')
return scripts[scripts.length - 1]
}()
var shouldInjectCss = script.getAttribute("data-injectcss")
/**
* document ready
*/
var ready = function(fn) {
if (document.addEventListener) {
if (~["complete", "loaded", "interactive"].indexOf(document.readyState)) {
setTimeout(fn, 0)
} else {
var loadFn = function() {
document.removeEventListener("DOMContentLoaded", loadFn, false)
fn()
}
document.addEventListener("DOMContentLoaded", loadFn, false)
}
} else if (document.attachEvent) {
IEContentLoaded(window, fn)
}
function IEContentLoaded(w, fn) {
var d = w.document,
done = false,
// only fire once
init = function() {
if (!done) {
done = true
fn()
}
}
// polling for no errors
var polling = function() {
try {
// throws errors until after ondocumentready
d.documentElement.doScroll('left')
} catch (e) {
setTimeout(polling, 50)
return
}
// no errors, fire
init()
};
polling()
// trying to always fire before onload
d.onreadystatechange = function() {
if (d.readyState == 'complete') {
d.onreadystatechange = null
init()
}
}
}
}
/**
* Insert el before target
*
* @param {Element} el
* @param {Element} target
*/
var before = function(el, target) {
target.parentNode.insertBefore(el, target)
}
/**
* Prepend el to target
*
* @param {Element} el
* @param {Element} target
*/
var prepend = function(el, target) {
if (target.firstChild) | {
before(el, target.firstChild)
} | conditional_block | |
iconfont.js | a4.8 4.8 0 0 1-0.224 9.616z" fill="#FDF1F6" ></path>' +
'' +
'<path d="M378.736 490.664a4.824 4.824 0 0 1-4.8-4.576l-2.544-52.656a4.808 4.808 0 0 1 4.56-5.04 4.848 4.848 0 0 1 5.032 4.568l2.56 52.664a4.792 4.792 0 0 1-4.576 5.032l-0.232 0.008z" fill="#FDF1F6" ></path>' +
'' +
'</symbol>' +
'' +
'<symbol id="icon-chakangengduo" viewBox="0 0 1027 1024">' +
'' +
'<path d="M512 0c-282.799601 0-512 229.200399-512 512s229.200399 512 512 512S1024 794.799601 1024 512 794.799601 0 512 0z m1.020937 989.798604c-263.912263 0-477.798604-213.886341-477.798604-477.798604s213.886341-477.798604 477.798604-477.798604 477.798604 213.886341 477.798604 477.798604-213.886341 477.798604-477.798604 477.798604z" fill="" ></path>' +
'' +
'<path d="M860.139581 512c0-4.594217-2.041874-9.188435-5.104686-12.251246l-191.936191-191.936192c-3.062812-3.062812-7.14656-4.594217-11.740778-4.594217-9.188435 0-16.845464 7.657029-16.845463 16.845464 0 4.594217 1.531406 8.677966 4.594217 11.740777l0.510469 0.510469 161.308075 161.308076h-620.219342c-9.188435 0-16.845464 7.657029-16.845463 16.845463s7.657029 16.845464 16.845463 16.845464h623.792622l-164.881355 164.881356c-3.062812 3.062812-5.104686 7.14656-5.104686 12.251246 0 9.188435 7.657029 16.845464 16.845463 16.845464 4.594217 0 8.677966-2.041874 11.740778-5.104686l180.195414-180.195414 12.251246-12.251246c2.552343-3.062812 4.594217-7.14656 4.594217-11.740778z" fill="" ></path>' +
'' +
'</symbol>' +
'' +
'<symbol id="icon-chakangengduo1" viewBox="0 0 1024 1024">' +
'' +
'<path d="M67.54304 958.23872l-24.68352-30.31552 519.63904-423.17824L39.67488 81.51552l24.6016-30.39232 560.25088 453.53472z" ></path>' +
'' +
'<path d="M435.01056 971.13088l-24.68352-30.31552 519.64416-423.17824-522.82368-423.23456 24.59648-30.3872 560.25088 453.53472z" ></path>' +
'' +
'</symbol>' +
'' +
'</svg>'
var script = function() {
var scripts = document.getElementsByTagName('script')
return scripts[scripts.length - 1]
}()
var shouldInjectCss = script.getAttribute("data-injectcss")
/**
* document ready
*/
var ready = function(fn) {
if (document.addEventListener) {
if (~["complete", "loaded", "interactive"].indexOf(document.readyState)) {
setTimeout(fn, 0)
} else {
var loadFn = function() {
document.removeEventListener("DOMContentLoaded", loadFn, false)
fn()
}
document.addEventListener("DOMContentLoaded", loadFn, false)
}
} else if (document.attachEvent) {
IEContentLoaded(window, fn)
}
function IEContentLoaded(w, fn) {
var d = w.document,
done = false,
// only fire once
init = function() {
if (!done) {
done = true
fn()
}
}
// polling for no errors
var polling = function() {
try {
// throws errors until after ondocumentready
d.documentElement.doScroll('left')
} catch (e) {
setTimeout(polling, 50)
return
}
// no errors, fire
init()
};
polling()
// trying to always fire before onload
d.onreadystatechange = function() {
if (d.readyState == 'complete') {
d.onreadystatechange = null
init()
}
}
}
}
/**
* Insert el before target
*
* @param {Element} el
* @param {Element} target
*/
var before = function(el, target) {
target.parentNode.insertBefore(el, target)
}
/**
* Prepend el to target
*
* @param {Element} el
* @param {Element} target
*/
var prepend = function(el, target) {
if (target.firstChild) {
before(el, target.firstChild)
} else {
target.appendChild(el)
}
}
function | appendSvg | identifier_name | |
iconfont.js | .928l-0.248-0.008-52.656-2.56a4.784 4.784 0 0 1-4.56-5.032 4.832 4.832 0 0 1 5.032-4.568l52.656 2.56a4.784 4.784 0 0 1 4.56 5.032 4.784 4.784 0 0 1-4.784 4.576z" fill="#FDF1F6" ></path>' +
'' +
'<path d="M469.016 631.024a4.824 4.824 0 0 1-4.816-4.576l-2.544-52.648a4.8 4.8 0 0 1 4.56-5.04 4.872 4.872 0 0 1 5.048 4.568l2.544 52.656a4.816 4.816 0 0 1-4.56 5.04h-0.232z" fill="#FDF1F6" ></path>' +
'' +
'<path d="M378.752 490.664l-0.232-0.008-52.656-2.552a4.808 4.808 0 0 1 0.472-9.616l52.64 2.56a4.8 4.8 0 0 1-0.224 9.616z" fill="#FDF1F6" ></path>' +
'' +
'<path d="M378.736 490.664a4.824 4.824 0 0 1-4.8-4.576l-2.544-52.656a4.808 4.808 0 0 1 4.56-5.04 4.848 4.848 0 0 1 5.032 4.568l2.56 52.664a4.792 4.792 0 0 1-4.576 5.032l-0.232 0.008z" fill="#FDF1F6" ></path>' +
'' +
'</symbol>' +
'' +
'<symbol id="icon-chakangengduo" viewBox="0 0 1027 1024">' +
'' +
'<path d="M512 0c-282.799601 0-512 229.200399-512 512s229.200399 512 512 512S1024 794.799601 1024 512 794.799601 0 512 0z m1.020937 989.798604c-263.912263 0-477.798604-213.886341-477.798604-477.798604s213.886341-477.798604 477.798604-477.798604 477.798604 213.886341 477.798604 477.798604-213.886341 477.798604-477.798604 477.798604z" fill="" ></path>' +
'' +
'<path d="M860.139581 512c0-4.594217-2.041874-9.188435-5.104686-12.251246l-191.936191-191.936192c-3.062812-3.062812-7.14656-4.594217-11.740778-4.594217-9.188435 0-16.845464 7.657029-16.845463 16.845464 0 4.594217 1.531406 8.677966 4.594217 11.740777l0.510469 0.510469 161.308075 161.308076h-620.219342c-9.188435 0-16.845464 7.657029-16.845463 16.845463s7.657029 16.845464 16.845463 16.845464h623.792622l-164.881355 164.881356c-3.062812 3.062812-5.104686 7.14656-5.104686 12.251246 0 9.188435 7.657029 16.845464 16.845463 16.845464 4.594217 0 8.677966-2.041874 11.740778-5.104686l180.195414-180.195414 12.251246-12.251246c2.552343-3.062812 4.594217-7.14656 4.594217-11.740778z" fill="" ></path>' +
'' +
'</symbol>' +
'' +
'<symbol id="icon-chakangengduo1" viewBox="0 0 1024 1024">' +
'' +
'<path d="M67.54304 958.23872l-24.68352-30.31552 519.63904-423.17824L39.67488 81.51552l24.6016-30.39232 560.25088 453.53472z" ></path>' +
'' +
'<path d="M435.01056 971.13088l-24.68352-30.31552 519.64416-423.17824-522.82368-423.23456 24.59648-30.3872 560.25088 453.53472z" ></path>' +
'' + | '</symbol>' + | random_line_split | |
main.rs | AZIOT_IDENTITYD_CONFIG_DIR",
"/etc/aziot/identityd/config.d",
)
.await?
}
ProcessName::Keyd => {
run(
aziot_keyd::main,
"AZIOT_KEYD_CONFIG",
"/etc/aziot/keyd/config.toml",
"AZIOT_KEYD_CONFIG_DIR",
"/etc/aziot/keyd/config.d",
)
.await?
}
ProcessName::Tpmd => {
run(
aziot_tpmd::main,
"AZIOT_TPMD_CONFIG",
"/etc/aziot/tpmd/config.toml",
"AZIOT_TPMD_CONFIG_DIR",
"/etc/aziot/tpmd/config.d",
)
.await?
}
}
Ok(())
}
#[derive(Clone, Copy, Debug, PartialEq)]
enum ProcessName {
Certd,
Identityd,
Keyd,
Tpmd,
}
/// If the symlink is being used to invoke this binary, the process name can be determined
/// from the first arg, ie `argv[0]` in C terms.
///
/// An alternative is supported where the binary is invoked as aziotd itself,
/// and the process name is instead the next arg, ie `argv[1]` in C terms.
/// This is primary useful for local development, so it's only allowed in debug builds.
fn process_name_from_args<I>(args: &mut I) -> Result<ProcessName, Error>
where
I: Iterator,
<I as Iterator>::Item: AsRef<std::ffi::OsStr>,
{
let arg = args.next().ok_or_else(|| {
ErrorKind::GetProcessName("could not extract process name from args".into())
})?;
// arg could be a single component like "aziot-certd", or a path that ends with "aziot-certd",
// so parse it as a Path and get the last component. This does the right thing in either case.
let arg = std::path::Path::new(&arg);
let process_name = arg.file_name().ok_or_else(|| {
ErrorKind::GetProcessName(
format!(
"could not extract process name from arg {:?}",
arg.display(),
)
.into(),
)
})?;
match process_name.to_str() {
Some("aziot-certd") => Ok(ProcessName::Certd),
Some("aziot-identityd") => Ok(ProcessName::Identityd),
Some("aziot-keyd") => Ok(ProcessName::Keyd),
Some("aziot-tpmd") => Ok(ProcessName::Tpmd),
// The next arg is the process name
#[cfg(debug_assertions)]
Some("aziotd") => process_name_from_args(args),
_ => Err(ErrorKind::GetProcessName(
format!("unrecognized process name {:?}", process_name).into(),
)
.into()),
}
}
async fn run<TMain, TConfig, TFuture, TServer>(
main: TMain,
config_env_var: &str,
config_file_default: &str,
config_directory_env_var: &str,
config_directory_default: &str,
) -> Result<(), Error>
where
TMain: FnOnce(TConfig) -> TFuture,
TConfig: serde::de::DeserializeOwned,
TFuture: std::future::Future< | hyper::Request<hyper::Body>,
Response = hyper::Response<hyper::Body>,
Error = std::convert::Infallible,
> + Clone
+ Send
+ 'static,
<TServer as hyper::service::Service<hyper::Request<hyper::Body>>>::Future: Send,
{
log::info!("Starting service...");
log::info!(
"Version - {}",
option_env!("PACKAGE_VERSION").unwrap_or("dev build"),
);
let config_path: std::path::PathBuf =
std::env::var_os(config_env_var).map_or_else(|| config_file_default.into(), Into::into);
let config = std::fs::read(&config_path)
.map_err(|err| ErrorKind::ReadConfig(Some(config_path.clone()), Box::new(err)))?;
let mut config: toml::Value = toml::from_slice(&config)
.map_err(|err| ErrorKind::ReadConfig(Some(config_path), Box::new(err)))?;
let config_directory_path: std::path::PathBuf = std::env::var_os(config_directory_env_var)
.map_or_else(|| config_directory_default.into(), Into::into);
match std::fs::read_dir(&config_directory_path) {
Ok(entries) => {
let mut patch_paths = vec![];
for entry in entries {
let entry = entry.map_err(|err| {
ErrorKind::ReadConfig(Some(config_directory_path.clone()), Box::new(err))
})?;
let entry_file_type = entry.file_type().map_err(|err| {
ErrorKind::ReadConfig(Some(config_directory_path.clone()), Box::new(err))
})?;
if !entry_file_type.is_file() {
continue;
}
let patch_path = entry.path();
if patch_path.extension().and_then(std::ffi::OsStr::to_str) != Some("toml") {
continue;
}
patch_paths.push(patch_path);
}
patch_paths.sort();
for patch_path in patch_paths {
let patch = std::fs::read(&patch_path).map_err(|err| {
ErrorKind::ReadConfig(Some(patch_path.clone()), Box::new(err))
})?;
let patch: toml::Value = toml::from_slice(&patch)
.map_err(|err| ErrorKind::ReadConfig(Some(patch_path), Box::new(err)))?;
merge_toml(&mut config, patch);
}
}
Err(err) if err.kind() == std::io::ErrorKind::NotFound => (),
Err(err) => {
return Err(ErrorKind::ReadConfig(Some(config_directory_path), Box::new(err)).into())
}
}
let config: TConfig = serde::Deserialize::deserialize(config)
.map_err(|err| ErrorKind::ReadConfig(None, Box::new(err)))?;
let (connector, server) = main(config).await.map_err(ErrorKind::Service)?;
log::info!("Starting server...");
let mut incoming = connector
.incoming()
.await
.map_err(|err| ErrorKind::Service(Box::new(err)))?;
let () = incoming
.serve(server)
.await
.map_err(|err| ErrorKind::Service(Box::new(err)))?;
log::info!("Stopped server.");
Ok(())
}
fn merge_toml(base: &mut toml::Value, patch: toml::Value) {
// Similar to JSON patch, except that:
//
// - Maps are called tables.
// - There is no equivalent of null that can be used to remove keys from an object.
// - Arrays are merged via concatenating the patch to the base, rather than replacing the base with the patch.
if let toml::Value::Table(base) = base {
if let toml::Value::Table(patch) = patch {
for (key, value) in patch {
// Insert a dummy `false` if the original key didn't exist at all. It'll be overwritten by `value` in that case.
let original_value = base.entry(key).or_insert(toml::Value::Boolean(false));
merge_toml(original_value, value);
}
return;
}
}
if let toml::Value::Array(base) = base {
if let toml::Value::Array(patch) = patch {
base.extend(patch);
return;
}
}
*base = patch;
}
#[cfg(test)]
mod tests {
#[test]
fn process_name_from_args() {
// Success test cases
let mut test_cases = vec![
(&["aziot-certd"][..], super::ProcessName::Certd),
(&["aziot-identityd"][..], super::ProcessName::Identityd),
(&["aziot-keyd"][..], super::ProcessName::Keyd),
(&["aziot-tpmd"][..], super::ProcessName::Tpmd),
(
&["/usr/libexec/aziot/aziot-certd"][..],
super::ProcessName::Certd,
),
(
&["/usr/libexec/aziot/aziot-identityd"][..],
super::ProcessName::Identityd,
),
(
&["/usr/libexec/aziot/aziot-keyd"][..],
super::ProcessName::Keyd,
),
(
&["/usr/libexec/aziot/aziot-tpmd"][..],
super::ProcessName::Tpmd,
),
];
// argv[1] fallback is only in release builds.
if cfg!(debug_assertions) {
test_cases.extend_from_slice(&[
(&["aziotd", "aziot-certd"][..], super::ProcessName::Certd),
(
&["aziotd", "aziot-identityd"][..],
super::ProcessName::Identityd,
),
(&["aziotd", "aziot-keyd"][..], super:: | Output = Result<(http_common::Connector, TServer), Box<dyn std::error::Error>>,
>,
TServer: hyper::service::Service< | random_line_split |
main.rs | IOT_IDENTITYD_CONFIG_DIR",
"/etc/aziot/identityd/config.d",
)
.await?
}
ProcessName::Keyd => {
run(
aziot_keyd::main,
"AZIOT_KEYD_CONFIG",
"/etc/aziot/keyd/config.toml",
"AZIOT_KEYD_CONFIG_DIR",
"/etc/aziot/keyd/config.d",
)
.await?
}
ProcessName::Tpmd => |
}
Ok(())
}
#[derive(Clone, Copy, Debug, PartialEq)]
enum ProcessName {
Certd,
Identityd,
Keyd,
Tpmd,
}
/// If the symlink is being used to invoke this binary, the process name can be determined
/// from the first arg, ie `argv[0]` in C terms.
///
/// An alternative is supported where the binary is invoked as aziotd itself,
/// and the process name is instead the next arg, ie `argv[1]` in C terms.
/// This is primary useful for local development, so it's only allowed in debug builds.
fn process_name_from_args<I>(args: &mut I) -> Result<ProcessName, Error>
where
I: Iterator,
<I as Iterator>::Item: AsRef<std::ffi::OsStr>,
{
let arg = args.next().ok_or_else(|| {
ErrorKind::GetProcessName("could not extract process name from args".into())
})?;
// arg could be a single component like "aziot-certd", or a path that ends with "aziot-certd",
// so parse it as a Path and get the last component. This does the right thing in either case.
let arg = std::path::Path::new(&arg);
let process_name = arg.file_name().ok_or_else(|| {
ErrorKind::GetProcessName(
format!(
"could not extract process name from arg {:?}",
arg.display(),
)
.into(),
)
})?;
match process_name.to_str() {
Some("aziot-certd") => Ok(ProcessName::Certd),
Some("aziot-identityd") => Ok(ProcessName::Identityd),
Some("aziot-keyd") => Ok(ProcessName::Keyd),
Some("aziot-tpmd") => Ok(ProcessName::Tpmd),
// The next arg is the process name
#[cfg(debug_assertions)]
Some("aziotd") => process_name_from_args(args),
_ => Err(ErrorKind::GetProcessName(
format!("unrecognized process name {:?}", process_name).into(),
)
.into()),
}
}
async fn run<TMain, TConfig, TFuture, TServer>(
main: TMain,
config_env_var: &str,
config_file_default: &str,
config_directory_env_var: &str,
config_directory_default: &str,
) -> Result<(), Error>
where
TMain: FnOnce(TConfig) -> TFuture,
TConfig: serde::de::DeserializeOwned,
TFuture: std::future::Future<
Output = Result<(http_common::Connector, TServer), Box<dyn std::error::Error>>,
>,
TServer: hyper::service::Service<
hyper::Request<hyper::Body>,
Response = hyper::Response<hyper::Body>,
Error = std::convert::Infallible,
> + Clone
+ Send
+ 'static,
<TServer as hyper::service::Service<hyper::Request<hyper::Body>>>::Future: Send,
{
log::info!("Starting service...");
log::info!(
"Version - {}",
option_env!("PACKAGE_VERSION").unwrap_or("dev build"),
);
let config_path: std::path::PathBuf =
std::env::var_os(config_env_var).map_or_else(|| config_file_default.into(), Into::into);
let config = std::fs::read(&config_path)
.map_err(|err| ErrorKind::ReadConfig(Some(config_path.clone()), Box::new(err)))?;
let mut config: toml::Value = toml::from_slice(&config)
.map_err(|err| ErrorKind::ReadConfig(Some(config_path), Box::new(err)))?;
let config_directory_path: std::path::PathBuf = std::env::var_os(config_directory_env_var)
.map_or_else(|| config_directory_default.into(), Into::into);
match std::fs::read_dir(&config_directory_path) {
Ok(entries) => {
let mut patch_paths = vec![];
for entry in entries {
let entry = entry.map_err(|err| {
ErrorKind::ReadConfig(Some(config_directory_path.clone()), Box::new(err))
})?;
let entry_file_type = entry.file_type().map_err(|err| {
ErrorKind::ReadConfig(Some(config_directory_path.clone()), Box::new(err))
})?;
if !entry_file_type.is_file() {
continue;
}
let patch_path = entry.path();
if patch_path.extension().and_then(std::ffi::OsStr::to_str) != Some("toml") {
continue;
}
patch_paths.push(patch_path);
}
patch_paths.sort();
for patch_path in patch_paths {
let patch = std::fs::read(&patch_path).map_err(|err| {
ErrorKind::ReadConfig(Some(patch_path.clone()), Box::new(err))
})?;
let patch: toml::Value = toml::from_slice(&patch)
.map_err(|err| ErrorKind::ReadConfig(Some(patch_path), Box::new(err)))?;
merge_toml(&mut config, patch);
}
}
Err(err) if err.kind() == std::io::ErrorKind::NotFound => (),
Err(err) => {
return Err(ErrorKind::ReadConfig(Some(config_directory_path), Box::new(err)).into())
}
}
let config: TConfig = serde::Deserialize::deserialize(config)
.map_err(|err| ErrorKind::ReadConfig(None, Box::new(err)))?;
let (connector, server) = main(config).await.map_err(ErrorKind::Service)?;
log::info!("Starting server...");
let mut incoming = connector
.incoming()
.await
.map_err(|err| ErrorKind::Service(Box::new(err)))?;
let () = incoming
.serve(server)
.await
.map_err(|err| ErrorKind::Service(Box::new(err)))?;
log::info!("Stopped server.");
Ok(())
}
fn merge_toml(base: &mut toml::Value, patch: toml::Value) {
// Similar to JSON patch, except that:
//
// - Maps are called tables.
// - There is no equivalent of null that can be used to remove keys from an object.
// - Arrays are merged via concatenating the patch to the base, rather than replacing the base with the patch.
if let toml::Value::Table(base) = base {
if let toml::Value::Table(patch) = patch {
for (key, value) in patch {
// Insert a dummy `false` if the original key didn't exist at all. It'll be overwritten by `value` in that case.
let original_value = base.entry(key).or_insert(toml::Value::Boolean(false));
merge_toml(original_value, value);
}
return;
}
}
if let toml::Value::Array(base) = base {
if let toml::Value::Array(patch) = patch {
base.extend(patch);
return;
}
}
*base = patch;
}
#[cfg(test)]
mod tests {
#[test]
fn process_name_from_args() {
// Success test cases
let mut test_cases = vec![
(&["aziot-certd"][..], super::ProcessName::Certd),
(&["aziot-identityd"][..], super::ProcessName::Identityd),
(&["aziot-keyd"][..], super::ProcessName::Keyd),
(&["aziot-tpmd"][..], super::ProcessName::Tpmd),
(
&["/usr/libexec/aziot/aziot-certd"][..],
super::ProcessName::Certd,
),
(
&["/usr/libexec/aziot/aziot-identityd"][..],
super::ProcessName::Identityd,
),
(
&["/usr/libexec/aziot/aziot-keyd"][..],
super::ProcessName::Keyd,
),
(
&["/usr/libexec/aziot/aziot-tpmd"][..],
super::ProcessName::Tpmd,
),
];
// argv[1] fallback is only in release builds.
if cfg!(debug_assertions) {
test_cases.extend_from_slice(&[
(&["aziotd", "aziot-certd"][..], super::ProcessName::Certd),
(
&["aziotd", "aziot-identityd"][..],
super::ProcessName::Identityd,
),
(&["aziotd", "aziot-keyd"][..], super | {
run(
aziot_tpmd::main,
"AZIOT_TPMD_CONFIG",
"/etc/aziot/tpmd/config.toml",
"AZIOT_TPMD_CONFIG_DIR",
"/etc/aziot/tpmd/config.d",
)
.await?
} | conditional_block |
main.rs | let arg = std::path::Path::new(&arg);
let process_name = arg.file_name().ok_or_else(|| {
ErrorKind::GetProcessName(
format!(
"could not extract process name from arg {:?}",
arg.display(),
)
.into(),
)
})?;
match process_name.to_str() {
Some("aziot-certd") => Ok(ProcessName::Certd),
Some("aziot-identityd") => Ok(ProcessName::Identityd),
Some("aziot-keyd") => Ok(ProcessName::Keyd),
Some("aziot-tpmd") => Ok(ProcessName::Tpmd),
// The next arg is the process name
#[cfg(debug_assertions)]
Some("aziotd") => process_name_from_args(args),
_ => Err(ErrorKind::GetProcessName(
format!("unrecognized process name {:?}", process_name).into(),
)
.into()),
}
}
async fn run<TMain, TConfig, TFuture, TServer>(
main: TMain,
config_env_var: &str,
config_file_default: &str,
config_directory_env_var: &str,
config_directory_default: &str,
) -> Result<(), Error>
where
TMain: FnOnce(TConfig) -> TFuture,
TConfig: serde::de::DeserializeOwned,
TFuture: std::future::Future<
Output = Result<(http_common::Connector, TServer), Box<dyn std::error::Error>>,
>,
TServer: hyper::service::Service<
hyper::Request<hyper::Body>,
Response = hyper::Response<hyper::Body>,
Error = std::convert::Infallible,
> + Clone
+ Send
+ 'static,
<TServer as hyper::service::Service<hyper::Request<hyper::Body>>>::Future: Send,
{
log::info!("Starting service...");
log::info!(
"Version - {}",
option_env!("PACKAGE_VERSION").unwrap_or("dev build"),
);
let config_path: std::path::PathBuf =
std::env::var_os(config_env_var).map_or_else(|| config_file_default.into(), Into::into);
let config = std::fs::read(&config_path)
.map_err(|err| ErrorKind::ReadConfig(Some(config_path.clone()), Box::new(err)))?;
let mut config: toml::Value = toml::from_slice(&config)
.map_err(|err| ErrorKind::ReadConfig(Some(config_path), Box::new(err)))?;
let config_directory_path: std::path::PathBuf = std::env::var_os(config_directory_env_var)
.map_or_else(|| config_directory_default.into(), Into::into);
match std::fs::read_dir(&config_directory_path) {
Ok(entries) => {
let mut patch_paths = vec![];
for entry in entries {
let entry = entry.map_err(|err| {
ErrorKind::ReadConfig(Some(config_directory_path.clone()), Box::new(err))
})?;
let entry_file_type = entry.file_type().map_err(|err| {
ErrorKind::ReadConfig(Some(config_directory_path.clone()), Box::new(err))
})?;
if !entry_file_type.is_file() {
continue;
}
let patch_path = entry.path();
if patch_path.extension().and_then(std::ffi::OsStr::to_str) != Some("toml") {
continue;
}
patch_paths.push(patch_path);
}
patch_paths.sort();
for patch_path in patch_paths {
let patch = std::fs::read(&patch_path).map_err(|err| {
ErrorKind::ReadConfig(Some(patch_path.clone()), Box::new(err))
})?;
let patch: toml::Value = toml::from_slice(&patch)
.map_err(|err| ErrorKind::ReadConfig(Some(patch_path), Box::new(err)))?;
merge_toml(&mut config, patch);
}
}
Err(err) if err.kind() == std::io::ErrorKind::NotFound => (),
Err(err) => {
return Err(ErrorKind::ReadConfig(Some(config_directory_path), Box::new(err)).into())
}
}
let config: TConfig = serde::Deserialize::deserialize(config)
.map_err(|err| ErrorKind::ReadConfig(None, Box::new(err)))?;
let (connector, server) = main(config).await.map_err(ErrorKind::Service)?;
log::info!("Starting server...");
let mut incoming = connector
.incoming()
.await
.map_err(|err| ErrorKind::Service(Box::new(err)))?;
let () = incoming
.serve(server)
.await
.map_err(|err| ErrorKind::Service(Box::new(err)))?;
log::info!("Stopped server.");
Ok(())
}
fn merge_toml(base: &mut toml::Value, patch: toml::Value) {
// Similar to JSON patch, except that:
//
// - Maps are called tables.
// - There is no equivalent of null that can be used to remove keys from an object.
// - Arrays are merged via concatenating the patch to the base, rather than replacing the base with the patch.
if let toml::Value::Table(base) = base {
if let toml::Value::Table(patch) = patch {
for (key, value) in patch {
// Insert a dummy `false` if the original key didn't exist at all. It'll be overwritten by `value` in that case.
let original_value = base.entry(key).or_insert(toml::Value::Boolean(false));
merge_toml(original_value, value);
}
return;
}
}
if let toml::Value::Array(base) = base {
if let toml::Value::Array(patch) = patch {
base.extend(patch);
return;
}
}
*base = patch;
}
#[cfg(test)]
mod tests {
#[test]
fn process_name_from_args() {
// Success test cases
let mut test_cases = vec![
(&["aziot-certd"][..], super::ProcessName::Certd),
(&["aziot-identityd"][..], super::ProcessName::Identityd),
(&["aziot-keyd"][..], super::ProcessName::Keyd),
(&["aziot-tpmd"][..], super::ProcessName::Tpmd),
(
&["/usr/libexec/aziot/aziot-certd"][..],
super::ProcessName::Certd,
),
(
&["/usr/libexec/aziot/aziot-identityd"][..],
super::ProcessName::Identityd,
),
(
&["/usr/libexec/aziot/aziot-keyd"][..],
super::ProcessName::Keyd,
),
(
&["/usr/libexec/aziot/aziot-tpmd"][..],
super::ProcessName::Tpmd,
),
];
// argv[1] fallback is only in release builds.
if cfg!(debug_assertions) {
test_cases.extend_from_slice(&[
(&["aziotd", "aziot-certd"][..], super::ProcessName::Certd),
(
&["aziotd", "aziot-identityd"][..],
super::ProcessName::Identityd,
),
(&["aziotd", "aziot-keyd"][..], super::ProcessName::Keyd),
(
&["/usr/libexec/aziot/aziotd", "aziot-certd"][..],
super::ProcessName::Certd,
),
(
&["/usr/libexec/aziot/aziotd", "aziot-identityd"][..],
super::ProcessName::Identityd,
),
(
&["/usr/libexec/aziot/aziotd", "aziot-keyd"][..],
super::ProcessName::Keyd,
),
(
&["/usr/libexec/aziot/aziotd", "aziot-tpmd"][..],
super::ProcessName::Tpmd,
),
]);
}
for (input, expected) in test_cases {
let mut input = input.iter().copied().map(std::ffi::OsStr::new);
let actual = super::process_name_from_args(&mut input).unwrap();
assert_eq!(None, input.next());
assert_eq!(expected, actual);
}
// Failure test cases
for &input in &[
// Unrecognized process name in argv[0]
&["foo"][..],
&["/usr/libexec/aziot/foo"][..],
&["/usr/libexec/aziot/foo", "aziot-certd"][..],
// Either fails because it's a release build so argv[1] fallback is disabled,
// or fails because it's a debug build where argv[1] fallback is enabled
// but the process name in argv[1] is unrecognized anyway.
&["aziotd", "foo"][..],
&["/usr/libexec/aziot/aziotd", "foo"][..],
] {
let mut input = input.iter().copied().map(std::ffi::OsStr::new);
let _ = super::process_name_from_args(&mut input).unwrap_err();
}
}
#[test]
fn | merge_toml | identifier_name | |
main.rs | "/etc/aziot/identityd/config.toml",
"AZIOT_IDENTITYD_CONFIG_DIR",
"/etc/aziot/identityd/config.d",
)
.await?
}
ProcessName::Keyd => {
run(
aziot_keyd::main,
"AZIOT_KEYD_CONFIG",
"/etc/aziot/keyd/config.toml",
"AZIOT_KEYD_CONFIG_DIR",
"/etc/aziot/keyd/config.d",
)
.await?
}
ProcessName::Tpmd => {
run(
aziot_tpmd::main,
"AZIOT_TPMD_CONFIG",
"/etc/aziot/tpmd/config.toml",
"AZIOT_TPMD_CONFIG_DIR",
"/etc/aziot/tpmd/config.d",
)
.await?
}
}
Ok(())
}
#[derive(Clone, Copy, Debug, PartialEq)]
enum ProcessName {
Certd,
Identityd,
Keyd,
Tpmd,
}
/// If the symlink is being used to invoke this binary, the process name can be determined
/// from the first arg, ie `argv[0]` in C terms.
///
/// An alternative is supported where the binary is invoked as aziotd itself,
/// and the process name is instead the next arg, ie `argv[1]` in C terms.
/// This is primary useful for local development, so it's only allowed in debug builds.
fn process_name_from_args<I>(args: &mut I) -> Result<ProcessName, Error>
where
I: Iterator,
<I as Iterator>::Item: AsRef<std::ffi::OsStr>,
{
let arg = args.next().ok_or_else(|| {
ErrorKind::GetProcessName("could not extract process name from args".into())
})?;
// arg could be a single component like "aziot-certd", or a path that ends with "aziot-certd",
// so parse it as a Path and get the last component. This does the right thing in either case.
let arg = std::path::Path::new(&arg);
let process_name = arg.file_name().ok_or_else(|| {
ErrorKind::GetProcessName(
format!(
"could not extract process name from arg {:?}",
arg.display(),
)
.into(),
)
})?;
match process_name.to_str() {
Some("aziot-certd") => Ok(ProcessName::Certd),
Some("aziot-identityd") => Ok(ProcessName::Identityd),
Some("aziot-keyd") => Ok(ProcessName::Keyd),
Some("aziot-tpmd") => Ok(ProcessName::Tpmd),
// The next arg is the process name
#[cfg(debug_assertions)]
Some("aziotd") => process_name_from_args(args),
_ => Err(ErrorKind::GetProcessName(
format!("unrecognized process name {:?}", process_name).into(),
)
.into()),
}
}
async fn run<TMain, TConfig, TFuture, TServer>(
main: TMain,
config_env_var: &str,
config_file_default: &str,
config_directory_env_var: &str,
config_directory_default: &str,
) -> Result<(), Error>
where
TMain: FnOnce(TConfig) -> TFuture,
TConfig: serde::de::DeserializeOwned,
TFuture: std::future::Future<
Output = Result<(http_common::Connector, TServer), Box<dyn std::error::Error>>,
>,
TServer: hyper::service::Service<
hyper::Request<hyper::Body>,
Response = hyper::Response<hyper::Body>,
Error = std::convert::Infallible,
> + Clone
+ Send
+ 'static,
<TServer as hyper::service::Service<hyper::Request<hyper::Body>>>::Future: Send,
{
log::info!("Starting service...");
log::info!(
"Version - {}",
option_env!("PACKAGE_VERSION").unwrap_or("dev build"),
);
let config_path: std::path::PathBuf =
std::env::var_os(config_env_var).map_or_else(|| config_file_default.into(), Into::into);
let config = std::fs::read(&config_path)
.map_err(|err| ErrorKind::ReadConfig(Some(config_path.clone()), Box::new(err)))?;
let mut config: toml::Value = toml::from_slice(&config)
.map_err(|err| ErrorKind::ReadConfig(Some(config_path), Box::new(err)))?;
let config_directory_path: std::path::PathBuf = std::env::var_os(config_directory_env_var)
.map_or_else(|| config_directory_default.into(), Into::into);
match std::fs::read_dir(&config_directory_path) {
Ok(entries) => {
let mut patch_paths = vec![];
for entry in entries {
let entry = entry.map_err(|err| {
ErrorKind::ReadConfig(Some(config_directory_path.clone()), Box::new(err))
})?;
let entry_file_type = entry.file_type().map_err(|err| {
ErrorKind::ReadConfig(Some(config_directory_path.clone()), Box::new(err))
})?;
if !entry_file_type.is_file() {
continue;
}
let patch_path = entry.path();
if patch_path.extension().and_then(std::ffi::OsStr::to_str) != Some("toml") {
continue;
}
patch_paths.push(patch_path);
}
patch_paths.sort();
for patch_path in patch_paths {
let patch = std::fs::read(&patch_path).map_err(|err| {
ErrorKind::ReadConfig(Some(patch_path.clone()), Box::new(err))
})?;
let patch: toml::Value = toml::from_slice(&patch)
.map_err(|err| ErrorKind::ReadConfig(Some(patch_path), Box::new(err)))?;
merge_toml(&mut config, patch);
}
}
Err(err) if err.kind() == std::io::ErrorKind::NotFound => (),
Err(err) => {
return Err(ErrorKind::ReadConfig(Some(config_directory_path), Box::new(err)).into())
}
}
let config: TConfig = serde::Deserialize::deserialize(config)
.map_err(|err| ErrorKind::ReadConfig(None, Box::new(err)))?;
let (connector, server) = main(config).await.map_err(ErrorKind::Service)?;
log::info!("Starting server...");
let mut incoming = connector
.incoming()
.await
.map_err(|err| ErrorKind::Service(Box::new(err)))?;
let () = incoming
.serve(server)
.await
.map_err(|err| ErrorKind::Service(Box::new(err)))?;
log::info!("Stopped server.");
Ok(())
}
fn merge_toml(base: &mut toml::Value, patch: toml::Value) {
// Similar to JSON patch, except that:
//
// - Maps are called tables.
// - There is no equivalent of null that can be used to remove keys from an object.
// - Arrays are merged via concatenating the patch to the base, rather than replacing the base with the patch.
if let toml::Value::Table(base) = base {
if let toml::Value::Table(patch) = patch {
for (key, value) in patch {
// Insert a dummy `false` if the original key didn't exist at all. It'll be overwritten by `value` in that case.
let original_value = base.entry(key).or_insert(toml::Value::Boolean(false));
merge_toml(original_value, value);
}
return;
}
}
if let toml::Value::Array(base) = base {
if let toml::Value::Array(patch) = patch {
base.extend(patch);
return;
}
}
*base = patch;
}
#[cfg(test)]
mod tests {
#[test]
fn process_name_from_args() {
// Success test cases
let mut test_cases = vec![
(&["aziot-certd"][..], super::ProcessName::Certd),
(&["aziot-identityd"][..], super::ProcessName::Identityd),
(&["aziot-keyd"][..], super::ProcessName::Keyd),
(&["aziot-tpmd"][..], super::ProcessName::Tpmd),
(
&["/usr/libexec/aziot/aziot-certd"][..],
super::ProcessName::Certd,
),
(
&["/usr/libexec/aziot/aziot-identityd"][..],
super::ProcessName::Identityd,
),
(
&["/usr/libexec/aziot/aziot-keyd"][ | {
let mut args = std::env::args_os();
let process_name = process_name_from_args(&mut args)?;
match process_name {
ProcessName::Certd => {
run(
aziot_certd::main,
"AZIOT_CERTD_CONFIG",
"/etc/aziot/certd/config.toml",
"AZIOT_CERTD_CONFIG_DIR",
"/etc/aziot/certd/config.d",
)
.await?
}
ProcessName::Identityd => {
run(
aziot_identityd::main,
"AZIOT_IDENTITYD_CONFIG", | identifier_body | |
blackboxexporter.go | /core/v1"
policyv1 "k8s.io/api/policy/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
vpaautoscalingv1 "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1"
"k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client"
v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants"
resourcesv1alpha1 "github.com/gardener/gardener/pkg/apis/resources/v1alpha1"
"github.com/gardener/gardener/pkg/client/kubernetes"
"github.com/gardener/gardener/pkg/component"
"github.com/gardener/gardener/pkg/resourcemanager/controller/garbagecollector/references"
"github.com/gardener/gardener/pkg/utils"
kubernetesutils "github.com/gardener/gardener/pkg/utils/kubernetes"
"github.com/gardener/gardener/pkg/utils/managedresources"
)
const (
// ManagedResourceName is the name of the ManagedResource containing the resource specifications.
ManagedResourceName = "shoot-core-blackbox-exporter"
labelValue = "blackbox-exporter"
labelKeyComponent = "component"
)
// Interface contains functions for a blackbox-exporter deployer.
type Interface interface {
component.DeployWaiter
component.MonitoringComponent
}
// Values is a set of configuration values for the blackbox-exporter.
type Values struct {
// Image is the container image used for blackbox-exporter.
Image string
// VPAEnabled marks whether VerticalPodAutoscaler is enabled for the shoot.
VPAEnabled bool
// KubernetesVersion is the Kubernetes version of the Shoot.
KubernetesVersion *semver.Version
}
// New creates a new instance of DeployWaiter for blackbox-exporter.
func New(
client client.Client,
namespace string,
values Values,
) Interface {
return &blackboxExporter{
client: client,
namespace: namespace,
values: values,
}
}
type blackboxExporter struct {
client client.Client
namespace string
values Values
}
func (b *blackboxExporter) Deploy(ctx context.Context) error {
data, err := b.computeResourcesData()
if err != nil {
return err
}
return managedresources.CreateForShoot(ctx, b.client, b.namespace, ManagedResourceName, managedresources.LabelValueGardener, false, data)
}
func (b *blackboxExporter) Destroy(ctx context.Context) error {
return managedresources.DeleteForShoot(ctx, b.client, b.namespace, ManagedResourceName)
}
// TimeoutWaitForManagedResource is the timeout used while waiting for the ManagedResources to become healthy
// or deleted.
var TimeoutWaitForManagedResource = 2 * time.Minute
func (b *blackboxExporter) Wait(ctx context.Context) error {
timeoutCtx, cancel := context.WithTimeout(ctx, TimeoutWaitForManagedResource)
defer cancel()
return managedresources.WaitUntilHealthy(timeoutCtx, b.client, b.namespace, ManagedResourceName)
}
func (b *blackboxExporter) WaitCleanup(ctx context.Context) error |
func (b *blackboxExporter) computeResourcesData() (map[string][]byte, error) {
var (
intStrOne = intstr.FromInt(1)
registry = managedresources.NewRegistry(kubernetes.ShootScheme, kubernetes.ShootCodec, kubernetes.ShootSerializer)
serviceAccount = &corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: "blackbox-exporter",
Namespace: metav1.NamespaceSystem,
Labels: getLabels(),
},
AutomountServiceAccountToken: pointer.Bool(false),
}
configMap = &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "blackbox-exporter-config",
Namespace: metav1.NamespaceSystem,
Labels: map[string]string{
v1beta1constants.LabelApp: "prometheus",
v1beta1constants.LabelRole: v1beta1constants.GardenRoleMonitoring,
},
},
Data: map[string]string{
`blackbox.yaml`: `modules:
http_kubernetes_service:
prober: http
timeout: 10s
http:
headers:
Accept: "*/*"
Accept-Language: "en-US"
tls_config:
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
preferred_ip_protocol: "ip4"
`,
},
}
)
utilruntime.Must(kubernetesutils.MakeUnique(configMap))
var (
deployment = &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: "blackbox-exporter",
Namespace: metav1.NamespaceSystem,
Labels: utils.MergeStringMaps(
getLabels(),
map[string]string{
resourcesv1alpha1.HighAvailabilityConfigType: resourcesv1alpha1.HighAvailabilityConfigTypeServer,
},
),
},
Spec: appsv1.DeploymentSpec{
Replicas: pointer.Int32(1),
RevisionHistoryLimit: pointer.Int32(2),
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
labelKeyComponent: labelValue,
},
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{},
Labels: utils.MergeStringMaps(
getLabels(),
map[string]string{
v1beta1constants.LabelNetworkPolicyShootFromSeed: v1beta1constants.LabelNetworkPolicyAllowed,
v1beta1constants.LabelNetworkPolicyToDNS: v1beta1constants.LabelNetworkPolicyAllowed,
v1beta1constants.LabelNetworkPolicyToPublicNetworks: v1beta1constants.LabelNetworkPolicyAllowed,
v1beta1constants.LabelNetworkPolicyShootToAPIServer: v1beta1constants.LabelNetworkPolicyAllowed,
},
),
},
Spec: corev1.PodSpec{
ServiceAccountName: serviceAccount.Name,
PriorityClassName: "system-cluster-critical",
SecurityContext: &corev1.PodSecurityContext{
RunAsUser: pointer.Int64(65534),
FSGroup: pointer.Int64(65534),
SupplementalGroups: []int64{1},
SeccompProfile: &corev1.SeccompProfile{
Type: corev1.SeccompProfileTypeRuntimeDefault,
},
},
Containers: []corev1.Container{
{
Name: "blackbox-exporter",
Image: b.values.Image,
Args: []string{
"--config.file=/etc/blackbox_exporter/blackbox.yaml",
"--log.level=debug",
},
ImagePullPolicy: corev1.PullIfNotPresent,
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("10m"),
corev1.ResourceMemory: resource.MustParse("25Mi"),
},
Limits: corev1.ResourceList{
corev1.ResourceMemory: resource.MustParse("128Mi"),
},
},
Ports: []corev1.ContainerPort{
{
Name: "probe",
ContainerPort: int32(9115),
Protocol: corev1.ProtocolTCP,
},
},
VolumeMounts: []corev1.VolumeMount{
{
Name: "blackbox-exporter-config",
MountPath: "/etc/blackbox_exporter",
},
},
},
},
DNSConfig: &corev1.PodDNSConfig{
Options: []corev1.PodDNSConfigOption{
{
Name: "ndots",
Value: pointer.String("3"),
},
},
},
Volumes: []corev1.Volume{
{
Name: "blackbox-exporter-config",
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: configMap.Name,
},
},
},
},
},
},
},
},
}
service = &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "blackbox-exporter",
Namespace: metav1.NamespaceSystem,
Labels: map[string]string{
labelKeyComponent: labelValue,
},
},
Spec: corev1.ServiceSpec{
Type: corev1.ServiceTypeClusterIP,
Ports: []corev1.ServicePort{
{
Name: "probe",
Port: int32(9115),
Protocol: corev1.ProtocolTCP,
},
},
Selector: map[string]string{
| {
timeoutCtx, cancel := context.WithTimeout(ctx, TimeoutWaitForManagedResource)
defer cancel()
return managedresources.WaitUntilDeleted(timeoutCtx, b.client, b.namespace, ManagedResourceName)
} | identifier_body |
blackboxexporter.go | /api/core/v1"
policyv1 "k8s.io/api/policy/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
vpaautoscalingv1 "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1"
"k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client"
v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants"
resourcesv1alpha1 "github.com/gardener/gardener/pkg/apis/resources/v1alpha1"
"github.com/gardener/gardener/pkg/client/kubernetes"
"github.com/gardener/gardener/pkg/component"
"github.com/gardener/gardener/pkg/resourcemanager/controller/garbagecollector/references"
"github.com/gardener/gardener/pkg/utils"
kubernetesutils "github.com/gardener/gardener/pkg/utils/kubernetes"
"github.com/gardener/gardener/pkg/utils/managedresources"
)
const (
// ManagedResourceName is the name of the ManagedResource containing the resource specifications.
ManagedResourceName = "shoot-core-blackbox-exporter"
labelValue = "blackbox-exporter"
labelKeyComponent = "component"
)
// Interface contains functions for a blackbox-exporter deployer.
type Interface interface {
component.DeployWaiter
component.MonitoringComponent
}
// Values is a set of configuration values for the blackbox-exporter.
type Values struct {
// Image is the container image used for blackbox-exporter.
Image string
// VPAEnabled marks whether VerticalPodAutoscaler is enabled for the shoot.
VPAEnabled bool
// KubernetesVersion is the Kubernetes version of the Shoot.
KubernetesVersion *semver.Version
}
// New creates a new instance of DeployWaiter for blackbox-exporter.
func New(
client client.Client,
namespace string,
values Values,
) Interface {
return &blackboxExporter{
client: client,
namespace: namespace,
values: values,
}
}
type blackboxExporter struct {
client client.Client
namespace string
values Values
}
func (b *blackboxExporter) Deploy(ctx context.Context) error {
data, err := b.computeResourcesData()
if err != nil {
return err
}
return managedresources.CreateForShoot(ctx, b.client, b.namespace, ManagedResourceName, managedresources.LabelValueGardener, false, data)
}
func (b *blackboxExporter) Destroy(ctx context.Context) error {
return managedresources.DeleteForShoot(ctx, b.client, b.namespace, ManagedResourceName)
}
// TimeoutWaitForManagedResource is the timeout used while waiting for the ManagedResources to become healthy
// or deleted.
var TimeoutWaitForManagedResource = 2 * time.Minute
func (b *blackboxExporter) Wait(ctx context.Context) error {
timeoutCtx, cancel := context.WithTimeout(ctx, TimeoutWaitForManagedResource)
defer cancel()
return managedresources.WaitUntilHealthy(timeoutCtx, b.client, b.namespace, ManagedResourceName)
}
func (b *blackboxExporter) WaitCleanup(ctx context.Context) error {
timeoutCtx, cancel := context.WithTimeout(ctx, TimeoutWaitForManagedResource)
defer cancel()
return managedresources.WaitUntilDeleted(timeoutCtx, b.client, b.namespace, ManagedResourceName)
}
func (b *blackboxExporter) computeResourcesData() (map[string][]byte, error) {
var (
intStrOne = intstr.FromInt(1)
registry = managedresources.NewRegistry(kubernetes.ShootScheme, kubernetes.ShootCodec, kubernetes.ShootSerializer)
serviceAccount = &corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: "blackbox-exporter",
Namespace: metav1.NamespaceSystem,
Labels: getLabels(),
},
AutomountServiceAccountToken: pointer.Bool(false),
}
configMap = &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "blackbox-exporter-config",
Namespace: metav1.NamespaceSystem,
Labels: map[string]string{
v1beta1constants.LabelApp: "prometheus",
v1beta1constants.LabelRole: v1beta1constants.GardenRoleMonitoring,
},
},
Data: map[string]string{
`blackbox.yaml`: `modules:
http_kubernetes_service:
prober: http
timeout: 10s
http:
headers:
Accept: "*/*"
Accept-Language: "en-US"
tls_config:
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
preferred_ip_protocol: "ip4"
`,
},
}
)
utilruntime.Must(kubernetesutils.MakeUnique(configMap))
var (
deployment = &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: "blackbox-exporter",
Namespace: metav1.NamespaceSystem,
Labels: utils.MergeStringMaps(
getLabels(),
map[string]string{
resourcesv1alpha1.HighAvailabilityConfigType: resourcesv1alpha1.HighAvailabilityConfigTypeServer,
},
),
},
Spec: appsv1.DeploymentSpec{
Replicas: pointer.Int32(1),
RevisionHistoryLimit: pointer.Int32(2),
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
labelKeyComponent: labelValue,
},
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{},
Labels: utils.MergeStringMaps(
getLabels(),
map[string]string{
v1beta1constants.LabelNetworkPolicyShootFromSeed: v1beta1constants.LabelNetworkPolicyAllowed,
v1beta1constants.LabelNetworkPolicyToDNS: v1beta1constants.LabelNetworkPolicyAllowed,
v1beta1constants.LabelNetworkPolicyToPublicNetworks: v1beta1constants.LabelNetworkPolicyAllowed,
v1beta1constants.LabelNetworkPolicyShootToAPIServer: v1beta1constants.LabelNetworkPolicyAllowed,
},
),
},
Spec: corev1.PodSpec{
ServiceAccountName: serviceAccount.Name,
PriorityClassName: "system-cluster-critical",
SecurityContext: &corev1.PodSecurityContext{
RunAsUser: pointer.Int64(65534),
FSGroup: pointer.Int64(65534),
SupplementalGroups: []int64{1},
SeccompProfile: &corev1.SeccompProfile{
Type: corev1.SeccompProfileTypeRuntimeDefault,
},
},
Containers: []corev1.Container{
{
Name: "blackbox-exporter",
Image: b.values.Image,
Args: []string{
"--config.file=/etc/blackbox_exporter/blackbox.yaml",
"--log.level=debug",
},
ImagePullPolicy: corev1.PullIfNotPresent,
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{ | Limits: corev1.ResourceList{
corev1.ResourceMemory: resource.MustParse("128Mi"),
},
},
Ports: []corev1.ContainerPort{
{
Name: "probe",
ContainerPort: int32(9115),
Protocol: corev1.ProtocolTCP,
},
},
VolumeMounts: []corev1.VolumeMount{
{
Name: "blackbox-exporter-config",
MountPath: "/etc/blackbox_exporter",
},
},
},
},
DNSConfig: &corev1.PodDNSConfig{
Options: []corev1.PodDNSConfigOption{
{
Name: "ndots",
Value: pointer.String("3"),
},
},
},
Volumes: []corev1.Volume{
{
Name: "blackbox-exporter-config",
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: configMap.Name,
},
},
},
},
},
},
},
},
}
service = &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "blackbox-exporter",
Namespace: metav1.NamespaceSystem,
Labels: map[string]string{
labelKeyComponent: labelValue,
},
},
Spec: corev1.ServiceSpec{
Type: corev1.ServiceTypeClusterIP,
Ports: []corev1.ServicePort{
{
Name: "probe",
Port: int32(9115),
Protocol: corev1.ProtocolTCP,
},
},
Selector: map[string]string{
label | corev1.ResourceCPU: resource.MustParse("10m"),
corev1.ResourceMemory: resource.MustParse("25Mi"),
}, | random_line_split |
blackboxexporter.go | values: values,
}
}
type blackboxExporter struct {
client client.Client
namespace string
values Values
}
func (b *blackboxExporter) Deploy(ctx context.Context) error {
data, err := b.computeResourcesData()
if err != nil {
return err
}
return managedresources.CreateForShoot(ctx, b.client, b.namespace, ManagedResourceName, managedresources.LabelValueGardener, false, data)
}
func (b *blackboxExporter) Destroy(ctx context.Context) error {
return managedresources.DeleteForShoot(ctx, b.client, b.namespace, ManagedResourceName)
}
// TimeoutWaitForManagedResource is the timeout used while waiting for the ManagedResources to become healthy
// or deleted.
var TimeoutWaitForManagedResource = 2 * time.Minute
func (b *blackboxExporter) Wait(ctx context.Context) error {
timeoutCtx, cancel := context.WithTimeout(ctx, TimeoutWaitForManagedResource)
defer cancel()
return managedresources.WaitUntilHealthy(timeoutCtx, b.client, b.namespace, ManagedResourceName)
}
func (b *blackboxExporter) WaitCleanup(ctx context.Context) error {
timeoutCtx, cancel := context.WithTimeout(ctx, TimeoutWaitForManagedResource)
defer cancel()
return managedresources.WaitUntilDeleted(timeoutCtx, b.client, b.namespace, ManagedResourceName)
}
func (b *blackboxExporter) computeResourcesData() (map[string][]byte, error) {
var (
intStrOne = intstr.FromInt(1)
registry = managedresources.NewRegistry(kubernetes.ShootScheme, kubernetes.ShootCodec, kubernetes.ShootSerializer)
serviceAccount = &corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: "blackbox-exporter",
Namespace: metav1.NamespaceSystem,
Labels: getLabels(),
},
AutomountServiceAccountToken: pointer.Bool(false),
}
configMap = &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "blackbox-exporter-config",
Namespace: metav1.NamespaceSystem,
Labels: map[string]string{
v1beta1constants.LabelApp: "prometheus",
v1beta1constants.LabelRole: v1beta1constants.GardenRoleMonitoring,
},
},
Data: map[string]string{
`blackbox.yaml`: `modules:
http_kubernetes_service:
prober: http
timeout: 10s
http:
headers:
Accept: "*/*"
Accept-Language: "en-US"
tls_config:
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
preferred_ip_protocol: "ip4"
`,
},
}
)
utilruntime.Must(kubernetesutils.MakeUnique(configMap))
var (
deployment = &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: "blackbox-exporter",
Namespace: metav1.NamespaceSystem,
Labels: utils.MergeStringMaps(
getLabels(),
map[string]string{
resourcesv1alpha1.HighAvailabilityConfigType: resourcesv1alpha1.HighAvailabilityConfigTypeServer,
},
),
},
Spec: appsv1.DeploymentSpec{
Replicas: pointer.Int32(1),
RevisionHistoryLimit: pointer.Int32(2),
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
labelKeyComponent: labelValue,
},
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{},
Labels: utils.MergeStringMaps(
getLabels(),
map[string]string{
v1beta1constants.LabelNetworkPolicyShootFromSeed: v1beta1constants.LabelNetworkPolicyAllowed,
v1beta1constants.LabelNetworkPolicyToDNS: v1beta1constants.LabelNetworkPolicyAllowed,
v1beta1constants.LabelNetworkPolicyToPublicNetworks: v1beta1constants.LabelNetworkPolicyAllowed,
v1beta1constants.LabelNetworkPolicyShootToAPIServer: v1beta1constants.LabelNetworkPolicyAllowed,
},
),
},
Spec: corev1.PodSpec{
ServiceAccountName: serviceAccount.Name,
PriorityClassName: "system-cluster-critical",
SecurityContext: &corev1.PodSecurityContext{
RunAsUser: pointer.Int64(65534),
FSGroup: pointer.Int64(65534),
SupplementalGroups: []int64{1},
SeccompProfile: &corev1.SeccompProfile{
Type: corev1.SeccompProfileTypeRuntimeDefault,
},
},
Containers: []corev1.Container{
{
Name: "blackbox-exporter",
Image: b.values.Image,
Args: []string{
"--config.file=/etc/blackbox_exporter/blackbox.yaml",
"--log.level=debug",
},
ImagePullPolicy: corev1.PullIfNotPresent,
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("10m"),
corev1.ResourceMemory: resource.MustParse("25Mi"),
},
Limits: corev1.ResourceList{
corev1.ResourceMemory: resource.MustParse("128Mi"),
},
},
Ports: []corev1.ContainerPort{
{
Name: "probe",
ContainerPort: int32(9115),
Protocol: corev1.ProtocolTCP,
},
},
VolumeMounts: []corev1.VolumeMount{
{
Name: "blackbox-exporter-config",
MountPath: "/etc/blackbox_exporter",
},
},
},
},
DNSConfig: &corev1.PodDNSConfig{
Options: []corev1.PodDNSConfigOption{
{
Name: "ndots",
Value: pointer.String("3"),
},
},
},
Volumes: []corev1.Volume{
{
Name: "blackbox-exporter-config",
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: configMap.Name,
},
},
},
},
},
},
},
},
}
service = &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "blackbox-exporter",
Namespace: metav1.NamespaceSystem,
Labels: map[string]string{
labelKeyComponent: labelValue,
},
},
Spec: corev1.ServiceSpec{
Type: corev1.ServiceTypeClusterIP,
Ports: []corev1.ServicePort{
{
Name: "probe",
Port: int32(9115),
Protocol: corev1.ProtocolTCP,
},
},
Selector: map[string]string{
labelKeyComponent: labelValue,
},
},
}
podDisruptionBudget = &policyv1.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{
Name: "blackbox-exporter",
Namespace: metav1.NamespaceSystem,
Labels: map[string]string{
v1beta1constants.GardenRole: v1beta1constants.GardenRoleMonitoring,
labelKeyComponent: labelValue,
},
},
Spec: policyv1.PodDisruptionBudgetSpec{
MaxUnavailable: &intStrOne,
Selector: deployment.Spec.Selector,
},
}
vpa *vpaautoscalingv1.VerticalPodAutoscaler
)
utilruntime.Must(references.InjectAnnotations(deployment))
if b.values.VPAEnabled {
vpaUpdateMode := vpaautoscalingv1.UpdateModeAuto
vpaControlledValues := vpaautoscalingv1.ContainerControlledValuesRequestsOnly
vpa = &vpaautoscalingv1.VerticalPodAutoscaler{
ObjectMeta: metav1.ObjectMeta{
Name: "blackbox-exporter",
Namespace: metav1.NamespaceSystem,
},
Spec: vpaautoscalingv1.VerticalPodAutoscalerSpec{
TargetRef: &autoscalingv1.CrossVersionObjectReference{
APIVersion: appsv1.SchemeGroupVersion.String(),
Kind: "Deployment",
Name: deployment.Name,
},
UpdatePolicy: &vpaautoscalingv1.PodUpdatePolicy{
UpdateMode: &vpaUpdateMode,
},
ResourcePolicy: &vpaautoscalingv1.PodResourcePolicy{
ContainerPolicies: []vpaautoscalingv1.ContainerResourcePolicy{
{
ContainerName: vpaautoscalingv1.DefaultContainerResourcePolicy,
ControlledValues: &vpaControlledValues,
},
},
},
},
}
}
return registry.AddAllAndSerialize(
serviceAccount,
configMap,
deployment,
podDisruptionBudget,
service,
vpa,
)
}
func | getLabels | identifier_name | |
blackboxexporter.go | /core/v1"
policyv1 "k8s.io/api/policy/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
vpaautoscalingv1 "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1"
"k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client"
v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants"
resourcesv1alpha1 "github.com/gardener/gardener/pkg/apis/resources/v1alpha1"
"github.com/gardener/gardener/pkg/client/kubernetes"
"github.com/gardener/gardener/pkg/component"
"github.com/gardener/gardener/pkg/resourcemanager/controller/garbagecollector/references"
"github.com/gardener/gardener/pkg/utils"
kubernetesutils "github.com/gardener/gardener/pkg/utils/kubernetes"
"github.com/gardener/gardener/pkg/utils/managedresources"
)
const (
// ManagedResourceName is the name of the ManagedResource containing the resource specifications.
ManagedResourceName = "shoot-core-blackbox-exporter"
labelValue = "blackbox-exporter"
labelKeyComponent = "component"
)
// Interface contains functions for a blackbox-exporter deployer.
type Interface interface {
component.DeployWaiter
component.MonitoringComponent
}
// Values is a set of configuration values for the blackbox-exporter.
type Values struct {
// Image is the container image used for blackbox-exporter.
Image string
// VPAEnabled marks whether VerticalPodAutoscaler is enabled for the shoot.
VPAEnabled bool
// KubernetesVersion is the Kubernetes version of the Shoot.
KubernetesVersion *semver.Version
}
// New creates a new instance of DeployWaiter for blackbox-exporter.
func New(
client client.Client,
namespace string,
values Values,
) Interface {
return &blackboxExporter{
client: client,
namespace: namespace,
values: values,
}
}
type blackboxExporter struct {
client client.Client
namespace string
values Values
}
func (b *blackboxExporter) Deploy(ctx context.Context) error {
data, err := b.computeResourcesData()
if err != nil |
return managedresources.CreateForShoot(ctx, b.client, b.namespace, ManagedResourceName, managedresources.LabelValueGardener, false, data)
}
func (b *blackboxExporter) Destroy(ctx context.Context) error {
return managedresources.DeleteForShoot(ctx, b.client, b.namespace, ManagedResourceName)
}
// TimeoutWaitForManagedResource is the timeout used while waiting for the ManagedResources to become healthy
// or deleted.
var TimeoutWaitForManagedResource = 2 * time.Minute
func (b *blackboxExporter) Wait(ctx context.Context) error {
timeoutCtx, cancel := context.WithTimeout(ctx, TimeoutWaitForManagedResource)
defer cancel()
return managedresources.WaitUntilHealthy(timeoutCtx, b.client, b.namespace, ManagedResourceName)
}
func (b *blackboxExporter) WaitCleanup(ctx context.Context) error {
timeoutCtx, cancel := context.WithTimeout(ctx, TimeoutWaitForManagedResource)
defer cancel()
return managedresources.WaitUntilDeleted(timeoutCtx, b.client, b.namespace, ManagedResourceName)
}
func (b *blackboxExporter) computeResourcesData() (map[string][]byte, error) {
var (
intStrOne = intstr.FromInt(1)
registry = managedresources.NewRegistry(kubernetes.ShootScheme, kubernetes.ShootCodec, kubernetes.ShootSerializer)
serviceAccount = &corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: "blackbox-exporter",
Namespace: metav1.NamespaceSystem,
Labels: getLabels(),
},
AutomountServiceAccountToken: pointer.Bool(false),
}
configMap = &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "blackbox-exporter-config",
Namespace: metav1.NamespaceSystem,
Labels: map[string]string{
v1beta1constants.LabelApp: "prometheus",
v1beta1constants.LabelRole: v1beta1constants.GardenRoleMonitoring,
},
},
Data: map[string]string{
`blackbox.yaml`: `modules:
http_kubernetes_service:
prober: http
timeout: 10s
http:
headers:
Accept: "*/*"
Accept-Language: "en-US"
tls_config:
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
preferred_ip_protocol: "ip4"
`,
},
}
)
utilruntime.Must(kubernetesutils.MakeUnique(configMap))
var (
deployment = &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: "blackbox-exporter",
Namespace: metav1.NamespaceSystem,
Labels: utils.MergeStringMaps(
getLabels(),
map[string]string{
resourcesv1alpha1.HighAvailabilityConfigType: resourcesv1alpha1.HighAvailabilityConfigTypeServer,
},
),
},
Spec: appsv1.DeploymentSpec{
Replicas: pointer.Int32(1),
RevisionHistoryLimit: pointer.Int32(2),
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
labelKeyComponent: labelValue,
},
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{},
Labels: utils.MergeStringMaps(
getLabels(),
map[string]string{
v1beta1constants.LabelNetworkPolicyShootFromSeed: v1beta1constants.LabelNetworkPolicyAllowed,
v1beta1constants.LabelNetworkPolicyToDNS: v1beta1constants.LabelNetworkPolicyAllowed,
v1beta1constants.LabelNetworkPolicyToPublicNetworks: v1beta1constants.LabelNetworkPolicyAllowed,
v1beta1constants.LabelNetworkPolicyShootToAPIServer: v1beta1constants.LabelNetworkPolicyAllowed,
},
),
},
Spec: corev1.PodSpec{
ServiceAccountName: serviceAccount.Name,
PriorityClassName: "system-cluster-critical",
SecurityContext: &corev1.PodSecurityContext{
RunAsUser: pointer.Int64(65534),
FSGroup: pointer.Int64(65534),
SupplementalGroups: []int64{1},
SeccompProfile: &corev1.SeccompProfile{
Type: corev1.SeccompProfileTypeRuntimeDefault,
},
},
Containers: []corev1.Container{
{
Name: "blackbox-exporter",
Image: b.values.Image,
Args: []string{
"--config.file=/etc/blackbox_exporter/blackbox.yaml",
"--log.level=debug",
},
ImagePullPolicy: corev1.PullIfNotPresent,
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("10m"),
corev1.ResourceMemory: resource.MustParse("25Mi"),
},
Limits: corev1.ResourceList{
corev1.ResourceMemory: resource.MustParse("128Mi"),
},
},
Ports: []corev1.ContainerPort{
{
Name: "probe",
ContainerPort: int32(9115),
Protocol: corev1.ProtocolTCP,
},
},
VolumeMounts: []corev1.VolumeMount{
{
Name: "blackbox-exporter-config",
MountPath: "/etc/blackbox_exporter",
},
},
},
},
DNSConfig: &corev1.PodDNSConfig{
Options: []corev1.PodDNSConfigOption{
{
Name: "ndots",
Value: pointer.String("3"),
},
},
},
Volumes: []corev1.Volume{
{
Name: "blackbox-exporter-config",
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: configMap.Name,
},
},
},
},
},
},
},
},
}
service = &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "blackbox-exporter",
Namespace: metav1.NamespaceSystem,
Labels: map[string]string{
labelKeyComponent: labelValue,
},
},
Spec: corev1.ServiceSpec{
Type: corev1.ServiceTypeClusterIP,
Ports: []corev1.ServicePort{
{
Name: "probe",
Port: int32(9115),
Protocol: corev1.ProtocolTCP,
},
},
Selector: map[string]string{
| {
return err
} | conditional_block |
mod.rs | , and remove that example from strata.
///
/// A `Shared Weight Table` maintains the sum of the weights of all examples in each stratum.
/// The `Assigner`s increase the value in the `Shared Weight Table` when a new example is inserted into
/// a stratum.
/// The `Sampler`s use the weights in the `Shared Weight Table` to decide which stratum to read next and
/// send its next sampled example to the memory buffer. After an example is processed, the `Sampler` also
/// updates its weight, sends it to right stratum, and updates `Shared Weight Table` accordingly.
pub fn new(
    num_examples: usize,
    feature_size: usize,
    positive: String,
    num_examples_per_block: usize,
    disk_buffer_filename: &str,
    num_assigners: usize,
    num_samplers: usize,
    sampled_examples: Sender<(ExampleWithScore, u32)>,
    sampling_signal: Receiver<Signal>,
    models: Receiver<Model>,
    channel_size: usize,
    debug_mode: bool,
) -> StratifiedStorage {
    let strata = Strata::new(num_examples, feature_size, num_examples_per_block,
                             disk_buffer_filename);
    let strata = Arc::new(RwLock::new(strata));
    // Shared stats tables: stratum index -> example count / total weight.
    let (counts_table_r, mut counts_table_w) = evmap::new();
    let (weights_table_r, mut weights_table_w) = evmap::new();
    let (updated_examples_s, updated_examples_r) = channel::bounded(channel_size, "updated-examples");
    // The messages in the stats channel are very small, so its capacity can be larger.
    let (stats_update_s, stats_update_r) = channel::bounded(5000000, "stats");
    // Apply (count, weight) deltas to the shared tables from a dedicated thread
    // so that senders never block on the evmap write handles (non-blocking updates).
    {
        let counts_table_r = counts_table_r.clone();
        let weights_table_r = weights_table_r.clone();
        spawn(move || {
            while let Some((index, (count, weight))) = stats_update_r.recv() {
                let val = counts_table_r.get_and(&index, |vs| vs[0]);
                counts_table_w.update(index, val.unwrap_or(0) + count);
                let cur = weights_table_r.get_and(&index, |vs: &[Box<F64>]| vs[0].val)
                                         .unwrap_or(0.0);
                weights_table_w.update(index, Box::new(F64 { val: cur + weight }));
                {
                    // Publish both writes so readers observe the new count and weight together.
                    counts_table_w.refresh();
                    weights_table_w.refresh();
                }
            }
        });
    }
    // In debug mode, log the weight/count distribution over the strata every 5 seconds.
    if debug_mode {
        let counts_table_r = counts_table_r.clone();
        let weights_table_r = weights_table_r.clone();
        spawn(move || {
            loop {
                sleep(Duration::from_millis(5000));
                let mut p: Vec<(i8, f64)> =
                    weights_table_r.map_into(|a: &i8, b: &[Box<F64>]| (a.clone(), b[0].val));
                p.sort_by_key(|&(index, _)| index);
                let mut c: Vec<(i8, i32)> = counts_table_r.map_into(|a, b| (a.clone(), b[0]));
                c.sort_by_key(|&(index, _)| index);
                // Normalize to percentages; guard against a zero total weight.
                let mut sump: f64 = p.iter().map(|t| t.1).sum();
                if get_sign(sump) == 0 {
                    sump = 1.0;
                }
                let ps: Vec<String> = p.into_iter()
                                       .map(|(idx, w)| (idx, 100.0 * w / sump))
                                       .map(|(idx, w)| format!("({}, {:.2})", idx, w))
                                       .collect();
                debug!("strata weights distr, {}, {}", ps.join(", "), sump);
                let sumc: i32 = max(c.iter().map(|t| t.1).sum(), 1);
                let cs: Vec<String> = c.into_iter()
                                       .map(|(idx, c)| (idx, 100.0 * c as f32 / (sumc as f32)))
                                       .map(|(idx, c)| format!("({}, {:.2})", idx, c))
                                       .collect();
                debug!("strata counts distr, {}, {}", cs.join(", "), sumc);
            }
        });
    }
    // Assigners move updated examples into their strata and bump the stats tables;
    // samplers draw from strata guided by the shared weights table.
    let assigners = Assigners::new(
        updated_examples_r,
        strata.clone(),
        stats_update_s.clone(),
        num_assigners,
    );
    let samplers = Samplers::new(
        strata.clone(),
        sampled_examples.clone(),
        updated_examples_s.clone(),
        models.clone(),
        stats_update_s.clone(),
        weights_table_r.clone(),
        sampling_signal.clone(),
        num_samplers,
    );
    assigners.run();
    samplers.run();
    StratifiedStorage {
        counts_table_r,
        weights_table_r,
        updated_examples_s,
        positive,
    }
}
pub fn init_stratified_from_file(
&self,
filename: String,
size: usize,
batch_size: usize,
feature_size: usize,
range: Range<usize>,
bins: Vec<Bins>,
) {
let mut reader = SerialStorage::new(
filename.clone(),
size,
feature_size,
true,
self.positive.clone(),
None,
range.clone(),
);
let updated_examples_s = self.updated_examples_s.clone();
spawn(move || {
let mut index = 0;
while index < size {
reader.read_raw(batch_size).into_iter().for_each(|data| {
let features: Vec<TFeature> =
data.feature.iter().enumerate()
.map(|(idx, val)| {
if range.start <= idx && idx < range.end {
bins[idx - range.start].get_split_index(*val)
} else {
0
}
}).collect();
let mapped_data = LabeledData::new(features, data.label);
updated_examples_s.send((mapped_data, (0.0, 0)));
});
index += batch_size;
}
debug!("Raw data on disk has been loaded into the stratified storage, \
filename {}, capacity {}, feature size {}", filename, size, feature_size);
});
}
}
fn sample_weights_table(weights_table_r: &WeightTableRead) -> Option<i8> {
let p: Vec<(i8, f64)> = weights_table_r.map_into(|a, b| (a.clone(), b[0].val));
let sum_of_weights: f64 = p.iter().map(|t| t.1).sum();
if get_sign(sum_of_weights) == 0 {
None
} else {
let mut frac = rand::random::<f64>() * sum_of_weights;
let mut iter = p.iter();
let mut key_val = &(0, 0.0);
while get_sign(frac) >= 0 {
key_val = iter.next().expect("get_sample_from: input p is empty");
frac -= key_val.1;
}
Some(key_val.0)
}
}
#[cfg(test)]
mod tests {
extern crate env_logger;
use std::fs::remove_file;
use commons::channel;
use std::thread::spawn;
use labeled_data::LabeledData;
use commons::ExampleWithScore;
use commons::Signal;
use commons::performance_monitor::PerformanceMonitor;
use super::StratifiedStorage;
use ::TFeature;
#[test]
fn test_mean() {
let _ = env_logger::try_init();
let filename = "unittest-stratified3.bin";
let batch = 100000;
let num_read = 1000000;
let (sampled_examples_send, sampled_examples_recv) = channel::bounded(1000, "sampled-examples");
let (_, models_recv) = channel::bounded(10, "updated-models");
let (signal_s, signal_r) = channel::bounded(10, "sampling-signal");
signal_s.send(Signal::START);
let stratified_storage = StratifiedStorage::new(
batch * 10, 1, "1".to_string(), 10000, filename, 4, 4,
sampled_examples_send, signal_r, models_recv, 10, false,
);
let updated_examples_send = stratified_storage.updated_examples_s.clone();
let mut pm_load = PerformanceMonitor::new();
pm_load.start();
let loading = spawn(move || {
for _ in 0..batch { | for i in 1..11 {
let t = get_example(vec![i as TFeature], i as f32); | random_line_split | |
mod.rs | disk, while a small number of examples remains
/// in memory to be writen to disk or just read from disk and ready to send out to the sampler.
///
/// The overall structure of the stratified storage is as follow:
///
/// 
///
/// The `Assigner`s read examples with updated scores from the `Sampler` and write them back to
/// the corresponding strata based on their new weights. The examples would be put into the
/// `In Queue`s first till a proper number of examples are accumulated that belongs to the
/// same strata, at that point they would be written into disk in batch.
///
/// Meanwhile, a certain number of examples from each stratum are loaded into the memory
/// from the disk and kept in `Out Queue`s.
/// The `Sampler`s iteratively select a stratum with a probability that proportional to
/// the sum of weights of all examples in that stratum, send its next sampled example to the memory
/// buffer, and remove that example from strata.
///
/// A `Shared Weight Table` maintains the sum of the weights of all examples in each stratum.
/// The `Assigner`s increase the value in the `Shared Weight Table` when a new example is inserted into
/// a stratum.
/// The `Sampler`s use the weights in the `Shared Weight Table` to decide which stratum to read next and
/// send its next sampled example to the memory buffer. After an example is processed, the `Sampler` also
/// updates its weight, sends it to right stratum, and updates `Shared Weight Table` accordingly.
pub fn new(
num_examples: usize,
feature_size: usize,
positive: String,
num_examples_per_block: usize,
disk_buffer_filename: &str,
num_assigners: usize,
num_samplers: usize,
sampled_examples: Sender<(ExampleWithScore, u32)>,
sampling_signal: Receiver<Signal>,
models: Receiver<Model>,
channel_size: usize,
debug_mode: bool,
) -> StratifiedStorage {
let strata = Strata::new(num_examples, feature_size, num_examples_per_block,
disk_buffer_filename);
let strata = Arc::new(RwLock::new(strata));
let (counts_table_r, mut counts_table_w) = evmap::new();
let (weights_table_r, mut weights_table_w) = evmap::new();
let (updated_examples_s, updated_examples_r) = channel::bounded(channel_size, "updated-examples");
// The messages in the stats channel are very small, so its capacity can be larger.
let (stats_update_s, stats_update_r) = channel::bounded(5000000, "stats");
// Update shared weights table (non-blocking)
{
let counts_table_r = counts_table_r.clone();
let weights_table_r = weights_table_r.clone();
spawn(move || {
while let Some((index, (count, weight))) = stats_update_r.recv() {
let val = counts_table_r.get_and(&index, |vs| vs[0]);
counts_table_w.update(index, val.unwrap_or(0) + count);
let cur = weights_table_r.get_and(&index, |vs: &[Box<F64>]| vs[0].val)
.unwrap_or(0.0);
weights_table_w.update(index, Box::new(F64 { val: cur + weight }));
{
counts_table_w.refresh();
weights_table_w.refresh();
}
}
});
}
// Monitor the distribution of strata
if debug_mode {
let counts_table_r = counts_table_r.clone();
let weights_table_r = weights_table_r.clone();
spawn(move || {
loop {
sleep(Duration::from_millis(5000));
let mut p: Vec<(i8, f64)> =
weights_table_r.map_into(|a: &i8, b: &[Box<F64>]| (a.clone(), b[0].val));
p.sort_by(|a, b| (a.0).cmp(&b.0));
let mut c: Vec<(i8, i32)> = counts_table_r.map_into(|a, b| (a.clone(), b[0]));
c.sort_by(|a, b| (a.0).cmp(&b.0));
let mut sump: f64 = p.iter().map(|t| t.1).sum();
if get_sign(sump) == 0 {
sump = 1.0;
}
let ps: Vec<String> = p.into_iter()
.map(|(idx, w)| (idx, 100.0 * w / sump))
.map(|(idx, w)| format!("({}, {:.2})", idx, w))
.collect();
debug!("strata weights distr, {}, {}", ps.join(", "), sump);
let sumc: i32 = max(c.iter().map(|t| t.1).sum(), 1);
let cs: Vec<String> = c.into_iter()
.map(|(idx, c)| (idx, 100.0 * c as f32 / (sumc as f32)))
.map(|(idx, c)| format!("({}, {:.2})", idx, c))
.collect();
debug!("strata counts distr, {}, {}", cs.join(", "), sumc);
}
});
}
let assigners = Assigners::new(
updated_examples_r,
strata.clone(),
stats_update_s.clone(),
num_assigners,
);
let samplers = Samplers::new(
strata.clone(),
sampled_examples.clone(),
updated_examples_s.clone(),
models.clone(),
stats_update_s.clone(),
weights_table_r.clone(),
sampling_signal.clone(),
num_samplers,
);
assigners.run();
samplers.run();
StratifiedStorage {
// num_examples: num_examples,
// feature_size: feature_size,
// num_examples_per_block: num_examples_per_block,
// disk_buffer_filename: String::from(disk_buffer_filename),
// strata: strata,
// stats_update_s: stats_update_s,
counts_table_r: counts_table_r,
weights_table_r: weights_table_r,
// num_assigners: num_assigners,
// num_samplers: num_samplers,
// updated_examples_r: updated_examples_r,
updated_examples_s: updated_examples_s,
// sampled_examples_s: sampled_examples,
// sampling_signal: sampling_signal,
// models: models,
positive: positive,
}
}
pub fn init_stratified_from_file(
&self,
filename: String,
size: usize,
batch_size: usize,
feature_size: usize,
range: Range<usize>,
bins: Vec<Bins>,
) {
let mut reader = SerialStorage::new(
filename.clone(),
size,
feature_size,
true,
self.positive.clone(),
None,
range.clone(),
);
let updated_examples_s = self.updated_examples_s.clone();
spawn(move || {
let mut index = 0;
while index < size {
reader.read_raw(batch_size).into_iter().for_each(|data| {
let features: Vec<TFeature> =
data.feature.iter().enumerate()
.map(|(idx, val)| {
if range.start <= idx && idx < range.end {
bins[idx - range.start].get_split_index(*val)
} else {
0
}
}).collect();
let mapped_data = LabeledData::new(features, data.label);
updated_examples_s.send((mapped_data, (0.0, 0)));
});
index += batch_size;
}
debug!("Raw data on disk has been loaded into the stratified storage, \
filename {}, capacity {}, feature size {}", filename, size, feature_size);
});
}
}
fn sample_weights_table(weights_table_r: &WeightTableRead) -> Option<i8> {
let p: Vec<(i8, f64)> = weights_table_r.map_into(|a, b| (a.clone(), b[0].val));
let sum_of_weights: f64 = p.iter().map(|t| t.1).sum();
if get_sign(sum_of_weights) == 0 {
None
} else {
let mut frac = rand::random::<f64>() * sum_of_weights;
let mut iter = p.iter();
let mut key_val = &(0, 0.0);
while get_sign(frac) >= 0 {
key_val = iter.next().expect("get_sample_from: input p is empty");
frac -= key_val.1;
}
Some(key_val.0)
}
}
#[cfg(test)]
mod tests {
extern crate env_logger;
use std::fs::remove_file;
use commons::channel;
use std::thread::spawn;
use labeled_data::LabeledData;
use commons::ExampleWithScore;
use commons::Signal;
use commons::performance_monitor::PerformanceMonitor;
use super::StratifiedStorage;
use ::TFeature;
#[test]
fn | test_mean | identifier_name | |
mod.rs | Sender<ExampleWithScore>,
// sampled_examples_s: Sender<(ExampleWithScore, u32)>,
// sampling_signal: Receiver<Signal>,
// models: Receiver<Model>,
positive: String,
}
impl StratifiedStorage {
/// Create the stratified storage structure.
///
/// * `num_examples`: the total number of examples in the training data set
/// * `feature_size`: the number of features of the training examples
/// * `num_examples_per_block`: the number of examples to write back to disk in batch (explained below)
/// * `disk_buffer_filename`: the name of the binary file for saving the examples in strata on disk
/// If such file does not exist, it will be created
/// * `num_assigners`: the number of threads that run the `Assigner`s (explained below)
/// * `num_samplers`: the number of threads that run the `Sampler`s (explained below)
/// * `sampled_examples`: the channle that the stratified storage sends the sampled examples to
/// the buffer loader
/// * `sampling_singal`: the channle that the buffer loader sends sampling signals to
/// start and stop the samplers as needed
/// * `models`: the channel that the booster sends the latest models in
///
/// Stratified storage organizes training examples according to their weights
/// given current learning model.
/// The examples are assigned to different strata so that the weight ratio of the examples
/// within the same stratum does not exceed 2.
/// Most examples in a stratum are stored on disk, while a small number of examples remains
/// in memory to be writen to disk or just read from disk and ready to send out to the sampler.
///
/// The overall structure of the stratified storage is as follow:
///
/// 
///
/// The `Assigner`s read examples with updated scores from the `Sampler` and write them back to
/// the corresponding strata based on their new weights. The examples would be put into the
/// `In Queue`s first till a proper number of examples are accumulated that belongs to the
/// same strata, at that point they would be written into disk in batch.
///
/// Meanwhile, a certain number of examples from each stratum are loaded into the memory
/// from the disk and kept in `Out Queue`s.
/// The `Sampler`s iteratively select a stratum with a probability that proportional to
/// the sum of weights of all examples in that stratum, send its next sampled example to the memory
/// buffer, and remove that example from strata.
///
/// A `Shared Weight Table` maintains the sum of the weights of all examples in each stratum.
/// The `Assigner`s increase the value in the `Shared Weight Table` when a new example is inserted into
/// a stratum.
/// The `Sampler`s use the weights in the `Shared Weight Table` to decide which stratum to read next and
/// send its next sampled example to the memory buffer. After an example is processed, the `Sampler` also
/// updates its weight, sends it to right stratum, and updates `Shared Weight Table` accordingly.
pub fn new(
num_examples: usize,
feature_size: usize,
positive: String,
num_examples_per_block: usize,
disk_buffer_filename: &str,
num_assigners: usize,
num_samplers: usize,
sampled_examples: Sender<(ExampleWithScore, u32)>,
sampling_signal: Receiver<Signal>,
models: Receiver<Model>,
channel_size: usize,
debug_mode: bool,
) -> StratifiedStorage | weights_table_w.update(index, Box::new(F64 { val: cur + weight }));
{
counts_table_w.refresh();
weights_table_w.refresh();
}
}
});
}
// Monitor the distribution of strata
if debug_mode {
let counts_table_r = counts_table_r.clone();
let weights_table_r = weights_table_r.clone();
spawn(move || {
loop {
sleep(Duration::from_millis(5000));
let mut p: Vec<(i8, f64)> =
weights_table_r.map_into(|a: &i8, b: &[Box<F64>]| (a.clone(), b[0].val));
p.sort_by(|a, b| (a.0).cmp(&b.0));
let mut c: Vec<(i8, i32)> = counts_table_r.map_into(|a, b| (a.clone(), b[0]));
c.sort_by(|a, b| (a.0).cmp(&b.0));
let mut sump: f64 = p.iter().map(|t| t.1).sum();
if get_sign(sump) == 0 {
sump = 1.0;
}
let ps: Vec<String> = p.into_iter()
.map(|(idx, w)| (idx, 100.0 * w / sump))
.map(|(idx, w)| format!("({}, {:.2})", idx, w))
.collect();
debug!("strata weights distr, {}, {}", ps.join(", "), sump);
let sumc: i32 = max(c.iter().map(|t| t.1).sum(), 1);
let cs: Vec<String> = c.into_iter()
.map(|(idx, c)| (idx, 100.0 * c as f32 / (sumc as f32)))
.map(|(idx, c)| format!("({}, {:.2})", idx, c))
.collect();
debug!("strata counts distr, {}, {}", cs.join(", "), sumc);
}
});
}
let assigners = Assigners::new(
updated_examples_r,
strata.clone(),
stats_update_s.clone(),
num_assigners,
);
let samplers = Samplers::new(
strata.clone(),
sampled_examples.clone(),
updated_examples_s.clone(),
models.clone(),
stats_update_s.clone(),
weights_table_r.clone(),
sampling_signal.clone(),
num_samplers,
);
assigners.run();
samplers.run();
StratifiedStorage {
// num_examples: num_examples,
// feature_size: feature_size,
// num_examples_per_block: num_examples_per_block,
// disk_buffer_filename: String::from(disk_buffer_filename),
// strata: strata,
// stats_update_s: stats_update_s,
counts_table_r: counts_table_r,
weights_table_r: weights_table_r,
// num_assigners: num_assigners,
// num_samplers: num_samplers,
// updated_examples_r: updated_examples_r,
updated_examples_s: updated_examples_s,
// sampled_examples_s: sampled_examples,
// sampling_signal: sampling_signal,
// models: models,
positive: positive,
}
}
pub fn init_stratified_from_file(
&self,
filename: String,
size: usize,
batch_size: usize,
feature_size: usize,
range: Range<usize>,
bins: Vec<Bins>,
) {
let mut reader = SerialStorage::new(
filename.clone(),
size,
feature_size,
true,
self.positive.clone(),
None,
range.clone(),
);
let updated_examples_s = self.updated_examples_s.clone();
spawn(move || {
let mut index = 0;
while index < size {
reader.read_raw(batch_size).into_iter().for_each(|data| {
let features: Vec<TFeature> =
data.feature.iter().enumerate()
.map(|(idx, val)| {
if range.start <= idx && idx < range.end {
bins[idx - range.start].get_split_index(*val)
} else {
0
}
}).collect();
let mapped_data = Labeled | {
let strata = Strata::new(num_examples, feature_size, num_examples_per_block,
disk_buffer_filename);
let strata = Arc::new(RwLock::new(strata));
let (counts_table_r, mut counts_table_w) = evmap::new();
let (weights_table_r, mut weights_table_w) = evmap::new();
let (updated_examples_s, updated_examples_r) = channel::bounded(channel_size, "updated-examples");
// The messages in the stats channel are very small, so its capacity can be larger.
let (stats_update_s, stats_update_r) = channel::bounded(5000000, "stats");
// Update shared weights table (non-blocking)
{
let counts_table_r = counts_table_r.clone();
let weights_table_r = weights_table_r.clone();
spawn(move || {
while let Some((index, (count, weight))) = stats_update_r.recv() {
let val = counts_table_r.get_and(&index, |vs| vs[0]);
counts_table_w.update(index, val.unwrap_or(0) + count);
let cur = weights_table_r.get_and(&index, |vs: &[Box<F64>]| vs[0].val)
.unwrap_or(0.0); | identifier_body |
mod.rs | Sender<ExampleWithScore>,
// sampled_examples_s: Sender<(ExampleWithScore, u32)>,
// sampling_signal: Receiver<Signal>,
// models: Receiver<Model>,
positive: String,
}
impl StratifiedStorage {
/// Create the stratified storage structure.
///
/// * `num_examples`: the total number of examples in the training data set
/// * `feature_size`: the number of features of the training examples
/// * `num_examples_per_block`: the number of examples to write back to disk in batch (explained below)
/// * `disk_buffer_filename`: the name of the binary file for saving the examples in strata on disk
/// If such file does not exist, it will be created
/// * `num_assigners`: the number of threads that run the `Assigner`s (explained below)
/// * `num_samplers`: the number of threads that run the `Sampler`s (explained below)
/// * `sampled_examples`: the channle that the stratified storage sends the sampled examples to
/// the buffer loader
/// * `sampling_singal`: the channle that the buffer loader sends sampling signals to
/// start and stop the samplers as needed
/// * `models`: the channel that the booster sends the latest models in
///
/// Stratified storage organizes training examples according to their weights
/// given current learning model.
/// The examples are assigned to different strata so that the weight ratio of the examples
/// within the same stratum does not exceed 2.
/// Most examples in a stratum are stored on disk, while a small number of examples remains
/// in memory to be writen to disk or just read from disk and ready to send out to the sampler.
///
/// The overall structure of the stratified storage is as follow:
///
/// 
///
/// The `Assigner`s read examples with updated scores from the `Sampler` and write them back to
/// the corresponding strata based on their new weights. The examples would be put into the
/// `In Queue`s first till a proper number of examples are accumulated that belongs to the
/// same strata, at that point they would be written into disk in batch.
///
/// Meanwhile, a certain number of examples from each stratum are loaded into the memory
/// from the disk and kept in `Out Queue`s.
/// The `Sampler`s iteratively select a stratum with a probability that proportional to
/// the sum of weights of all examples in that stratum, send its next sampled example to the memory
/// buffer, and remove that example from strata.
///
/// A `Shared Weight Table` maintains the sum of the weights of all examples in each stratum.
/// The `Assigner`s increase the value in the `Shared Weight Table` when a new example is inserted into
/// a stratum.
/// The `Sampler`s use the weights in the `Shared Weight Table` to decide which stratum to read next and
/// send its next sampled example to the memory buffer. After an example is processed, the `Sampler` also
/// updates its weight, sends it to right stratum, and updates `Shared Weight Table` accordingly.
pub fn new(
num_examples: usize,
feature_size: usize,
positive: String,
num_examples_per_block: usize,
disk_buffer_filename: &str,
num_assigners: usize,
num_samplers: usize,
sampled_examples: Sender<(ExampleWithScore, u32)>,
sampling_signal: Receiver<Signal>,
models: Receiver<Model>,
channel_size: usize,
debug_mode: bool,
) -> StratifiedStorage {
let strata = Strata::new(num_examples, feature_size, num_examples_per_block,
disk_buffer_filename);
let strata = Arc::new(RwLock::new(strata));
let (counts_table_r, mut counts_table_w) = evmap::new();
let (weights_table_r, mut weights_table_w) = evmap::new();
let (updated_examples_s, updated_examples_r) = channel::bounded(channel_size, "updated-examples");
// The messages in the stats channel are very small, so its capacity can be larger.
let (stats_update_s, stats_update_r) = channel::bounded(5000000, "stats");
// Update shared weights table (non-blocking)
{
let counts_table_r = counts_table_r.clone();
let weights_table_r = weights_table_r.clone();
spawn(move || {
while let Some((index, (count, weight))) = stats_update_r.recv() {
let val = counts_table_r.get_and(&index, |vs| vs[0]);
counts_table_w.update(index, val.unwrap_or(0) + count);
let cur = weights_table_r.get_and(&index, |vs: &[Box<F64>]| vs[0].val)
.unwrap_or(0.0);
weights_table_w.update(index, Box::new(F64 { val: cur + weight }));
{
counts_table_w.refresh();
weights_table_w.refresh();
}
}
});
}
// Monitor the distribution of strata
if debug_mode {
let counts_table_r = counts_table_r.clone();
let weights_table_r = weights_table_r.clone();
spawn(move || {
loop {
sleep(Duration::from_millis(5000));
let mut p: Vec<(i8, f64)> =
weights_table_r.map_into(|a: &i8, b: &[Box<F64>]| (a.clone(), b[0].val));
p.sort_by(|a, b| (a.0).cmp(&b.0));
let mut c: Vec<(i8, i32)> = counts_table_r.map_into(|a, b| (a.clone(), b[0]));
c.sort_by(|a, b| (a.0).cmp(&b.0));
let mut sump: f64 = p.iter().map(|t| t.1).sum();
if get_sign(sump) == 0 {
sump = 1.0;
}
let ps: Vec<String> = p.into_iter()
.map(|(idx, w)| (idx, 100.0 * w / sump))
.map(|(idx, w)| format!("({}, {:.2})", idx, w))
.collect();
debug!("strata weights distr, {}, {}", ps.join(", "), sump);
let sumc: i32 = max(c.iter().map(|t| t.1).sum(), 1);
let cs: Vec<String> = c.into_iter()
.map(|(idx, c)| (idx, 100.0 * c as f32 / (sumc as f32)))
.map(|(idx, c)| format!("({}, {:.2})", idx, c))
.collect();
debug!("strata counts distr, {}, {}", cs.join(", "), sumc);
}
});
}
let assigners = Assigners::new(
updated_examples_r,
strata.clone(),
stats_update_s.clone(),
num_assigners,
);
let samplers = Samplers::new(
strata.clone(),
sampled_examples.clone(),
updated_examples_s.clone(),
models.clone(),
stats_update_s.clone(),
weights_table_r.clone(),
sampling_signal.clone(),
num_samplers,
);
assigners.run();
samplers.run();
StratifiedStorage {
// num_examples: num_examples,
// feature_size: feature_size,
// num_examples_per_block: num_examples_per_block,
// disk_buffer_filename: String::from(disk_buffer_filename),
// strata: strata,
// stats_update_s: stats_update_s,
counts_table_r: counts_table_r,
weights_table_r: weights_table_r,
// num_assigners: num_assigners,
// num_samplers: num_samplers,
// updated_examples_r: updated_examples_r,
updated_examples_s: updated_examples_s,
// sampled_examples_s: sampled_examples,
// sampling_signal: sampling_signal,
// models: models,
positive: positive,
}
}
pub fn init_stratified_from_file(
&self,
filename: String,
size: usize,
batch_size: usize,
feature_size: usize,
range: Range<usize>,
bins: Vec<Bins>,
) {
let mut reader = SerialStorage::new(
filename.clone(),
size,
feature_size,
true,
self.positive.clone(),
None,
range.clone(),
);
let updated_examples_s = self.updated_examples_s.clone();
spawn(move || {
let mut index = 0;
while index < size {
reader.read_raw(batch_size).into_iter().for_each(|data| {
let features: Vec<TFeature> =
data.feature.iter().enumerate()
.map(|(idx, val)| {
if range.start <= idx && idx < range.end | else {
0
}
}).collect();
let mapped_data = Labeled | {
bins[idx - range.start].get_split_index(*val)
} | conditional_block |
my.js | 时后返回的角度,所以应该还是回到最原始的位置,2160是因为我要让它转6圈,就是360*6得来的
duration:8000,
callback:function (){
alert('网络超时,请检查您的网络设置!');
}
});
};
//旋转转盘 item:奖品位置; txt:提示语;
function rotateFn(item, txt){
//alert(111);
var angles = item * (360 / turnplate.restaraunts.length) - (360 / (turnplate.restaraunts.length*2));
if(angles<270){
angles = 270 - angles;
}else{
angles = 360 - angles + 270;
}
$('#wheelcanvas').stopRotate();
$('#wheelcanvas').rotate({
angle: 0, //angle是图片上各奖项对应的角度
animateTo:angles+1800,
duration: 2000,
callback:function (){
// alert(txt);
turnplate.bRotate | $('.stop').show();
if(turnplate.bRotate) return;
turnplate
.bRotate = !turnplate.bRotate;
$.ajax({
type : 'get',
url : base_url+ '&r=activity/lucky-draw',
success : function(res){
var res = res;
var item = res.data[0];
var gift = res.data[1];
rotateFn(item, gift);
$('.num').html(gift);
var str = '<li> 恭喜 <span>123456</span> 获得了 <span class="present">'+ gift +'</span></li>';
var timer;
if( gift === 'A' ){
$('.gift img').attr('src','static/activity/images/chest-icon-zuan.png');
// $('.tip').show(); //抽奖完毕后显示奖品
setTimeout(function(){
$('.stop').hide();
$('.tip').show();
$('#tab1').append(str);
},2000);
}else if( gift === 'B' ){
$('.gift img').attr('src','static/activity/images/coin.png');
// $('.tip').show(); //抽奖完毕后显示奖品
setTimeout(function(){
$('.stop').hide();
$('.tip').show();
$('#tab1').append(str);
},2000);
}else if( gift === 'C' ){
$('.gift img').attr('src','static/activity/images/coin.png');
// $('.tip').show();
setTimeout(function(){
$('.stop').hide();
$('.tip').show();
$('#tab1').append(str);
},2000);
}else if( gift === 'D' ){ //获得实物大奖则显示输入框
// $('.big').show();
setTimeout(function(){
$('.stop').hide();
$('.big').show();
$('#tab1').append(str);
},2000);
}
}
});
// }
// }
// });
});
//页面所有元素加载完毕后执行drawRouletteWheel()方法对转盘进行渲染
window.onload=function(){
drawRouletteWheel();
};
function drawRouletteWheel() {
var canvas = document.getElementById("wheelcanvas");
if (canvas.getContext) {
//根据奖品个数计算圆周角度
var arc = Math.PI / (turnplate.restaraunts.length/2);
var ctx = canvas.getContext("2d");
//在给定矩形内清空一个矩形
ctx.clearRect(0,0,422,422);
//strokeStyle 属性设置或返回用于笔触的颜色、渐变或模式
ctx.strokeStyle = "#FFBE04";
//font 属性设置或返回画布上文本内容的当前字体属性
ctx.font = '20px Microsoft YaHei';
for(var i = 0; i < turnplate.restaraunts.length; i++) {
var angle = turnplate.startAngle + i * arc;
ctx.fillStyle = turnplate.colors[i];
ctx.beginPath();
//arc(x,y,r,起始角,结束角,绘制方向) 方法创建弧/曲线(用于创建圆或部分圆)
ctx.arc(211, 211, turnplate.outsideRadius, angle, angle + arc, false);
ctx.arc(211, 211, turnplate.insideRadius, angle + arc, angle, true);
ctx.stroke();
ctx.fill();
//锁画布(为了保存之前的画布状态)
ctx.save();
//----绘制奖品开始----
ctx.fillStyle = "#E5302F";
var text = turnplate.restaraunts[i];
var line_height = 17;
//translate方法重新映射画布上的 (0,0) 位置
ctx.translate(211 + Math.cos(angle + arc / 2) * turnplate.textRadius, 211 + Math.sin(angle + arc / 2) * turnplate.textRadius);
//rotate方法旋转当前的绘图
ctx.rotate(angle + arc / 2 + Math.PI / 2);
/** 下面代码根据奖品类型、奖品名称长度渲染不同效果,如字体、颜色、图片效果。(具体根据实际情况改变) **/
// if(text.indexOf("M")>0){ //流量包
// var texts = text.split("M");
// for(var j = 0; j<texts.length; j++){
// ctx.font = j == 0?'bold 20px Microsoft YaHei':'16px Microsoft YaHei';
// if(j == 0){
// ctx.fillText(texts[j]+"M", -ctx.measureText(texts[j]+"M").width / 2, j * line_height);
// }else{
// ctx.fillText(texts[j], -ctx.measureText(texts[j]).width / 2, j * line_height);
// }
// }
// }else if(text.indexOf("M") == -1 && text.length>6){ //奖品名称长度超过一定范围
// text = text.substring(0,6)+"||"+text.substring(6);
// var texts = text.split("||");
// for(var j = 0; j<texts.length; j++){
// ctx.fillText(texts[j], -ctx.measureText(texts[j]).width / 2, j * line_height);
// }
// }else{
//在画布上绘制填色的文本。文本的默认颜色是黑色
//measureText()方法返回包含一个对象,该对象包含以像素计的指定字体宽度
ctx.fillText(text, -ctx.measureText(text).width / 2, 0);
// }
var imgs = document.getElementById("imgs").getElementsByTagName('img');
var img = imgs[i];
imgs.onload=function(){
ctx.drawImage(img,-15,10);
};
//添加对应图标
if(text.indexOf("金币")>0){
var img= document.getElementById("gold-img");
img.onload=function(){
ctx.drawImage(img,-15,10);
};
ctx.drawImage(img,-15,10);
}else if(text.indexOf("谢谢参与")>=0){
var img= document.getElementById("sorry-img");
img.onload=function(){
ctx.drawImage(img,-15,10);
};
ctx.drawImage(img,-15,10);
}else if(text.indexOf("钻石")>=0){
var img= document.getElementById("diamond-img");
img.onload=function(){
ctx.drawImage(img,-15,10);
};
ctx.drawImage(img,-15,10);
}
//把当前画布返回(调整)到上一个save()状态之前
ctx.restore();
//----绘制奖品结束----
}
}
}
// 中奖区的高度和大转盘一致
var h = $('.turnplate').height();
console.log(h);
$('.txt').height(h);
// 关闭中奖提示框
$('.off').click(function(){
$('.box').hide();
});
// 选项卡切换
$("#content ul").hide(); // Initially hide all content
$("#tabs li:first").attr("id","current"); // Activate first tab
$("#content ul:first").fadeIn(); // Show first tab content
$('#tabs a').click(function(e) {
e.preventDefault(); | = !turnplate.bRotate;
}
});
};
$('.pointer').click(function (){
// $.ajax({
// type : 'post',
// url : base_url + '&r=activity/free-draw',
// data : { user_id : 12345 },
// success : function(res){
// var res = res;
// if( res.ret_code == 2070 ){
// alert('您今日免费抽奖次数已用完,如需再次抽奖,需消耗10钻石/次,请确认是否继续?');
// //后续操作
// }else if( res.ret_code == 0 ){
//第一次免费抽奖 | identifier_body |
my.js | ()
{
var name,value;
var str=location.href; //取得整个地址栏
var num=str.indexOf("?")
str=str.substr(num+1); //取得所有参数 stringvar.substr(start [, length ]
var arr=str.split("&"); //各个参数放到数组里
for(var i=0;i < arr.length;i++){
num=arr[i].indexOf("=");
if(num>0){
name=arr[i].substring(0,num);
value=arr[i].substr(num+1);
this[name]=value;
}
}
}
var Request=new UrlSearch(); //实例化
var base_url = "http://dltest.sparkingfuture.com/basic/web/index.php?gid=" +Request.gid ;
var turnplate={
restaraunts:[], //大转盘奖品名称
colors:[], //大转盘奖品区块对应背景颜色
srcs:[], //大转盘奖品区块对应的缩略图src
outsideRadius:192, //大转盘外圆的半径
textRadius:155, //大转盘奖品位置距离圆心的距离
insideRadius:68, //大转盘内圆的半径
startAngle:0, //开始角度
bRotate:false //false:停止;ture:旋转
};
//动态添加大转盘的奖品与奖品区域背景颜色
turnplate.restaraunts = ["2钻石A", "20金币B", "5钻石C", "实物大奖D "];
turnplate.colors = ["#FFF4D6", "#FFFFFF","#FFF4D6", "#FFFFFF"];
turnplate.srcs = ['static/activity/images/chest-icon-zuan.png','static/activity/images/coin.png','static/activity/images/chest-icon-zuan.png'];
var rotateTimeOut = function (){ //超时函数
$('#wheelcanvas').rotate({
angle:0,
animateTo:2160, //这里是设置请求超时后返回的角度,所以应该还是回到最原始的位置,2160是因为我要让它转6圈,就是360*6得来的
duration:8000,
callback:function (){
alert('网络超时,请检查您的网络设置!');
}
});
};
//旋转转盘 item:奖品位置; txt:提示语;
function rotateFn(item, txt){
//alert(111);
var angles = item * (360 / turnplate.restaraunts.length) - (360 / (turnplate.restaraunts.length*2));
if(angles<270){
angles = 270 - angles;
}else{
angles = 360 - angles + 270;
}
$('#wheelcanvas').stopRotate();
$('#wheelcanvas').rotate({
angle: 0, //angle是图片上各奖项对应的角度
animateTo:angles+1800,
duration: 2000,
callback:function (){
// alert(txt);
turnplate.bRotate = !turnplate.bRotate;
}
});
};
$('.pointer').click(function (){
// $.ajax({
// type : 'post',
// url : base_url + '&r=activity/free-draw',
// data : { user_id : 12345 },
// success : function(res){
// var res = res;
// if( res.ret_code == 2070 ){
// alert('您今日免费抽奖次数已用完,如需再次抽奖,需消耗10钻石/次,请确认是否继续?');
// //后续操作
// }else if( res.ret_code == 0 ){
//第一次免费抽奖
$('.stop').show();
if(turnplate.bRotate) return;
turnplate.bRotate = !turnplate.bRotate;
$.ajax({
type : 'get',
url : base_url+ '&r=activity/lucky-draw',
success : function(res){
var res = res;
var item = res.data[0];
var gift = res.data[1];
rotateFn(item, gift);
$('.num').html(gift);
var str = '<li> 恭喜 <span>123456</span> 获得了 <span class="present">'+ gift +'</span></li>';
var timer;
if( gift === 'A' ){
$('.gift img').attr('src','static/activity/images/chest-icon-zuan.png');
// $('.tip').show(); //抽奖完毕后显示奖品
setTimeout(function(){
$('.stop').hide();
$('.tip').show();
$('#tab1').append(str);
},2000);
}else if( gift === 'B' ){
$('.gift img').attr('src','static/activity/images/coin.png');
// $('.tip').show(); //抽奖完毕后显示奖品
setTimeout(function(){
$('.stop').hide();
$('.tip').show();
$('#tab1').append(str);
},2000);
}else if( gift === 'C' ){
$('.gift img').attr('src','static/activity/images/coin.png');
// $('.tip').show();
setTimeout(function(){
$('.stop').hide();
$('.tip').show();
$('#tab1').append(str);
},2000);
}else if( gift === 'D' ){ //获得实物大奖则显示输入框
// $('.big').show();
setTimeout(function(){
$('.stop').hide();
$('.big').show();
$('#tab1').append(str);
},2000);
}
}
});
// }
// }
// });
});
//页面所有元素加载完毕后执行drawRouletteWheel()方法对转盘进行渲染
window.onload=function(){
drawRouletteWheel();
};
function drawRouletteWheel() {
var canvas = document.getElementById("wheelcanvas");
if (canvas.getContext) {
//根据奖品个数计算圆周角度
var arc = Math.PI / (turnplate.restaraunts.length/2);
var ctx = canvas.getContext("2d");
//在给定矩形内清空一个矩形
ctx.clearRect(0,0,422,422);
//strokeStyle 属性设置或返回用于笔触的颜色、渐变或模式
ctx.strokeStyle = "#FFBE04";
//font 属性设置或返回画布上文本内容的当前字体属性
ctx.font = '20px Microsoft YaHei';
for(var i = 0; i < turnplate.restaraunts.length; i++) {
var angle = turnplate.startAngle + i * arc;
ctx.fillStyle = turnplate.colors[i];
ctx.beginPath();
//arc(x,y,r,起始角,结束角,绘制方向) 方法创建弧/曲线(用于创建圆或部分圆)
ctx.arc(211, 211, turnplate.outsideRadius, angle, angle + arc, false);
ctx.arc(211, 211, turnplate.insideRadius, angle + arc, angle, true);
ctx.stroke();
ctx.fill();
//锁画布(为了保存之前的画布状态)
ctx.save();
//----绘制奖品开始----
ctx.fillStyle = "#E5302F";
var text = turnplate.restaraunts[i];
var line_height = 17;
//translate方法重新映射画布上的 (0,0) 位置
ctx.translate(211 + Math.cos(angle + arc / 2) * turnplate.textRadius, 211 + Math.sin(angle + arc / 2) * turnplate.textRadius);
//rotate方法旋转当前的绘图
ctx.rotate(angle + arc / 2 + Math.PI / 2);
/** 下面代码根据奖品类型、奖品名称长度渲染不同效果,如字体、颜色、图片效果。(具体根据实际情况改变) **/
// if(text.indexOf("M")>0){ //流量包
// var texts = text.split("M");
// for(var j = 0; j<texts.length; j++){
// ctx.font = j == 0?'bold 20px Microsoft YaHei':'16px Microsoft YaHei';
// if(j == 0){
// ctx.fillText(texts[j]+"M", -ctx.measureText(texts[j]+"M").width / 2, j * line_height);
// }else{
// ctx.fillText(texts[j], -ctx.measureText(texts[j]).width / 2, j * line_height);
// }
// }
// }else if(text.indexOf("M") == -1 && text.length>6){ //奖品名称长度超过一定范围
// text = text.substring(0,6)+"||"+text.substring(6);
// var texts = text.split("||");
// for(var j = 0; j<texts.length; j++){
// | UrlSearch | identifier_name | |
my.js | 时后返回的角度,所以应该还是回到最原始的位置,2160是因为我要让它转6圈,就是360*6得来的
duration:8000,
callback:function (){
alert('网络超时,请检查您的网络设置!');
}
});
};
//旋转转盘 item:奖品位置; txt:提示语;
function rotateFn(item, txt){
//alert(111);
var angles = item * (360 / turnplate.restaraunts.length) - (360 / (turnplate.restaraunts.length*2));
if(angles<270){
angles = 270 - angles;
}else{
angles = 360 - angles + 270;
}
$('#wheelcanvas').stopRotate();
$('#wheelcanvas').rotate({
angle: 0, //angle是图片上各奖项对应的角度
animateTo:angles+1800,
duration: 2000,
callback:function (){
// alert(txt);
turnplate.bRotate = !turnplate.bRotate;
}
});
};
$('.pointer').click(function (){
// $.ajax({
// type : 'post',
// url : base_url + '&r=activity/free-draw',
// data : { user_id : 12345 },
// success : function(res){
// var res = res;
// if( res.ret_code == 2070 ){
// alert('您今日免费抽奖次数已用完,如需再次抽奖,需消耗10钻石/次,请确认是否继续?');
// //后续操作
// }else if( res.ret_code == 0 ){
//第一次免费抽奖
$('.stop').show();
if(turnplate.bRotate) return;
turnplate.bRotate = !turnplate.bRotate;
$.ajax({
type : 'get',
url : base_url+ '&r=activity/lucky-draw',
success : function(res){
var res = res;
var item = res.data[0];
var gift = res.data[1];
rotateFn(item, gift);
$('.num').html(gift);
var str = '<li> 恭喜 <span>123456</span> 获得了 <span class="present">'+ gift +'</span></li>';
var timer;
if( gift === 'A' ){
$('.gift img').attr('src','static/activity/images/chest-icon-zuan.png');
// $('.tip').show(); //抽奖完毕后显示奖品
setTimeout(function(){
$('.stop').hide();
$('.tip').show();
$('#tab1').append(str);
},2000);
}else if( gift === 'B' ){
$('.gift img').attr('src','static/activity/images/coin.png');
// $('.tip').show(); //抽奖完毕后显示奖品
setTimeout(function(){
$('.stop').hide();
$('.tip').show();
$('#tab1').append(str);
},2000);
}else if( gift === 'C' ){
$('.gift img').attr('src','static/activity/images/coin.png');
// $('.tip').show();
setTimeout(function(){
$('.stop').hide();
$('.tip').show();
$('#tab1').append(str);
},2000);
}else if( gift === 'D' ){ //获得实物大奖则显示输入框
// $('.big').show();
setTimeout(function(){
$('.stop').hide();
$('.big').show();
$('#tab1').append(str);
},2000);
}
}
});
// }
// }
// });
});
//页面所有元素加载完毕后执行drawRouletteWheel()方法对转盘进行渲染
window.onload=function(){
drawRouletteWheel();
};
function drawRouletteWheel() {
var canvas = document.getElementById("wheelcanvas");
if (canvas.getContext) {
//根据奖品个数计算圆周角度
var arc = Math.PI / (turnplate.restaraunts.length/2);
var ctx = canvas.getContext("2d");
//在给定矩形内清空一个矩形
ctx.clearRect(0,0,422,422);
//strokeStyle 属性设置或返回用于笔触的颜色、渐变或模式
ctx.strokeStyle = "#FFBE04";
//font 属性设置或返回画布上文本内容的当前字体属性
ctx.font = '20px Microsoft YaHei';
for(var i = 0; i < turnplate.restaraunts.length; i++) {
var angle = turnplate.startAngle + i * arc;
ctx.fillStyle = turnplate.colors[i];
ctx.beginPath();
//arc(x,y,r,起始角,结束角,绘制方向) 方法创建弧/曲线(用于创建圆或部分圆)
ctx.arc(211, 211, turnplate.outsideRadius, angle, angle + arc, false);
ctx.arc(211, 211, turnplate.insideRadius, angle + arc, angle, true);
ctx.stroke();
ctx.fill();
//锁画布(为了保存之前的画布状态)
ctx.save();
//----绘制奖品开始----
ctx.fillStyle = "#E5302F";
var text = turnplate.restaraunts[i];
var line_height = 17;
//translate方法重新映射画布上的 (0,0) 位置
ctx.translate(211 + Math.cos(angle + arc / 2) * turnplate.textRadius, 211 + Math.sin(angle + arc / 2) * turnplate.textRadius);
//rotate方法旋转当前的绘图
ctx.rotate(angle + arc / 2 + Math.PI / 2);
/** 下面代码根据奖品类型、奖品名称长度渲染不同效果,如字体、颜色、图片效果。(具体根据实际情况改变) **/
// if(text.indexOf("M")>0){ //流量包
// var texts = text.split("M");
// for(var j = 0; j<texts.length; j++){
// ctx.font = j == 0?'bold 20px Microsoft YaHei':'16px Microsoft YaHei';
// if(j == 0){
// ctx.fillText(texts[j]+"M", -ctx.measureText(texts[j]+"M").width / 2, j * line_height);
// }else{
// ctx.fillText(texts[j], -ctx.measureText(texts[j]).width / 2, j * line_height);
// }
// }
// }else if(text.indexOf("M") == -1 && text.length>6){ //奖品名称长度超过一定范围
// text = text.substring(0,6)+"||"+text.substring(6);
// var texts = text.split("||");
// for(var j = 0; j<texts.length; j++){
// ctx.fillText(texts[j], -ctx.measureText(texts[j]).width / 2, j * line_height);
// }
// }else{
//在画布上绘制填色的文本。文本的默认颜色是黑色
//measureText()方法返回包含一个对象,该对象包含以像素计的指定字体宽度
ctx.fillText(text, -ctx.measureText(text).width / 2, 0);
// }
var imgs = document.getElementById("imgs").getElementsByTagName('img');
var img = imgs[i];
imgs.onload=function(){
ctx.drawImage(img,-15,10);
};
//添加对应图标
if(text.indexOf("金币")>0){
var img= document.getElementById("gold-img");
img.onload=function(){
ctx.drawImage(img,-15,10);
};
ctx.drawImage(img,-15,10);
}else if(text.indexOf("谢谢参与")>=0){
var img= document.getElementById("sorry-img");
img.onload=function(){
ctx.drawImage(img,-15,10);
};
ctx.drawImage(img,-15,10);
}else if(text.indexOf("钻石")>=0){
var img= document.getElementById("diamond-img");
img.onload=function(){
ctx.drawImage(img,-15,10);
};
ctx.drawImage(img,-15,10);
}
//把当前画布返回(调整)到上一个save()状态之前
ctx.restore();
//----绘制奖品结束----
}
} | // 中奖区的高度和大转盘一致
var h = $('.turnplate').height();
console.log(h);
$('.txt').height(h);
// 关闭中奖提示框
$('.off').click(function(){
$('.box').hide();
});
// 选项卡切换
$("#content ul").hide(); // Initially hide all content
$("#tabs li:first").attr("id","current"); // Activate first tab
$("#content ul:first").fadeIn(); // Show first tab content
$('#tabs a').click(function(e) {
e.preventDefault(); | }
| random_line_split |
core.go | return false
}
}
func (r *Rule) checkIP(v interface{}) (bool, error) {
if strings.HasSuffix(strings.ToLower(r.Key), "ip") {
ver1, ok := r.Val.(string)
if ok {
ver2, ok := v.(string)
if ok {
if len(strings.Split(ver2, ".")) == 4 {
return CheckIP(ver2, ver1), nil
}
}
}
}
return false, errors.New("invalid ip")
}
func (r *Rule) checkSemver(v interface{}) (bool, error) {
op := r.Op
if strings.HasSuffix(strings.ToLower(r.Key), "version") {
ver1, ok := r.Val.(string)
if ok {
validate := true
ver2, ok := v.(string)
if ok {
/*v1len := len(strings.Split(ver1, "."))
v2len := len(strings.Split(ver2, "."))
if v1len > 4 || v2len > 4 {
validate = false
} else {
if v1len != v2len {
if v1len > v2len {
for i:=0; i< v1len - v2len; i++ {
//ver2 += ".0"
}
} else if v1len < v2len {
for i:=0; i< v2len - v1len; i++ {
//ver1 += ".0"
}
}
}
}*/
} else {
validate = false
}
if validate {
ver1 = repair(ver1)
constraint, err := semver.NewConstraint(op + " " + ver1)
if err == nil {
ver2 = repair(ver2)
version, err := semver.NewVersion(ver2)
if err == nil {
return constraint.Check(version), nil
}
}
}
}
}
return false, errors.New("invalid semver")
}
func repair(ver string) string {
if len(strings.Split(ver, ".")) > 3 {
//4位版本号,不含'-',转rc版本
if strings.Index(ver, "-") == -1 {
versions := strings.Split(ver, ".")
return versions[0] + "." + versions[1] + "." + versions[2] + "-rc." + versions[3]
}
}
return ver
}
func pluck(key string, o map[string]interface{}) interface{} {
if o == nil || key == EmptyStr {
return nil
}
paths := strings.Split(key, ".")
var ok bool
for index, step := range paths {
// last step is object key
if index == len(paths)-1 {
return o[step]
}
// explore deeper
if o, ok = o[step].(map[string]interface{}); !ok {
return nil
}
}
return nil
}
func formatNumber(v interface{}) float64 {
switch t := v.(type) {
case uint:
return float64(t)
case uint8:
return float64(t)
case uint16:
return float64(t)
case uint32:
return float64(t)
case uint64:
return float64(t)
case int:
return float64(t)
case int8:
return float64(t)
case int16:
return float64(t)
case int32:
return float64(t)
case int64:
return float64(t)
case float32:
return float64(t)
case float64:
return t
default:
return 0
}
}
func checkRegex(pattern, o string) bool {
regex, err := regexp.Compile(pattern)
if err != nil {
return false
}
return regex.MatchString(o)
}
func formatLogicExpression(strRawExpr string) string {
var flagPre, flagNow string
strBracket := "bracket"
strSpace := "space"
strNotSpace := "notSpace"
strOrigin := strings.ToLower(strRawExpr)
runesPretty := make([]rune, 0)
for _, c := range strOrigin {
if c <= '9' && c >= '0' {
flagNow = "num"
} else if c <= 'z' && c >= 'a' {
flagNow = "char"
} else if c == '(' || c == ')' {
flagNow = strBracket
} else {
flagNow = flagPre
}
if flagNow != flagPre || flagNow == strBracket && flagPre == strBracket {
// should insert space here
runesPretty = append(runesPretty, []rune(Space)[0])
}
runesPretty = append(runesPretty, c)
flagPre = flagNow
}
// remove redundant space
flagPre = strNotSpace
runesTrim := make([]rune, 0)
for _, c := range runesPretty {
if c == []rune(Space)[0] {
flagNow = strSpace
} else {
flagNow = strNotSpace
}
if flagNow == strSpace && flagPre == strSpace {
// continuous space
continue
} else {
runesTrim = append(runesTrim, c)
}
flagPre = flagNow
}
strPrettyTrim := string(runesTrim)
strPrettyTrim = strings.Trim(strPrettyTrim, Space)
return strPrettyTrim
}
func isFormatLogicExpressionAllValidSymbol(strFormatLogic string) bool {
listSymbol := strings.Split(strFormatLogic, Space)
for _, symbol := range listSymbol {
flag := false
regex, err := regexp.Compile(PatternNumber)
if err != nil {
return false
}
if regex.MatchString(symbol) {
// is number ok
continue
}
for _, op := range ValidOperators {
if op == symbol {
// is operator ok
flag = true
}
}
for _, v := range []string{"(", ")"} {
if v == symbol {
// is bracket ok
flag = true
}
}
if !flag {
return false
}
}
return true
}
func isFormatLogicExpressionAllIdsExist(strFormatLogic string, rules *Rules) bool {
mapExistIds := make(map[string]bool)
for _, eachRule := range rules.Rules {
mapExistIds[strconv.Itoa(eachRule.ID)] = true
}
listSymbol := strings.Split(strFormatLogic, Space)
regex, err := regexp.Compile(PatternNumber)
if err != nil {
return false
}
for _, symbol := range listSymbol {
if regex.MatchString(symbol) {
// is id, check it
if _, ok := mapExistIds[symbol]; ok {
continue
} else {
return false
}
}
}
return true
}
func tryToCalculateResultByFormatLogicExpressionWithRandomProbe(strFormatLogic string) error {
listSymbol := strings.Split(strFormatLogic, Space)
regex, err := regexp.Compile(PatternNumber)
if err != nil {
return err
}
// random probe
mapProbe := make(map[int]bool)
for _, symbol := range listSymbol {
if regex.MatchString(symbol) {
id, iErr := strconv.Atoi(symbol)
if iErr != nil {
return iErr
}
randomInt := rand.Intn(10)
randomBool := randomInt < 5
mapProbe[id] = randomBool
}
}
// calculate still use reverse_polish_notation
r := &Rules{}
_, err = r.calculateExpression(strFormatLogic, mapProbe)
return err
}
func numOfOperandInLogic(op string) int8 {
mapOperand := map[string]int8{"or": 2, "and": 2, "not": 1}
return mapOperand[op]
}
func computeOneInLogic(op string, v []bool) (bool, error) {
switch op {
case "or":
return v[0] || v[1], nil
case "and":
return v[0] && v[1], nil
case "not":
return !v[0], nil
default:
return false, errors.New("unrecognized op")
}
}
func isIn(needle, haystack string, isNeedleNum bool) bool {
// get number of ne | edle
var iNum float64
var err error
if isNeedleNum {
if iNum, err = strconv.ParseFloat(needle, 64); err != nil {
return false
}
}
// compatible to "1, 2, 3" and "1,2,3"
li := strings.Split(haystack, ",")
for _, o := range li {
trimO := strings.TrimLeft(o, " ")
if isNeedleNum {
oNum, err := strconv.ParseFloat(trimO, 64)
if err != nil {
continue
}
if math.Abs(iNum-oNum) < 1e-5 {
// 考虑浮点精度问题
return true | identifier_body | |
core.go | func validLogic(logic string) (string, error) {
formatLogic := formatLogicExpression(logic)
if formatLogic == Space || formatLogic == EmptyStr {
return EmptyStr, nil
}
// validate the formatLogic string
// 1. only contain legal symbol
isValidSymbol := isFormatLogicExpressionAllValidSymbol(formatLogic)
if !isValidSymbol {
return EmptyStr, errors.New("invalid logic expression: invalid symbol")
}
// 2. check logic expression by trying to calculate result with random bool
err := tryToCalculateResultByFormatLogicExpressionWithRandomProbe(formatLogic)
if err != nil {
return EmptyStr, errors.New("invalid logic expression: can not calculate")
}
return formatLogic, nil
}
func injectLogic(rules *Rules, logic string) (*Rules, error) {
formatLogic, err := validLogic(logic)
if err != nil {
return nil, err
}
if formatLogic == EmptyStr {
return rules, nil
}
// all ids in logic string must be in rules ids
isValidIds := isFormatLogicExpressionAllIdsExist(formatLogic, rules)
if !isValidIds {
return nil, errors.New("invalid logic expression: invalid id")
}
rules.Logic = formatLogic
return rules, nil
}
func injectExtractInfo(rules *Rules, extractInfo map[string]string) *Rules {
if name, ok := extractInfo["name"]; ok {
rules.Name = name
}
if msg, ok := extractInfo["msg"]; ok {
rules.Msg = msg
}
return rules
}
func newRulesWithJSON(jsonStr []byte) (*Rules, error) {
var rules []*Rule
err := json.Unmarshal(jsonStr, &rules)
if err != nil {
return nil, err
}
return newRulesWithArray(rules), nil
}
func newRulesWithArray(rules []*Rule) *Rules {
// give rule an id
var maxID = 1
for _, rule := range rules {
if rule.ID > maxID {
maxID = rule.ID
}
}
for index := range rules {
if rules[index].ID == 0 {
maxID++
rules[index].ID = maxID
}
}
return &Rules{
Rules: rules,
}
}
func (rs *Rules) fitWithMapInFact(o map[string]interface{}) (bool, map[int]string, map[int]interface{}) {
var results = make(map[int]bool)
var tips = make(map[int]string)
var values = make(map[int]interface{})
var hasLogic = false
var allRuleIDs []int
if rs.Logic != EmptyStr {
hasLogic = true
}
for _, rule := range rs.Rules {
v := pluck(rule.Key, o)
if v != nil && rule.Val != nil {
typeV := reflect.TypeOf(v)
typeR := reflect.TypeOf(rule.Val)
if !typeV.Comparable() || !typeR.Comparable() {
return false, nil, nil
}
}
values[rule.ID] = v
flag := rule.fit(v)
results[rule.ID] = flag
if !flag {
// fit false, record msg, for no logic expression usage
tips[rule.ID] = rule.Msg
}
allRuleIDs = append(allRuleIDs, rule.ID)
}
// compute result by considering logic
if !hasLogic {
for _, flag := range results {
if !flag {
return false, tips, values
}
}
return true, rs.getTipsByRuleIDs(allRuleIDs), values
}
answer, ruleIDs, err := rs.calculateExpressionByTree(results)
// tree can return fail reasons in fact
tips = rs.getTipsByRuleIDs(ruleIDs)
if err != nil {
return false, nil, values
}
return answer, tips, values
}
func (rs *Rules) getTipsByRuleIDs(ids []int) map[int]string {
var tips = make(map[int]string)
var allTips = make(map[int]string)
for _, rule := range rs.Rules {
allTips[rule.ID] = rule.Msg
}
for _, id := range ids {
tips[id] = allTips[id]
}
return tips
}
func (r *Rule) fit(v interface{}) bool {
if check, err := r.checkSemver(v); err == nil {
return check
}
if check, err := r.checkIP(v); err == nil {
return check
}
op := r.Op
// judge if need convert to uniform type
var ok bool
// index-0 actual, index-1 expect
var pairStr = make([]string, 2)
var pairNum = make([]float64, 2)
var isStr, isNum, isObjStr, isRuleStr bool
pairStr[0], ok = v.(string)
if !ok {
pairNum[0] = formatNumber(v)
isStr = false
isNum = true
isObjStr = false
} else {
isStr = true
isNum = false
isObjStr = true
}
pairStr[1], ok = r.Val.(string)
if !ok {
pairNum[1] = formatNumber(r.Val)
isStr = false
isRuleStr = false
} else {
isNum = false
isRuleStr = true
}
var flagOpIn bool
// if in || nin
if op == "@" || op == "in" || op == "!@" || op == "nin" || op == "<<" || op == "between" {
flagOpIn = true
if !isObjStr && isRuleStr {
pairStr[0] = strconv.FormatFloat(pairNum[0], 'f', -1, 64)
}
}
// if types different, ignore in & nin
if !isStr && !isNum && !flagOpIn {
return false
}
switch op {
case "=", "eq":
if isNum {
return pairNum[0] == pairNum[1]
}
if isStr {
return pairStr[0] == pairStr[1]
}
return false
case ">", "gt":
if isNum {
return pairNum[0] > pairNum[1]
}
if isStr {
return pairStr[0] > pairStr[1]
}
return false
case "<", "lt":
if isNum {
return pairNum[0] < pairNum[1]
}
if isStr {
return pairStr[0] < pairStr[1]
}
return false
case ">=", "gte":
if isNum {
return pairNum[0] >= pairNum[1]
}
if isStr {
return pairStr[0] >= pairStr[1]
}
return false
case "<=", "lte":
if isNum {
return pairNum[0] <= pairNum[1]
}
if isStr {
return pairStr[0] <= pairStr[1]
}
return false
case "!=", "neq":
if isNum {
return pairNum[0] != pairNum[1]
}
if isStr {
return pairStr[0] != pairStr[1]
}
return false
case "@", "in":
return isIn(pairStr[0], pairStr[1], !isObjStr)
case "!@", "nin":
return !isIn(pairStr[0], pairStr[1], !isObjStr)
case "^$", "regex":
return checkRegex(pairStr[1], pairStr[0])
case "0", "empty":
return v == nil
case "1", "nempty":
return v != nil
case "<<", "between":
return isBetween(pairNum[0], pairStr[1])
case "@@", "intersect":
return isIntersect(pairStr[1], pairStr[0])
default:
return false
}
}
func (r *Rule) checkIP(v interface{}) (bool, error) {
if strings.HasSuffix(strings.ToLower(r.Key), "ip") {
ver1, ok := r.Val.(string)
if ok {
ver2, ok := v.(string)
if ok {
if len(strings.Split(ver2, ".")) == 4 {
return CheckIP(ver2, ver1), nil
}
}
}
}
return false, errors.New("invalid ip")
}
func (r *Rule) checkSemver(v interface{}) (bool, error) {
op := r.Op
if strings.HasSuffix(strings.ToLower(r.Key), "version") {
ver1, ok := r.Val.(string)
if ok {
validate := true
ver2, ok := v.(string)
if ok {
/*v1len := len(strings.Split(ver1, "."))
v2len := len(strings.Split(ver2, "."))
if v1len > 4 || v2len > 4 {
validate = false
} else {
if v1len != v2len {
if v1len > v2len {
for i:=0; i< v1len - v2len; i++ {
| )
| random_line_split | |
core.go |
for _, rule := range rs.Rules {
v := pluck(rule.Key, o)
if v != nil && rule.Val != nil {
typeV := reflect.TypeOf(v)
typeR := reflect.TypeOf(rule.Val)
if !typeV.Comparable() || !typeR.Comparable() {
return false, nil, nil
}
}
values[rule.ID] = v
flag := rule.fit(v)
results[rule.ID] = flag
if !flag {
// fit false, record msg, for no logic expression usage
tips[rule.ID] = rule.Msg
}
allRuleIDs = append(allRuleIDs, rule.ID)
}
// compute result by considering logic
if !hasLogic {
for _, flag := range results {
if !flag {
return false, tips, values
}
}
return true, rs.getTipsByRuleIDs(allRuleIDs), values
}
answer, ruleIDs, err := rs.calculateExpressionByTree(results)
// tree can return fail reasons in fact
tips = rs.getTipsByRuleIDs(ruleIDs)
if err != nil {
return false, nil, values
}
return answer, tips, values
}
func (rs *Rules) getTipsByRuleIDs(ids []int) map[int]string {
var tips = make(map[int]string)
var allTips = make(map[int]string)
for _, rule := range rs.Rules {
allTips[rule.ID] = rule.Msg
}
for _, id := range ids {
tips[id] = allTips[id]
}
return tips
}
func (r *Rule) fit(v interface{}) bool {
if check, err := r.checkSemver(v); err == nil {
return check
}
if check, err := r.checkIP(v); err == nil {
return check
}
op := r.Op
// judge if need convert to uniform type
var ok bool
// index-0 actual, index-1 expect
var pairStr = make([]string, 2)
var pairNum = make([]float64, 2)
var isStr, isNum, isObjStr, isRuleStr bool
pairStr[0], ok = v.(string)
if !ok {
pairNum[0] = formatNumber(v)
isStr = false
isNum = true
isObjStr = false
} else {
isStr = true
isNum = false
isObjStr = true
}
pairStr[1], ok = r.Val.(string)
if !ok {
pairNum[1] = formatNumber(r.Val)
isStr = false
isRuleStr = false
} else {
isNum = false
isRuleStr = true
}
var flagOpIn bool
// if in || nin
if op == "@" || op == "in" || op == "!@" || op == "nin" || op == "<<" || op == "between" {
flagOpIn = true
if !isObjStr && isRuleStr {
pairStr[0] = strconv.FormatFloat(pairNum[0], 'f', -1, 64)
}
}
// if types different, ignore in & nin
if !isStr && !isNum && !flagOpIn {
return false
}
switch op {
case "=", "eq":
if isNum {
return pairNum[0] == pairNum[1]
}
if isStr {
return pairStr[0] == pairStr[1]
}
return false
case ">", "gt":
if isNum {
return pairNum[0] > pairNum[1]
}
if isStr {
return pairStr[0] > pairStr[1]
}
return false
case "<", "lt":
if isNum {
return pairNum[0] < pairNum[1]
}
if isStr {
return pairStr[0] < pairStr[1]
}
return false
case ">=", "gte":
if isNum {
return pairNum[0] >= pairNum[1]
}
if isStr {
return pairStr[0] >= pairStr[1]
}
return false
case "<=", "lte":
if isNum {
return pairNum[0] <= pairNum[1]
}
if isStr {
return pairStr[0] <= pairStr[1]
}
return false
case "!=", "neq":
if isNum {
return pairNum[0] != pairNum[1]
}
if isStr {
return pairStr[0] != pairStr[1]
}
return false
case "@", "in":
return isIn(pairStr[0], pairStr[1], !isObjStr)
case "!@", "nin":
return !isIn(pairStr[0], pairStr[1], !isObjStr)
case "^$", "regex":
return checkRegex(pairStr[1], pairStr[0])
case "0", "empty":
return v == nil
case "1", "nempty":
return v != nil
case "<<", "between":
return isBetween(pairNum[0], pairStr[1])
case "@@", "intersect":
return isIntersect(pairStr[1], pairStr[0])
default:
return false
}
}
func (r *Rule) checkIP(v interface{}) (bool, error) {
if strings.HasSuffix(strings.ToLower(r.Key), "ip") {
ver1, ok := r.Val.(string)
if ok {
ver2, ok := v.(string)
if ok {
if len(strings.Split(ver2, ".")) == 4 {
return CheckIP(ver2, ver1), nil
}
}
}
}
return false, errors.New("invalid ip")
}
func (r *Rule) checkSemver(v interface{}) (bool, error) {
op := r.Op
if strings.HasSuffix(strings.ToLower(r.Key), "version") {
ver1, ok := r.Val.(string)
if ok {
validate := true
ver2, ok := v.(string)
if ok {
/*v1len := len(strings.Split(ver1, "."))
v2len := len(strings.Split(ver2, "."))
if v1len > 4 || v2len > 4 {
validate = false
} else {
if v1len != v2len {
if v1len > v2len {
for i:=0; i< v1len - v2len; i++ {
//ver2 += ".0"
}
} else if v1len < v2len {
for i:=0; i< v2len - v1len; i++ {
//ver1 += ".0"
}
}
}
}*/
} else {
validate = false
}
if validate {
ver1 = repair(ver1)
constraint, err := semver.NewConstraint(op + " " + ver1)
if err == nil {
ver2 = repair(ver2)
version, err := semver.NewVersion(ver2)
if err == nil {
return constraint.Check(version), nil
}
}
}
}
}
return false, errors.New("invalid semver")
}
func repair(ver string) string {
if len(strings.Split(ver, ".")) > 3 {
//4位版本号,不含'-',转rc版本
if strings.Index(ver, "-") == -1 {
versions := strings.Split(ver, ".")
return versions[0] + "." + versions[1] + "." + versions[2] + "-rc." + versions[3]
}
}
return ver
}
func pluck(key string, o map[string]interface{}) interface{} {
if o == nil || key == EmptyStr {
return nil
}
paths := strings.Split(key, ".")
var ok bool
for index, step := range paths {
// last step is object key
if index == len(paths)-1 {
return o[step]
}
// explore deeper
if o, ok = o[step].(map[string]interface{}); !ok {
return nil
}
}
return nil
}
func formatNumber(v interface{}) float64 {
switch t := v.(type) {
case uint:
return float64(t)
case uint8:
return float64(t)
case uint16:
return float64(t)
case uint32:
return float64(t)
case uint64:
return float64(t)
case int:
return float64(t)
case int8:
return float64(t)
case int16:
return float64(t)
case int32:
return float64(t)
case int64:
return float64(t)
case float32:
return float64(t)
case float64:
return t
default:
return 0
}
}
func checkRegex(pattern, o string) bool {
regex, err := regexp.Compile(pattern | {
hasLogic = true
} | conditional_block | |
core.go | pairNum[1] = formatNumber(r.Val)
isStr = false
isRuleStr = false
} else {
isNum = false
isRuleStr = true
}
var flagOpIn bool
// if in || nin
if op == "@" || op == "in" || op == "!@" || op == "nin" || op == "<<" || op == "between" {
flagOpIn = true
if !isObjStr && isRuleStr {
pairStr[0] = strconv.FormatFloat(pairNum[0], 'f', -1, 64)
}
}
// if types different, ignore in & nin
if !isStr && !isNum && !flagOpIn {
return false
}
switch op {
case "=", "eq":
if isNum {
return pairNum[0] == pairNum[1]
}
if isStr {
return pairStr[0] == pairStr[1]
}
return false
case ">", "gt":
if isNum {
return pairNum[0] > pairNum[1]
}
if isStr {
return pairStr[0] > pairStr[1]
}
return false
case "<", "lt":
if isNum {
return pairNum[0] < pairNum[1]
}
if isStr {
return pairStr[0] < pairStr[1]
}
return false
case ">=", "gte":
if isNum {
return pairNum[0] >= pairNum[1]
}
if isStr {
return pairStr[0] >= pairStr[1]
}
return false
case "<=", "lte":
if isNum {
return pairNum[0] <= pairNum[1]
}
if isStr {
return pairStr[0] <= pairStr[1]
}
return false
case "!=", "neq":
if isNum {
return pairNum[0] != pairNum[1]
}
if isStr {
return pairStr[0] != pairStr[1]
}
return false
case "@", "in":
return isIn(pairStr[0], pairStr[1], !isObjStr)
case "!@", "nin":
return !isIn(pairStr[0], pairStr[1], !isObjStr)
case "^$", "regex":
return checkRegex(pairStr[1], pairStr[0])
case "0", "empty":
return v == nil
case "1", "nempty":
return v != nil
case "<<", "between":
return isBetween(pairNum[0], pairStr[1])
case "@@", "intersect":
return isIntersect(pairStr[1], pairStr[0])
default:
return false
}
}
func (r *Rule) checkIP(v interface{}) (bool, error) {
if strings.HasSuffix(strings.ToLower(r.Key), "ip") {
ver1, ok := r.Val.(string)
if ok {
ver2, ok := v.(string)
if ok {
if len(strings.Split(ver2, ".")) == 4 {
return CheckIP(ver2, ver1), nil
}
}
}
}
return false, errors.New("invalid ip")
}
func (r *Rule) checkSemver(v interface{}) (bool, error) {
op := r.Op
if strings.HasSuffix(strings.ToLower(r.Key), "version") {
ver1, ok := r.Val.(string)
if ok {
validate := true
ver2, ok := v.(string)
if ok {
/*v1len := len(strings.Split(ver1, "."))
v2len := len(strings.Split(ver2, "."))
if v1len > 4 || v2len > 4 {
validate = false
} else {
if v1len != v2len {
if v1len > v2len {
for i:=0; i< v1len - v2len; i++ {
//ver2 += ".0"
}
} else if v1len < v2len {
for i:=0; i< v2len - v1len; i++ {
//ver1 += ".0"
}
}
}
}*/
} else {
validate = false
}
if validate {
ver1 = repair(ver1)
constraint, err := semver.NewConstraint(op + " " + ver1)
if err == nil {
ver2 = repair(ver2)
version, err := semver.NewVersion(ver2)
if err == nil {
return constraint.Check(version), nil
}
}
}
}
}
return false, errors.New("invalid semver")
}
func repair(ver string) string {
if len(strings.Split(ver, ".")) > 3 {
//4位版本号,不含'-',转rc版本
if strings.Index(ver, "-") == -1 {
versions := strings.Split(ver, ".")
return versions[0] + "." + versions[1] + "." + versions[2] + "-rc." + versions[3]
}
}
return ver
}
func pluck(key string, o map[string]interface{}) interface{} {
if o == nil || key == EmptyStr {
return nil
}
paths := strings.Split(key, ".")
var ok bool
for index, step := range paths {
// last step is object key
if index == len(paths)-1 {
return o[step]
}
// explore deeper
if o, ok = o[step].(map[string]interface{}); !ok {
return nil
}
}
return nil
}
func formatNumber(v interface{}) float64 {
switch t := v.(type) {
case uint:
return float64(t)
case uint8:
return float64(t)
case uint16:
return float64(t)
case uint32:
return float64(t)
case uint64:
return float64(t)
case int:
return float64(t)
case int8:
return float64(t)
case int16:
return float64(t)
case int32:
return float64(t)
case int64:
return float64(t)
case float32:
return float64(t)
case float64:
return t
default:
return 0
}
}
func checkRegex(pattern, o string) bool {
regex, err := regexp.Compile(pattern)
if err != nil {
return false
}
return regex.MatchString(o)
}
func formatLogicExpression(strRawExpr string) string {
var flagPre, flagNow string
strBracket := "bracket"
strSpace := "space"
strNotSpace := "notSpace"
strOrigin := strings.ToLower(strRawExpr)
runesPretty := make([]rune, 0)
for _, c := range strOrigin {
if c <= '9' && c >= '0' {
flagNow = "num"
} else if c <= 'z' && c >= 'a' {
flagNow = "char"
} else if c == '(' || c == ')' {
flagNow = strBracket
} else {
flagNow = flagPre
}
if flagNow != flagPre || flagNow == strBracket && flagPre == strBracket {
// should insert space here
runesPretty = append(runesPretty, []rune(Space)[0])
}
runesPretty = append(runesPretty, c)
flagPre = flagNow
}
// remove redundant space
flagPre = strNotSpace
runesTrim := make([]rune, 0)
for _, c := range runesPretty {
if c == []rune(Space)[0] {
flagNow = strSpace
} else {
flagNow = strNotSpace
}
if flagNow == strSpace && flagPre == strSpace {
// continuous space
continue
} else {
runesTrim = append(runesTrim, c)
}
flagPre = flagNow
}
strPrettyTrim := string(runesTrim)
strPrettyTrim = strings.Trim(strPrettyTrim, Space)
return strPrettyTrim
}
func isFormatLogicExpressionAllValidSymbol(strFormatLogic string) bool {
listSymbol := strings.Split(strFormatLogic, Space)
for _, symbol := range listSymbol {
flag := false
regex, err := regexp.Compile(PatternNumber)
if err != nil {
return false
}
if regex.MatchString(symbol) {
// is number ok
continue
}
for _, op := range ValidOperators {
if op == symbol {
// is operator ok
flag = true
}
}
for _, v := range []string{"(", ")"} {
if v == symbol {
// is bracket ok
flag = true
}
}
if !flag {
return false
}
}
return true
}
func isFormatLogicExpressio | nAllIdsExist(strFormatLogic string | identifier_name | |
main.rs | renderer;
mod sampling;
mod scene;
mod shading;
mod surface;
mod timer;
mod tracer;
mod transform_stack;
use std::{fs::File, io, io::Read, mem, path::Path, str::FromStr};
use clap::{App, Arg};
use nom::bytes::complete::take_until;
use kioku::Arena;
use crate::{
accel::BVH4Node,
bbox::BBox,
parse::{parse_scene, DataTree},
renderer::LightPath,
surface::SurfaceIntersection,
timer::Timer,
};
const VERSION: &str = env!("CARGO_PKG_VERSION");
#[allow(clippy::cognitive_complexity)]
fn main() {
let mut t = Timer::new();
// Parse command line arguments.
let args = App::new("Psychopath")
.version(VERSION)
.about("A slightly psychotic path tracer")
.arg(
Arg::with_name("input")
.short("i")
.long("input")
.value_name("FILE")
.help("Input .psy file")
.takes_value(true)
.required_unless_one(&["dev", "use_stdin"]),
)
.arg(
Arg::with_name("spp")
.short("s")
.long("spp")
.value_name("N")
.help("Number of samples per pixel")
.takes_value(true)
.validator(|s| {
usize::from_str(&s)
.and(Ok(()))
.or(Err("must be an integer".to_string()))
}),
)
.arg(
Arg::with_name("max_bucket_samples")
.short("b")
.long("spb")
.value_name("N")
.help("Target number of samples per bucket (determines bucket size)")
.takes_value(true)
.validator(|s| {
usize::from_str(&s)
.and(Ok(()))
.or(Err("must be an integer".to_string()))
}),
)
.arg(
Arg::with_name("crop")
.long("crop")
.value_name("X1 Y1 X2 Y2")
.help(
"Only render the image between pixel coordinates (X1, Y1) \
and (X2, Y2). Coordinates are zero-indexed and inclusive.",
)
.takes_value(true)
.number_of_values(4)
.validator(|s| {
usize::from_str(&s)
.and(Ok(()))
.or(Err("must be four integers".to_string()))
}),
)
.arg(
Arg::with_name("threads")
.short("t")
.long("threads")
.value_name("N")
.help(
"Number of threads to render with. Defaults to the number of logical \
cores on the system.",
)
.takes_value(true)
.validator(|s| {
usize::from_str(&s)
.and(Ok(()))
.or(Err("must be an integer".to_string()))
}),
)
.arg(
Arg::with_name("stats")
.long("stats")
.help("Print additional statistics about rendering"),
)
.arg(
Arg::with_name("dev")
.long("dev")
.help("Show useful dev/debug info."),
)
.arg(
Arg::with_name("serialized_output")
.long("serialized_output")
.help("Serialize and send render output to standard output.")
.hidden(true),
)
.arg(
Arg::with_name("use_stdin")
.long("use_stdin")
.help("Take scene file in from stdin instead of a file path.")
.hidden(true),
)
.get_matches();
// Print some misc useful dev info.
if args.is_present("dev") {
println!(
"SurfaceIntersection size: {} bytes",
mem::size_of::<SurfaceIntersection>()
);
println!("LightPath size: {} bytes", mem::size_of::<LightPath>());
println!("BBox size: {} bytes", mem::size_of::<BBox>());
// println!("BVHNode size: {} bytes", mem::size_of::<BVHNode>());
println!("BVH4Node size: {} bytes", mem::size_of::<BVH4Node>());
return;
}
let crop = args.values_of("crop").map(|mut vals| {
let coords = (
u32::from_str(vals.next().unwrap()).unwrap(),
u32::from_str(vals.next().unwrap()).unwrap(),
u32::from_str(vals.next().unwrap()).unwrap(),
u32::from_str(vals.next().unwrap()).unwrap(),
);
if coords.0 > coords.2 {
panic!("Argument '--crop': X1 must be less than or equal to X2");
}
if coords.1 > coords.3 {
panic!("Argument '--crop': Y1 must be less than or equal to Y2");
}
coords
});
// Parse data tree of scene file
if !args.is_present("serialized_output") {
println!("Parsing scene file...",);
}
t.tick();
let psy_contents = if args.is_present("use_stdin") {
// Read from stdin
let mut input = Vec::new();
let tmp = std::io::stdin();
let mut stdin = tmp.lock();
let mut buf = vec![0u8; 4096];
loop {
let count = stdin
.read(&mut buf)
.expect("Unexpected end of scene input.");
let start = if input.len() < 11 {
0
} else {
input.len() - 11
};
let end = input.len() + count;
input.extend(&buf[..count]);
let mut done = false;
let mut trunc_len = 0;
if let nom::IResult::Ok((remaining, _)) =
take_until::<&str, &[u8], ()>("__PSY_EOF__")(&input[start..end])
{
done = true;
trunc_len = input.len() - remaining.len();
}
if done {
input.truncate(trunc_len);
break;
}
}
String::from_utf8(input).unwrap()
} else {
// Read from file
let mut input = String::new();
let fp = args.value_of("input").unwrap();
let mut f = io::BufReader::new(File::open(fp).unwrap());
let _ = f.read_to_string(&mut input);
input
};
let dt = DataTree::from_str(&psy_contents).unwrap();
if !args.is_present("serialized_output") {
println!("\tParsed scene file in {:.3}s", t.tick());
}
// Iterate through scenes and render them
if let DataTree::Internal { ref children, .. } = dt {
for child in children {
t.tick();
if child.type_name() == "Scene" {
if !args.is_present("serialized_output") {
println!("Building scene...");
}
let arena = Arena::new().with_block_size((1 << 20) * 4);
let mut r = parse_scene(&arena, child).unwrap_or_else(|e| {
e.print(&psy_contents);
panic!("Parse error.");
});
if let Some(spp) = args.value_of("spp") {
if !args.is_present("serialized_output") {
println!("\tOverriding scene spp: {}", spp);
}
r.spp = usize::from_str(spp).unwrap();
}
let max_samples_per_bucket =
if let Some(max_samples_per_bucket) = args.value_of("max_bucket_samples") {
u32::from_str(max_samples_per_bucket).unwrap()
} else {
4096
};
let thread_count = if let Some(threads) = args.value_of("threads") {
u32::from_str(threads).unwrap()
} else {
num_cpus::get() as u32
};
if !args.is_present("serialized_output") |
if !args.is_present("serialized_output") {
println!("Rendering scene with {} threads...", thread_count);
}
let (mut image, rstats) = r.render(
max_samples_per_bucket,
crop,
thread_count,
args.is_present("serialized_output"),
);
// Print render stats
if !args.is_present("serialized_output") {
let rtime = t.tick();
let ntime = rtime as f64 / rstats.total_time;
println!("\tRendered scene in {:.3}s", rtime);
println!(
"\t\tTrace: {:.3}s",
ntime * rstats.trace_time
);
println!("\t\t\tRays traced: {}", rstats.ray_count);
println!(
"\t\t\tRays/sec: {}",
(rstats.ray_count as f64 / (ntime * rstats.trace_time) as f64) as u64
);
println!("\t\t\tRay/node tests: {}", rstats.accel_node_visits);
println!(
"\t\tInitial ray generation: {:.3}s",
ntime * rstats.initial_ray_generation_time
);
| {
println!("\tBuilt scene in {:.3}s", t.tick());
} | conditional_block |
main.rs | renderer;
mod sampling;
mod scene;
mod shading;
mod surface;
mod timer;
mod tracer;
mod transform_stack;
use std::{fs::File, io, io::Read, mem, path::Path, str::FromStr};
use clap::{App, Arg};
use nom::bytes::complete::take_until;
use kioku::Arena;
use crate::{
accel::BVH4Node,
bbox::BBox,
parse::{parse_scene, DataTree},
renderer::LightPath,
surface::SurfaceIntersection,
timer::Timer,
};
const VERSION: &str = env!("CARGO_PKG_VERSION");
#[allow(clippy::cognitive_complexity)]
fn main() | .value_name("N")
.help("Number of samples per pixel")
.takes_value(true)
.validator(|s| {
usize::from_str(&s)
.and(Ok(()))
.or(Err("must be an integer".to_string()))
}),
)
.arg(
Arg::with_name("max_bucket_samples")
.short("b")
.long("spb")
.value_name("N")
.help("Target number of samples per bucket (determines bucket size)")
.takes_value(true)
.validator(|s| {
usize::from_str(&s)
.and(Ok(()))
.or(Err("must be an integer".to_string()))
}),
)
.arg(
Arg::with_name("crop")
.long("crop")
.value_name("X1 Y1 X2 Y2")
.help(
"Only render the image between pixel coordinates (X1, Y1) \
and (X2, Y2). Coordinates are zero-indexed and inclusive.",
)
.takes_value(true)
.number_of_values(4)
.validator(|s| {
usize::from_str(&s)
.and(Ok(()))
.or(Err("must be four integers".to_string()))
}),
)
.arg(
Arg::with_name("threads")
.short("t")
.long("threads")
.value_name("N")
.help(
"Number of threads to render with. Defaults to the number of logical \
cores on the system.",
)
.takes_value(true)
.validator(|s| {
usize::from_str(&s)
.and(Ok(()))
.or(Err("must be an integer".to_string()))
}),
)
.arg(
Arg::with_name("stats")
.long("stats")
.help("Print additional statistics about rendering"),
)
.arg(
Arg::with_name("dev")
.long("dev")
.help("Show useful dev/debug info."),
)
.arg(
Arg::with_name("serialized_output")
.long("serialized_output")
.help("Serialize and send render output to standard output.")
.hidden(true),
)
.arg(
Arg::with_name("use_stdin")
.long("use_stdin")
.help("Take scene file in from stdin instead of a file path.")
.hidden(true),
)
.get_matches();
// Print some misc useful dev info.
if args.is_present("dev") {
println!(
"SurfaceIntersection size: {} bytes",
mem::size_of::<SurfaceIntersection>()
);
println!("LightPath size: {} bytes", mem::size_of::<LightPath>());
println!("BBox size: {} bytes", mem::size_of::<BBox>());
// println!("BVHNode size: {} bytes", mem::size_of::<BVHNode>());
println!("BVH4Node size: {} bytes", mem::size_of::<BVH4Node>());
return;
}
let crop = args.values_of("crop").map(|mut vals| {
let coords = (
u32::from_str(vals.next().unwrap()).unwrap(),
u32::from_str(vals.next().unwrap()).unwrap(),
u32::from_str(vals.next().unwrap()).unwrap(),
u32::from_str(vals.next().unwrap()).unwrap(),
);
if coords.0 > coords.2 {
panic!("Argument '--crop': X1 must be less than or equal to X2");
}
if coords.1 > coords.3 {
panic!("Argument '--crop': Y1 must be less than or equal to Y2");
}
coords
});
// Parse data tree of scene file
if !args.is_present("serialized_output") {
println!("Parsing scene file...",);
}
t.tick();
let psy_contents = if args.is_present("use_stdin") {
// Read from stdin
let mut input = Vec::new();
let tmp = std::io::stdin();
let mut stdin = tmp.lock();
let mut buf = vec![0u8; 4096];
loop {
let count = stdin
.read(&mut buf)
.expect("Unexpected end of scene input.");
let start = if input.len() < 11 {
0
} else {
input.len() - 11
};
let end = input.len() + count;
input.extend(&buf[..count]);
let mut done = false;
let mut trunc_len = 0;
if let nom::IResult::Ok((remaining, _)) =
take_until::<&str, &[u8], ()>("__PSY_EOF__")(&input[start..end])
{
done = true;
trunc_len = input.len() - remaining.len();
}
if done {
input.truncate(trunc_len);
break;
}
}
String::from_utf8(input).unwrap()
} else {
// Read from file
let mut input = String::new();
let fp = args.value_of("input").unwrap();
let mut f = io::BufReader::new(File::open(fp).unwrap());
let _ = f.read_to_string(&mut input);
input
};
let dt = DataTree::from_str(&psy_contents).unwrap();
if !args.is_present("serialized_output") {
println!("\tParsed scene file in {:.3}s", t.tick());
}
// Iterate through scenes and render them
if let DataTree::Internal { ref children, .. } = dt {
for child in children {
t.tick();
if child.type_name() == "Scene" {
if !args.is_present("serialized_output") {
println!("Building scene...");
}
let arena = Arena::new().with_block_size((1 << 20) * 4);
let mut r = parse_scene(&arena, child).unwrap_or_else(|e| {
e.print(&psy_contents);
panic!("Parse error.");
});
if let Some(spp) = args.value_of("spp") {
if !args.is_present("serialized_output") {
println!("\tOverriding scene spp: {}", spp);
}
r.spp = usize::from_str(spp).unwrap();
}
let max_samples_per_bucket =
if let Some(max_samples_per_bucket) = args.value_of("max_bucket_samples") {
u32::from_str(max_samples_per_bucket).unwrap()
} else {
4096
};
let thread_count = if let Some(threads) = args.value_of("threads") {
u32::from_str(threads).unwrap()
} else {
num_cpus::get() as u32
};
if !args.is_present("serialized_output") {
println!("\tBuilt scene in {:.3}s", t.tick());
}
if !args.is_present("serialized_output") {
println!("Rendering scene with {} threads...", thread_count);
}
let (mut image, rstats) = r.render(
max_samples_per_bucket,
crop,
thread_count,
args.is_present("serialized_output"),
);
// Print render stats
if !args.is_present("serialized_output") {
let rtime = t.tick();
let ntime = rtime as f64 / rstats.total_time;
println!("\tRendered scene in {:.3}s", rtime);
println!(
"\t\tTrace: {:.3}s",
ntime * rstats.trace_time
);
println!("\t\t\tRays traced: {}", rstats.ray_count);
println!(
"\t\t\tRays/sec: {}",
(rstats.ray_count as f64 / (ntime * rstats.trace_time) as f64) as u64
);
println!("\t\t\tRay/node tests: {}", rstats.accel_node_visits);
println!(
"\t\tInitial ray generation: {:.3}s",
ntime * rstats.initial_ray_generation_time
);
| {
let mut t = Timer::new();
// Parse command line arguments.
let args = App::new("Psychopath")
.version(VERSION)
.about("A slightly psychotic path tracer")
.arg(
Arg::with_name("input")
.short("i")
.long("input")
.value_name("FILE")
.help("Input .psy file")
.takes_value(true)
.required_unless_one(&["dev", "use_stdin"]),
)
.arg(
Arg::with_name("spp")
.short("s")
.long("spp") | identifier_body |
main.rs | renderer;
mod sampling;
mod scene;
mod shading;
mod surface;
mod timer;
mod tracer;
mod transform_stack;
use std::{fs::File, io, io::Read, mem, path::Path, str::FromStr};
use clap::{App, Arg};
use nom::bytes::complete::take_until;
use kioku::Arena;
use crate::{
accel::BVH4Node,
bbox::BBox,
parse::{parse_scene, DataTree},
renderer::LightPath,
surface::SurfaceIntersection,
timer::Timer,
};
const VERSION: &str = env!("CARGO_PKG_VERSION");
#[allow(clippy::cognitive_complexity)]
fn | () {
let mut t = Timer::new();
// Parse command line arguments.
let args = App::new("Psychopath")
.version(VERSION)
.about("A slightly psychotic path tracer")
.arg(
Arg::with_name("input")
.short("i")
.long("input")
.value_name("FILE")
.help("Input .psy file")
.takes_value(true)
.required_unless_one(&["dev", "use_stdin"]),
)
.arg(
Arg::with_name("spp")
.short("s")
.long("spp")
.value_name("N")
.help("Number of samples per pixel")
.takes_value(true)
.validator(|s| {
usize::from_str(&s)
.and(Ok(()))
.or(Err("must be an integer".to_string()))
}),
)
.arg(
Arg::with_name("max_bucket_samples")
.short("b")
.long("spb")
.value_name("N")
.help("Target number of samples per bucket (determines bucket size)")
.takes_value(true)
.validator(|s| {
usize::from_str(&s)
.and(Ok(()))
.or(Err("must be an integer".to_string()))
}),
)
.arg(
Arg::with_name("crop")
.long("crop")
.value_name("X1 Y1 X2 Y2")
.help(
"Only render the image between pixel coordinates (X1, Y1) \
and (X2, Y2). Coordinates are zero-indexed and inclusive.",
)
.takes_value(true)
.number_of_values(4)
.validator(|s| {
usize::from_str(&s)
.and(Ok(()))
.or(Err("must be four integers".to_string()))
}),
)
.arg(
Arg::with_name("threads")
.short("t")
.long("threads")
.value_name("N")
.help(
"Number of threads to render with. Defaults to the number of logical \
cores on the system.",
)
.takes_value(true)
.validator(|s| {
usize::from_str(&s)
.and(Ok(()))
.or(Err("must be an integer".to_string()))
}),
)
.arg(
Arg::with_name("stats")
.long("stats")
.help("Print additional statistics about rendering"),
)
.arg(
Arg::with_name("dev")
.long("dev")
.help("Show useful dev/debug info."),
)
.arg(
Arg::with_name("serialized_output")
.long("serialized_output")
.help("Serialize and send render output to standard output.")
.hidden(true),
)
.arg(
Arg::with_name("use_stdin")
.long("use_stdin")
.help("Take scene file in from stdin instead of a file path.")
.hidden(true),
)
.get_matches();
// Print some misc useful dev info.
if args.is_present("dev") {
println!(
"SurfaceIntersection size: {} bytes",
mem::size_of::<SurfaceIntersection>()
);
println!("LightPath size: {} bytes", mem::size_of::<LightPath>());
println!("BBox size: {} bytes", mem::size_of::<BBox>());
// println!("BVHNode size: {} bytes", mem::size_of::<BVHNode>());
println!("BVH4Node size: {} bytes", mem::size_of::<BVH4Node>());
return;
}
let crop = args.values_of("crop").map(|mut vals| {
let coords = (
u32::from_str(vals.next().unwrap()).unwrap(),
u32::from_str(vals.next().unwrap()).unwrap(),
u32::from_str(vals.next().unwrap()).unwrap(),
u32::from_str(vals.next().unwrap()).unwrap(),
);
if coords.0 > coords.2 {
panic!("Argument '--crop': X1 must be less than or equal to X2");
}
if coords.1 > coords.3 {
panic!("Argument '--crop': Y1 must be less than or equal to Y2");
}
coords
});
// Parse data tree of scene file
if !args.is_present("serialized_output") {
println!("Parsing scene file...",);
}
t.tick();
let psy_contents = if args.is_present("use_stdin") {
// Read from stdin
let mut input = Vec::new();
let tmp = std::io::stdin();
let mut stdin = tmp.lock();
let mut buf = vec![0u8; 4096];
loop {
let count = stdin
.read(&mut buf)
.expect("Unexpected end of scene input.");
let start = if input.len() < 11 {
0
} else {
input.len() - 11
};
let end = input.len() + count;
input.extend(&buf[..count]);
let mut done = false;
let mut trunc_len = 0;
if let nom::IResult::Ok((remaining, _)) =
take_until::<&str, &[u8], ()>("__PSY_EOF__")(&input[start..end])
{
done = true;
trunc_len = input.len() - remaining.len();
}
if done {
input.truncate(trunc_len);
break;
}
}
String::from_utf8(input).unwrap()
} else {
// Read from file
let mut input = String::new();
let fp = args.value_of("input").unwrap();
let mut f = io::BufReader::new(File::open(fp).unwrap());
let _ = f.read_to_string(&mut input);
input
};
let dt = DataTree::from_str(&psy_contents).unwrap();
if !args.is_present("serialized_output") {
println!("\tParsed scene file in {:.3}s", t.tick());
}
// Iterate through scenes and render them
if let DataTree::Internal { ref children, .. } = dt {
for child in children {
t.tick();
if child.type_name() == "Scene" {
if !args.is_present("serialized_output") {
println!("Building scene...");
}
let arena = Arena::new().with_block_size((1 << 20) * 4);
let mut r = parse_scene(&arena, child).unwrap_or_else(|e| {
e.print(&psy_contents);
panic!("Parse error.");
});
if let Some(spp) = args.value_of("spp") {
if !args.is_present("serialized_output") {
println!("\tOverriding scene spp: {}", spp);
}
r.spp = usize::from_str(spp).unwrap();
}
let max_samples_per_bucket =
if let Some(max_samples_per_bucket) = args.value_of("max_bucket_samples") {
u32::from_str(max_samples_per_bucket).unwrap()
} else {
4096
};
let thread_count = if let Some(threads) = args.value_of("threads") {
u32::from_str(threads).unwrap()
} else {
num_cpus::get() as u32
};
if !args.is_present("serialized_output") {
println!("\tBuilt scene in {:.3}s", t.tick());
}
if !args.is_present("serialized_output") {
println!("Rendering scene with {} threads...", thread_count);
}
let (mut image, rstats) = r.render(
max_samples_per_bucket,
crop,
thread_count,
args.is_present("serialized_output"),
);
// Print render stats
if !args.is_present("serialized_output") {
let rtime = t.tick();
let ntime = rtime as f64 / rstats.total_time;
println!("\tRendered scene in {:.3}s", rtime);
println!(
"\t\tTrace: {:.3}s",
ntime * rstats.trace_time
);
println!("\t\t\tRays traced: {}", rstats.ray_count);
println!(
"\t\t\tRays/sec: {}",
(rstats.ray_count as f64 / (ntime * rstats.trace_time) as f64) as u64
);
println!("\t\t\tRay/node tests: {}", rstats.accel_node_visits);
println!(
"\t\tInitial ray generation: {:.3}s",
ntime * rstats.initial_ray_generation_time
);
| main | identifier_name |
main.rs | renderer;
mod sampling;
mod scene;
mod shading;
mod surface;
mod timer;
mod tracer;
mod transform_stack;
use std::{fs::File, io, io::Read, mem, path::Path, str::FromStr};
use clap::{App, Arg};
use nom::bytes::complete::take_until;
use kioku::Arena;
use crate::{
accel::BVH4Node,
bbox::BBox,
parse::{parse_scene, DataTree},
renderer::LightPath,
surface::SurfaceIntersection,
timer::Timer,
};
const VERSION: &str = env!("CARGO_PKG_VERSION");
#[allow(clippy::cognitive_complexity)]
fn main() {
let mut t = Timer::new();
// Parse command line arguments.
let args = App::new("Psychopath")
.version(VERSION)
.about("A slightly psychotic path tracer")
.arg(
Arg::with_name("input")
.short("i")
.long("input")
.value_name("FILE")
.help("Input .psy file")
.takes_value(true)
.required_unless_one(&["dev", "use_stdin"]),
)
.arg(
Arg::with_name("spp")
.short("s")
.long("spp")
.value_name("N")
.help("Number of samples per pixel")
.takes_value(true)
.validator(|s| {
usize::from_str(&s)
.and(Ok(()))
.or(Err("must be an integer".to_string()))
}),
)
.arg(
Arg::with_name("max_bucket_samples")
.short("b")
.long("spb")
.value_name("N")
.help("Target number of samples per bucket (determines bucket size)")
.takes_value(true)
.validator(|s| {
usize::from_str(&s)
.and(Ok(()))
.or(Err("must be an integer".to_string()))
}),
)
.arg(
Arg::with_name("crop")
.long("crop")
.value_name("X1 Y1 X2 Y2")
.help(
"Only render the image between pixel coordinates (X1, Y1) \
and (X2, Y2). Coordinates are zero-indexed and inclusive.",
)
.takes_value(true)
.number_of_values(4)
.validator(|s| {
usize::from_str(&s)
.and(Ok(()))
.or(Err("must be four integers".to_string()))
}),
)
.arg(
Arg::with_name("threads")
.short("t")
.long("threads")
.value_name("N")
.help(
"Number of threads to render with. Defaults to the number of logical \
cores on the system.",
)
.takes_value(true)
.validator(|s| {
usize::from_str(&s)
.and(Ok(()))
.or(Err("must be an integer".to_string()))
}),
)
.arg(
Arg::with_name("stats")
.long("stats")
.help("Print additional statistics about rendering"),
)
.arg(
Arg::with_name("dev")
.long("dev")
.help("Show useful dev/debug info."),
)
.arg(
Arg::with_name("serialized_output")
.long("serialized_output")
.help("Serialize and send render output to standard output.")
.hidden(true),
)
.arg(
Arg::with_name("use_stdin")
.long("use_stdin")
.help("Take scene file in from stdin instead of a file path.")
.hidden(true),
)
.get_matches();
// Print some misc useful dev info.
if args.is_present("dev") {
println!(
"SurfaceIntersection size: {} bytes",
mem::size_of::<SurfaceIntersection>()
);
println!("LightPath size: {} bytes", mem::size_of::<LightPath>());
println!("BBox size: {} bytes", mem::size_of::<BBox>());
// println!("BVHNode size: {} bytes", mem::size_of::<BVHNode>());
println!("BVH4Node size: {} bytes", mem::size_of::<BVH4Node>());
return;
}
let crop = args.values_of("crop").map(|mut vals| {
let coords = (
u32::from_str(vals.next().unwrap()).unwrap(),
u32::from_str(vals.next().unwrap()).unwrap(),
u32::from_str(vals.next().unwrap()).unwrap(),
u32::from_str(vals.next().unwrap()).unwrap(),
);
if coords.0 > coords.2 {
panic!("Argument '--crop': X1 must be less than or equal to X2");
}
if coords.1 > coords.3 {
panic!("Argument '--crop': Y1 must be less than or equal to Y2");
}
coords
});
// Parse data tree of scene file
if !args.is_present("serialized_output") {
println!("Parsing scene file...",);
}
t.tick();
let psy_contents = if args.is_present("use_stdin") {
// Read from stdin
let mut input = Vec::new();
let tmp = std::io::stdin();
let mut stdin = tmp.lock();
let mut buf = vec![0u8; 4096];
loop {
let count = stdin
.read(&mut buf)
.expect("Unexpected end of scene input.");
let start = if input.len() < 11 {
0
} else {
input.len() - 11
};
let end = input.len() + count;
input.extend(&buf[..count]);
let mut done = false;
let mut trunc_len = 0;
if let nom::IResult::Ok((remaining, _)) =
take_until::<&str, &[u8], ()>("__PSY_EOF__")(&input[start..end])
{
done = true;
trunc_len = input.len() - remaining.len();
}
if done {
input.truncate(trunc_len);
break;
}
}
String::from_utf8(input).unwrap()
} else {
// Read from file
let mut input = String::new();
let fp = args.value_of("input").unwrap();
let mut f = io::BufReader::new(File::open(fp).unwrap());
let _ = f.read_to_string(&mut input);
input
};
let dt = DataTree::from_str(&psy_contents).unwrap();
if !args.is_present("serialized_output") {
println!("\tParsed scene file in {:.3}s", t.tick());
}
// Iterate through scenes and render them
if let DataTree::Internal { ref children, .. } = dt {
for child in children {
t.tick();
if child.type_name() == "Scene" { | let arena = Arena::new().with_block_size((1 << 20) * 4);
let mut r = parse_scene(&arena, child).unwrap_or_else(|e| {
e.print(&psy_contents);
panic!("Parse error.");
});
if let Some(spp) = args.value_of("spp") {
if !args.is_present("serialized_output") {
println!("\tOverriding scene spp: {}", spp);
}
r.spp = usize::from_str(spp).unwrap();
}
let max_samples_per_bucket =
if let Some(max_samples_per_bucket) = args.value_of("max_bucket_samples") {
u32::from_str(max_samples_per_bucket).unwrap()
} else {
4096
};
let thread_count = if let Some(threads) = args.value_of("threads") {
u32::from_str(threads).unwrap()
} else {
num_cpus::get() as u32
};
if !args.is_present("serialized_output") {
println!("\tBuilt scene in {:.3}s", t.tick());
}
if !args.is_present("serialized_output") {
println!("Rendering scene with {} threads...", thread_count);
}
let (mut image, rstats) = r.render(
max_samples_per_bucket,
crop,
thread_count,
args.is_present("serialized_output"),
);
// Print render stats
if !args.is_present("serialized_output") {
let rtime = t.tick();
let ntime = rtime as f64 / rstats.total_time;
println!("\tRendered scene in {:.3}s", rtime);
println!(
"\t\tTrace: {:.3}s",
ntime * rstats.trace_time
);
println!("\t\t\tRays traced: {}", rstats.ray_count);
println!(
"\t\t\tRays/sec: {}",
(rstats.ray_count as f64 / (ntime * rstats.trace_time) as f64) as u64
);
println!("\t\t\tRay/node tests: {}", rstats.accel_node_visits);
println!(
"\t\tInitial ray generation: {:.3}s",
ntime * rstats.initial_ray_generation_time
);
println | if !args.is_present("serialized_output") {
println!("Building scene...");
}
| random_line_split |
index.ts | import { fromCognitoIdentityPool } from "@aws-sdk/credential-providers";
import { CognitoIdentityCredentials } from "@aws-sdk/credential-provider-cognito-identity/dist-types/fromCognitoIdentity"
// @ts-ignore
import Settings = require('./settings');
import {toUtf8} from "@aws-sdk/util-utf8-browser";
const $: JQueryStatic = jquery;
function log(msg: string) {
let now = new Date();
$('#console').append(`<pre>${now.toString()}: ${msg}</pre>`);
}
// AWSCognitoCredentialOptions. The credentials options used to create AWSCongnitoCredentialProvider.
interface AWSCognitoCredentialOptions
{
IdentityPoolId : string,
Region: string
}
// AWSCognitoCredentialsProvider. The AWSCognitoCredentialsProvider implements AWS.CognitoIdentityCredentials.
class AWSCognitoCredentialsProvider extends auth.CredentialsProvider{
private options: AWSCognitoCredentialOptions;
private cachedCredentials? : CognitoIdentityCredentials;
constructor(options: AWSCognitoCredentialOptions, expire_interval_in_ms? : number)
{
super();
this.options = options;
setInterval(async ()=>{
await this.refreshCredentials();
},expire_interval_in_ms?? 3600*1000);
}
getCredentials() : auth.AWSCredentials {
return {
aws_access_id: this.cachedCredentials?.accessKeyId ?? "",
aws_secret_key: this.cachedCredentials?.secretAccessKey ?? "",
aws_sts_token: this.cachedCredentials?.sessionToken,
aws_region: this.options.Region
}
}
async refreshCredentials() {
log('Fetching Cognito credentials');
this.cachedCredentials = await fromCognitoIdentityPool({
// Required. The unique identifier for the identity pool from which an identity should be
// retrieved or generated.
identityPoolId: this.options.IdentityPoolId,
clientConfig: { region: this.options.Region },
})();
}
}
// For the purposes of this sample, we need to associate certain variables with a particular MQTT5 client
// and to do so we use this class to hold all the data for a particular client used in the sample.
class SampleMqtt5Client {
client? : mqtt5.Mqtt5Client;
name? : string;
// Sets up the MQTT5 sample client using direct MQTT5 via mTLS with the passed input data.
public setupMqtt5Client(
provider: AWSCognitoCredentialsProvider,
input_endpoint : string, input_region: string, input_clientId : string, input_clientName : string)
{
this.name = input_clientName;
let wsConfig : iot.WebsocketSigv4Config = {
credentialsProvider: provider,
region: input_region
}
let builder: iot.AwsIotMqtt5ClientConfigBuilder = iot.AwsIotMqtt5ClientConfigBuilder.newWebsocketMqttBuilderWithSigv4Auth(
input_endpoint,
wsConfig
)
builder.withConnectProperties({
clientId: input_clientId,
keepAliveIntervalSeconds: 120
})
this.client = new mqtt5.Mqtt5Client(builder.build());
// Invoked when the client has an error
this.client.on('error', (error) => {
log("[" + this.name + "] Error: " + error.toString());
});
// Invoked when the client gets a message/publish on a subscribed topic
this.client.on("messageReceived",(eventData: mqtt5.MessageReceivedEvent) : void => {
log("[" + this.name + "]: Received a publish");
if (eventData.message.topicName) {
log("\tPublish received on topic: " + eventData.message.topicName);
}
if (eventData.message.payload) {
log("\tMessage: " + toUtf8(new Uint8Array(eventData.message.payload as ArrayBuffer)));
}
});
// Invoked when the client connects successfully to the endpoint
this.client.on('connectionSuccess', (eventData: mqtt5.ConnectionSuccessEvent) => {
log("[" + this.name + "]: Connection success");
});
// Invoked when the client fails to connect to the endpoint
this.client.on('connectionFailure', (eventData: mqtt5.ConnectionFailureEvent) => {
log("[" + this.name + "]: Connection failed with error: " + eventData.error.toString());
});
// Invoked when the client becomes disconnected
this.client.on('disconnection', (eventData: mqtt5.DisconnectionEvent) => {
log("[" + this.name + "]: Disconnected");
if (eventData.disconnect) {
if (eventData.disconnect.reasonCode == mqtt5.DisconnectReasonCode.SharedSubscriptionsNotSupported) |
}
});
// Invoked when the client stops
this.client.on('stopped', (eventData: mqtt5.StoppedEvent) => {
log("[" + this.name + "]: Stopped");
});
}
// Helper function to make sample code a little cleaner
public async startClient() {
const connectionSuccess = once(this.client as mqtt5.Mqtt5Client, "connectionSuccess");
this.client?.start();
await connectionSuccess;
}
// Helper function to make sample code a little cleaner
public async stopClient() {
const stopped = once(this.client as mqtt5.Mqtt5Client, "stopped");
this.client?.stop();
await stopped;
}
}
async function runSample() {
// Pull data from the command line
let input_endpoint : string = Settings.AWS_IOT_ENDPOINT;
let input_region : string = Settings.AWS_REGION;
let input_clientId : string = Settings.INPUT_CLIENT_ID;
let input_topic : string = Settings.INPUT_TOPIC;
let input_count : number = Settings.INPUT_COUNT;
let input_message : string = Settings.INPUT_MESSAGE;
let input_groupIdentifier : string = Settings.INPUT_GROUP_IDENTIFIER;
let input_cognitoIdentityPoolId = Settings.AWS_COGNITO_IDENTITY_POOL_ID;
// Construct the shared topic
let input_shared_topic : string = "$share/" + input_groupIdentifier + "/" + input_topic;
/** Set up the credentialsProvider */
const provider = new AWSCognitoCredentialsProvider({
IdentityPoolId: input_cognitoIdentityPoolId,
Region: input_region});
/** Make sure the credential provider fetched before setup the connection */
await provider.refreshCredentials();
// Create the MQTT5 clients: one publisher and two subscribers
let publisher : SampleMqtt5Client = new SampleMqtt5Client()
publisher.setupMqtt5Client(provider, input_endpoint, input_region, input_clientId + "1", "Publisher");
let subscriber_one : SampleMqtt5Client = new SampleMqtt5Client()
subscriber_one.setupMqtt5Client(provider, input_endpoint, input_region, input_clientId + "2", "Subscriber One");
let subscriber_two : SampleMqtt5Client = new SampleMqtt5Client()
subscriber_two.setupMqtt5Client(provider, input_endpoint, input_region, input_clientId + "3", "Subscriber Two");
try
{
// Connect all the clients
await publisher.startClient();
await subscriber_one.startClient();
await subscriber_two.startClient();
// Subscribe to the shared topic on the two subscribers
await subscriber_one.client?.subscribe({subscriptions: [{qos: mqtt5.QoS.AtLeastOnce, topicFilter: input_shared_topic }]});
log("[" + subscriber_one.name + "]: Subscribed to topic '" + input_topic + "' in shared subscription group '" + input_groupIdentifier + "'.");
log("[" + subscriber_one.name + "]: Full subscribed topic is '" + input_shared_topic + "'.");
await subscriber_two.client?.subscribe({subscriptions: [{qos: mqtt5.QoS.AtLeastOnce, topicFilter: input_shared_topic }]});
log("[" + subscriber_two.name + "]: Subscribed to topic '" + input_topic + "' in shared subscription group '" + input_groupIdentifier + "'.");
log("[" + subscriber_two.name + "]: Full subscribed topic is '" + input_shared_topic + "'.");
// Publish using the publisher client
let publishPacket : mqtt5.PublishPacket = {
qos: mqtt5.QoS.AtLeastOnce,
topicName: input_topic,
payload: input_message
};
if (input_count > 0) {
let count = 0;
while (count++ < input_count) {
publishPacket.payload = input_message + ": " + count;
await publisher.client?.publish(publishPacket);
log("[" + publisher.name + "]: Published");
await new Promise(resolve => setTimeout(resolve, 1000));
}
// Wait 5 seconds to let the last publish go out before unsubscribing.
await new Promise(resolve => setTimeout(resolve, 5000));
} else {
log("Skipping publishing messages due to message count being zero...");
}
// Unsubscribe from the shared topic on the two subscribers
await subscriber_one.client?.unsubscribe({topicFilters: [ input_shared_topic ]});
log("[" + subscriber_one.name + "]: Unsubscribed to topic '" + input_topic + "' in shared subscription group '" + input_groupIdentifier + "'.");
log("[" + subscriber_one.name + "]: Full unsubscribed topic is '" + input_shared_topic + "'.");
await subscriber_two.client?.unsubscribe({topicFilters: [ input_shared_topic ]});
log("[" | {
log(
"[" + this.name + "]: Shared Subscriptions not supported!" +
"\nThis sample will not work unless the endpoint being connected to has Shared Subscriptions support.");
} | conditional_block |
index.ts | import { fromCognitoIdentityPool } from "@aws-sdk/credential-providers";
import { CognitoIdentityCredentials } from "@aws-sdk/credential-provider-cognito-identity/dist-types/fromCognitoIdentity"
// @ts-ignore
import Settings = require('./settings');
import {toUtf8} from "@aws-sdk/util-utf8-browser";
const $: JQueryStatic = jquery;
function log(msg: string) |
// AWSCognitoCredentialOptions. The credentials options used to create AWSCongnitoCredentialProvider.
interface AWSCognitoCredentialOptions
{
IdentityPoolId : string,
Region: string
}
// AWSCognitoCredentialsProvider. The AWSCognitoCredentialsProvider implements AWS.CognitoIdentityCredentials.
class AWSCognitoCredentialsProvider extends auth.CredentialsProvider{
private options: AWSCognitoCredentialOptions;
private cachedCredentials? : CognitoIdentityCredentials;
constructor(options: AWSCognitoCredentialOptions, expire_interval_in_ms? : number)
{
super();
this.options = options;
setInterval(async ()=>{
await this.refreshCredentials();
},expire_interval_in_ms?? 3600*1000);
}
getCredentials() : auth.AWSCredentials {
return {
aws_access_id: this.cachedCredentials?.accessKeyId ?? "",
aws_secret_key: this.cachedCredentials?.secretAccessKey ?? "",
aws_sts_token: this.cachedCredentials?.sessionToken,
aws_region: this.options.Region
}
}
async refreshCredentials() {
log('Fetching Cognito credentials');
this.cachedCredentials = await fromCognitoIdentityPool({
// Required. The unique identifier for the identity pool from which an identity should be
// retrieved or generated.
identityPoolId: this.options.IdentityPoolId,
clientConfig: { region: this.options.Region },
})();
}
}
// For the purposes of this sample, we need to associate certain variables with a particular MQTT5 client
// and to do so we use this class to hold all the data for a particular client used in the sample.
class SampleMqtt5Client {
client? : mqtt5.Mqtt5Client;
name? : string;
// Sets up the MQTT5 sample client using direct MQTT5 via mTLS with the passed input data.
public setupMqtt5Client(
provider: AWSCognitoCredentialsProvider,
input_endpoint : string, input_region: string, input_clientId : string, input_clientName : string)
{
this.name = input_clientName;
let wsConfig : iot.WebsocketSigv4Config = {
credentialsProvider: provider,
region: input_region
}
let builder: iot.AwsIotMqtt5ClientConfigBuilder = iot.AwsIotMqtt5ClientConfigBuilder.newWebsocketMqttBuilderWithSigv4Auth(
input_endpoint,
wsConfig
)
builder.withConnectProperties({
clientId: input_clientId,
keepAliveIntervalSeconds: 120
})
this.client = new mqtt5.Mqtt5Client(builder.build());
// Invoked when the client has an error
this.client.on('error', (error) => {
log("[" + this.name + "] Error: " + error.toString());
});
// Invoked when the client gets a message/publish on a subscribed topic
this.client.on("messageReceived",(eventData: mqtt5.MessageReceivedEvent) : void => {
log("[" + this.name + "]: Received a publish");
if (eventData.message.topicName) {
log("\tPublish received on topic: " + eventData.message.topicName);
}
if (eventData.message.payload) {
log("\tMessage: " + toUtf8(new Uint8Array(eventData.message.payload as ArrayBuffer)));
}
});
// Invoked when the client connects successfully to the endpoint
this.client.on('connectionSuccess', (eventData: mqtt5.ConnectionSuccessEvent) => {
log("[" + this.name + "]: Connection success");
});
// Invoked when the client fails to connect to the endpoint
this.client.on('connectionFailure', (eventData: mqtt5.ConnectionFailureEvent) => {
log("[" + this.name + "]: Connection failed with error: " + eventData.error.toString());
});
// Invoked when the client becomes disconnected
this.client.on('disconnection', (eventData: mqtt5.DisconnectionEvent) => {
log("[" + this.name + "]: Disconnected");
if (eventData.disconnect) {
if (eventData.disconnect.reasonCode == mqtt5.DisconnectReasonCode.SharedSubscriptionsNotSupported) {
log(
"[" + this.name + "]: Shared Subscriptions not supported!" +
"\nThis sample will not work unless the endpoint being connected to has Shared Subscriptions support.");
}
}
});
// Invoked when the client stops
this.client.on('stopped', (eventData: mqtt5.StoppedEvent) => {
log("[" + this.name + "]: Stopped");
});
}
// Helper function to make sample code a little cleaner
public async startClient() {
const connectionSuccess = once(this.client as mqtt5.Mqtt5Client, "connectionSuccess");
this.client?.start();
await connectionSuccess;
}
// Helper function to make sample code a little cleaner
public async stopClient() {
const stopped = once(this.client as mqtt5.Mqtt5Client, "stopped");
this.client?.stop();
await stopped;
}
}
async function runSample() {
// Pull data from the command line
let input_endpoint : string = Settings.AWS_IOT_ENDPOINT;
let input_region : string = Settings.AWS_REGION;
let input_clientId : string = Settings.INPUT_CLIENT_ID;
let input_topic : string = Settings.INPUT_TOPIC;
let input_count : number = Settings.INPUT_COUNT;
let input_message : string = Settings.INPUT_MESSAGE;
let input_groupIdentifier : string = Settings.INPUT_GROUP_IDENTIFIER;
let input_cognitoIdentityPoolId = Settings.AWS_COGNITO_IDENTITY_POOL_ID;
// Construct the shared topic
let input_shared_topic : string = "$share/" + input_groupIdentifier + "/" + input_topic;
/** Set up the credentialsProvider */
const provider = new AWSCognitoCredentialsProvider({
IdentityPoolId: input_cognitoIdentityPoolId,
Region: input_region});
/** Make sure the credential provider fetched before setup the connection */
await provider.refreshCredentials();
// Create the MQTT5 clients: one publisher and two subscribers
let publisher : SampleMqtt5Client = new SampleMqtt5Client()
publisher.setupMqtt5Client(provider, input_endpoint, input_region, input_clientId + "1", "Publisher");
let subscriber_one : SampleMqtt5Client = new SampleMqtt5Client()
subscriber_one.setupMqtt5Client(provider, input_endpoint, input_region, input_clientId + "2", "Subscriber One");
let subscriber_two : SampleMqtt5Client = new SampleMqtt5Client()
subscriber_two.setupMqtt5Client(provider, input_endpoint, input_region, input_clientId + "3", "Subscriber Two");
try
{
// Connect all the clients
await publisher.startClient();
await subscriber_one.startClient();
await subscriber_two.startClient();
// Subscribe to the shared topic on the two subscribers
await subscriber_one.client?.subscribe({subscriptions: [{qos: mqtt5.QoS.AtLeastOnce, topicFilter: input_shared_topic }]});
log("[" + subscriber_one.name + "]: Subscribed to topic '" + input_topic + "' in shared subscription group '" + input_groupIdentifier + "'.");
log("[" + subscriber_one.name + "]: Full subscribed topic is '" + input_shared_topic + "'.");
await subscriber_two.client?.subscribe({subscriptions: [{qos: mqtt5.QoS.AtLeastOnce, topicFilter: input_shared_topic }]});
log("[" + subscriber_two.name + "]: Subscribed to topic '" + input_topic + "' in shared subscription group '" + input_groupIdentifier + "'.");
log("[" + subscriber_two.name + "]: Full subscribed topic is '" + input_shared_topic + "'.");
// Publish using the publisher client
let publishPacket : mqtt5.PublishPacket = {
qos: mqtt5.QoS.AtLeastOnce,
topicName: input_topic,
payload: input_message
};
if (input_count > 0) {
let count = 0;
while (count++ < input_count) {
publishPacket.payload = input_message + ": " + count;
await publisher.client?.publish(publishPacket);
log("[" + publisher.name + "]: Published");
await new Promise(resolve => setTimeout(resolve, 1000));
}
// Wait 5 seconds to let the last publish go out before unsubscribing.
await new Promise(resolve => setTimeout(resolve, 5000));
} else {
log("Skipping publishing messages due to message count being zero...");
}
// Unsubscribe from the shared topic on the two subscribers
await subscriber_one.client?.unsubscribe({topicFilters: [ input_shared_topic ]});
log("[" + subscriber_one.name + "]: Unsubscribed to topic '" + input_topic + "' in shared subscription group '" + input_groupIdentifier + "'.");
log("[" + subscriber_one.name + "]: Full unsubscribed topic is '" + input_shared_topic + "'.");
await subscriber_two.client?.unsubscribe({topicFilters: [ input_shared_topic ]});
log("[" | {
let now = new Date();
$('#console').append(`<pre>${now.toString()}: ${msg}</pre>`);
} | identifier_body |
index.ts | import { fromCognitoIdentityPool } from "@aws-sdk/credential-providers";
import { CognitoIdentityCredentials } from "@aws-sdk/credential-provider-cognito-identity/dist-types/fromCognitoIdentity"
// @ts-ignore
import Settings = require('./settings');
import {toUtf8} from "@aws-sdk/util-utf8-browser";
const $: JQueryStatic = jquery;
function log(msg: string) {
let now = new Date();
$('#console').append(`<pre>${now.toString()}: ${msg}</pre>`);
}
// AWSCognitoCredentialOptions. The credentials options used to create AWSCongnitoCredentialProvider.
interface AWSCognitoCredentialOptions
{
IdentityPoolId : string,
Region: string
}
// AWSCognitoCredentialsProvider. The AWSCognitoCredentialsProvider implements AWS.CognitoIdentityCredentials.
class AWSCognitoCredentialsProvider extends auth.CredentialsProvider{
private options: AWSCognitoCredentialOptions;
private cachedCredentials? : CognitoIdentityCredentials;
constructor(options: AWSCognitoCredentialOptions, expire_interval_in_ms? : number)
{
super();
this.options = options;
setInterval(async ()=>{
await this.refreshCredentials();
},expire_interval_in_ms?? 3600*1000);
}
getCredentials() : auth.AWSCredentials {
return {
aws_access_id: this.cachedCredentials?.accessKeyId ?? "",
aws_secret_key: this.cachedCredentials?.secretAccessKey ?? "",
aws_sts_token: this.cachedCredentials?.sessionToken,
aws_region: this.options.Region
}
}
async refreshCredentials() {
log('Fetching Cognito credentials');
this.cachedCredentials = await fromCognitoIdentityPool({
// Required. The unique identifier for the identity pool from which an identity should be
// retrieved or generated.
identityPoolId: this.options.IdentityPoolId,
clientConfig: { region: this.options.Region },
})();
}
}
// For the purposes of this sample, we need to associate certain variables with a particular MQTT5 client
// and to do so we use this class to hold all the data for a particular client used in the sample.
class SampleMqtt5Client {
client? : mqtt5.Mqtt5Client;
name? : string;
// Sets up the MQTT5 sample client using direct MQTT5 via mTLS with the passed input data.
public setupMqtt5Client(
provider: AWSCognitoCredentialsProvider,
input_endpoint : string, input_region: string, input_clientId : string, input_clientName : string)
{
this.name = input_clientName;
let wsConfig : iot.WebsocketSigv4Config = {
credentialsProvider: provider,
region: input_region
}
let builder: iot.AwsIotMqtt5ClientConfigBuilder = iot.AwsIotMqtt5ClientConfigBuilder.newWebsocketMqttBuilderWithSigv4Auth(
input_endpoint,
wsConfig
)
builder.withConnectProperties({
clientId: input_clientId,
keepAliveIntervalSeconds: 120
})
this.client = new mqtt5.Mqtt5Client(builder.build());
// Invoked when the client has an error
this.client.on('error', (error) => {
log("[" + this.name + "] Error: " + error.toString());
});
// Invoked when the client gets a message/publish on a subscribed topic
this.client.on("messageReceived",(eventData: mqtt5.MessageReceivedEvent) : void => {
log("[" + this.name + "]: Received a publish");
if (eventData.message.topicName) {
log("\tPublish received on topic: " + eventData.message.topicName);
}
if (eventData.message.payload) {
log("\tMessage: " + toUtf8(new Uint8Array(eventData.message.payload as ArrayBuffer)));
}
});
// Invoked when the client connects successfully to the endpoint
this.client.on('connectionSuccess', (eventData: mqtt5.ConnectionSuccessEvent) => {
log("[" + this.name + "]: Connection success");
});
// Invoked when the client fails to connect to the endpoint
this.client.on('connectionFailure', (eventData: mqtt5.ConnectionFailureEvent) => {
log("[" + this.name + "]: Connection failed with error: " + eventData.error.toString());
});
// Invoked when the client becomes disconnected
this.client.on('disconnection', (eventData: mqtt5.DisconnectionEvent) => {
log("[" + this.name + "]: Disconnected");
if (eventData.disconnect) {
if (eventData.disconnect.reasonCode == mqtt5.DisconnectReasonCode.SharedSubscriptionsNotSupported) {
log(
"[" + this.name + "]: Shared Subscriptions not supported!" +
"\nThis sample will not work unless the endpoint being connected to has Shared Subscriptions support.");
}
}
});
// Invoked when the client stops
this.client.on('stopped', (eventData: mqtt5.StoppedEvent) => {
log("[" + this.name + "]: Stopped");
});
}
// Helper function to make sample code a little cleaner
public async startClient() {
const connectionSuccess = once(this.client as mqtt5.Mqtt5Client, "connectionSuccess");
this.client?.start();
await connectionSuccess;
}
// Helper function to make sample code a little cleaner
public async stopClient() {
const stopped = once(this.client as mqtt5.Mqtt5Client, "stopped");
this.client?.stop();
await stopped;
}
}
async function runSample() {
// Pull data from the command line
let input_endpoint : string = Settings.AWS_IOT_ENDPOINT;
let input_region : string = Settings.AWS_REGION;
let input_clientId : string = Settings.INPUT_CLIENT_ID;
let input_topic : string = Settings.INPUT_TOPIC;
let input_count : number = Settings.INPUT_COUNT;
let input_message : string = Settings.INPUT_MESSAGE;
let input_groupIdentifier : string = Settings.INPUT_GROUP_IDENTIFIER;
let input_cognitoIdentityPoolId = Settings.AWS_COGNITO_IDENTITY_POOL_ID;
// Construct the shared topic
let input_shared_topic : string = "$share/" + input_groupIdentifier + "/" + input_topic;
/** Set up the credentialsProvider */
const provider = new AWSCognitoCredentialsProvider({
IdentityPoolId: input_cognitoIdentityPoolId,
Region: input_region});
/** Make sure the credential provider fetched before setup the connection */
await provider.refreshCredentials();
// Create the MQTT5 clients: one publisher and two subscribers
let publisher : SampleMqtt5Client = new SampleMqtt5Client()
publisher.setupMqtt5Client(provider, input_endpoint, input_region, input_clientId + "1", "Publisher");
let subscriber_one : SampleMqtt5Client = new SampleMqtt5Client()
subscriber_one.setupMqtt5Client(provider, input_endpoint, input_region, input_clientId + "2", "Subscriber One");
let subscriber_two : SampleMqtt5Client = new SampleMqtt5Client()
subscriber_two.setupMqtt5Client(provider, input_endpoint, input_region, input_clientId + "3", "Subscriber Two"); |
try
{
// Connect all the clients
await publisher.startClient();
await subscriber_one.startClient();
await subscriber_two.startClient();
// Subscribe to the shared topic on the two subscribers
await subscriber_one.client?.subscribe({subscriptions: [{qos: mqtt5.QoS.AtLeastOnce, topicFilter: input_shared_topic }]});
log("[" + subscriber_one.name + "]: Subscribed to topic '" + input_topic + "' in shared subscription group '" + input_groupIdentifier + "'.");
log("[" + subscriber_one.name + "]: Full subscribed topic is '" + input_shared_topic + "'.");
await subscriber_two.client?.subscribe({subscriptions: [{qos: mqtt5.QoS.AtLeastOnce, topicFilter: input_shared_topic }]});
log("[" + subscriber_two.name + "]: Subscribed to topic '" + input_topic + "' in shared subscription group '" + input_groupIdentifier + "'.");
log("[" + subscriber_two.name + "]: Full subscribed topic is '" + input_shared_topic + "'.");
// Publish using the publisher client
let publishPacket : mqtt5.PublishPacket = {
qos: mqtt5.QoS.AtLeastOnce,
topicName: input_topic,
payload: input_message
};
if (input_count > 0) {
let count = 0;
while (count++ < input_count) {
publishPacket.payload = input_message + ": " + count;
await publisher.client?.publish(publishPacket);
log("[" + publisher.name + "]: Published");
await new Promise(resolve => setTimeout(resolve, 1000));
}
// Wait 5 seconds to let the last publish go out before unsubscribing.
await new Promise(resolve => setTimeout(resolve, 5000));
} else {
log("Skipping publishing messages due to message count being zero...");
}
// Unsubscribe from the shared topic on the two subscribers
await subscriber_one.client?.unsubscribe({topicFilters: [ input_shared_topic ]});
log("[" + subscriber_one.name + "]: Unsubscribed to topic '" + input_topic + "' in shared subscription group '" + input_groupIdentifier + "'.");
log("[" + subscriber_one.name + "]: Full unsubscribed topic is '" + input_shared_topic + "'.");
await subscriber_two.client?.unsubscribe({topicFilters: [ input_shared_topic ]});
log("[" + | random_line_split | |
index.ts | import { fromCognitoIdentityPool } from "@aws-sdk/credential-providers";
import { CognitoIdentityCredentials } from "@aws-sdk/credential-provider-cognito-identity/dist-types/fromCognitoIdentity"
// @ts-ignore
import Settings = require('./settings');
import {toUtf8} from "@aws-sdk/util-utf8-browser";
const $: JQueryStatic = jquery;
function log(msg: string) {
let now = new Date();
$('#console').append(`<pre>${now.toString()}: ${msg}</pre>`);
}
// AWSCognitoCredentialOptions. The credentials options used to create AWSCongnitoCredentialProvider.
interface AWSCognitoCredentialOptions
{
IdentityPoolId : string,
Region: string
}
// AWSCognitoCredentialsProvider. The AWSCognitoCredentialsProvider implements AWS.CognitoIdentityCredentials.
class AWSCognitoCredentialsProvider extends auth.CredentialsProvider{
private options: AWSCognitoCredentialOptions;
private cachedCredentials? : CognitoIdentityCredentials;
constructor(options: AWSCognitoCredentialOptions, expire_interval_in_ms? : number)
{
super();
this.options = options;
setInterval(async ()=>{
await this.refreshCredentials();
},expire_interval_in_ms?? 3600*1000);
}
getCredentials() : auth.AWSCredentials {
return {
aws_access_id: this.cachedCredentials?.accessKeyId ?? "",
aws_secret_key: this.cachedCredentials?.secretAccessKey ?? "",
aws_sts_token: this.cachedCredentials?.sessionToken,
aws_region: this.options.Region
}
}
async refreshCredentials() {
log('Fetching Cognito credentials');
this.cachedCredentials = await fromCognitoIdentityPool({
// Required. The unique identifier for the identity pool from which an identity should be
// retrieved or generated.
identityPoolId: this.options.IdentityPoolId,
clientConfig: { region: this.options.Region },
})();
}
}
// For the purposes of this sample, we need to associate certain variables with a particular MQTT5 client
// and to do so we use this class to hold all the data for a particular client used in the sample.
class SampleMqtt5Client {
client? : mqtt5.Mqtt5Client;
name? : string;
// Sets up the MQTT5 sample client using direct MQTT5 via mTLS with the passed input data.
public | (
provider: AWSCognitoCredentialsProvider,
input_endpoint : string, input_region: string, input_clientId : string, input_clientName : string)
{
this.name = input_clientName;
let wsConfig : iot.WebsocketSigv4Config = {
credentialsProvider: provider,
region: input_region
}
let builder: iot.AwsIotMqtt5ClientConfigBuilder = iot.AwsIotMqtt5ClientConfigBuilder.newWebsocketMqttBuilderWithSigv4Auth(
input_endpoint,
wsConfig
)
builder.withConnectProperties({
clientId: input_clientId,
keepAliveIntervalSeconds: 120
})
this.client = new mqtt5.Mqtt5Client(builder.build());
// Invoked when the client has an error
this.client.on('error', (error) => {
log("[" + this.name + "] Error: " + error.toString());
});
// Invoked when the client gets a message/publish on a subscribed topic
this.client.on("messageReceived",(eventData: mqtt5.MessageReceivedEvent) : void => {
log("[" + this.name + "]: Received a publish");
if (eventData.message.topicName) {
log("\tPublish received on topic: " + eventData.message.topicName);
}
if (eventData.message.payload) {
log("\tMessage: " + toUtf8(new Uint8Array(eventData.message.payload as ArrayBuffer)));
}
});
// Invoked when the client connects successfully to the endpoint
this.client.on('connectionSuccess', (eventData: mqtt5.ConnectionSuccessEvent) => {
log("[" + this.name + "]: Connection success");
});
// Invoked when the client fails to connect to the endpoint
this.client.on('connectionFailure', (eventData: mqtt5.ConnectionFailureEvent) => {
log("[" + this.name + "]: Connection failed with error: " + eventData.error.toString());
});
// Invoked when the client becomes disconnected
this.client.on('disconnection', (eventData: mqtt5.DisconnectionEvent) => {
log("[" + this.name + "]: Disconnected");
if (eventData.disconnect) {
if (eventData.disconnect.reasonCode == mqtt5.DisconnectReasonCode.SharedSubscriptionsNotSupported) {
log(
"[" + this.name + "]: Shared Subscriptions not supported!" +
"\nThis sample will not work unless the endpoint being connected to has Shared Subscriptions support.");
}
}
});
// Invoked when the client stops
this.client.on('stopped', (eventData: mqtt5.StoppedEvent) => {
log("[" + this.name + "]: Stopped");
});
}
// Helper function to make sample code a little cleaner
public async startClient() {
const connectionSuccess = once(this.client as mqtt5.Mqtt5Client, "connectionSuccess");
this.client?.start();
await connectionSuccess;
}
// Helper function to make sample code a little cleaner
public async stopClient() {
const stopped = once(this.client as mqtt5.Mqtt5Client, "stopped");
this.client?.stop();
await stopped;
}
}
async function runSample() {
// Pull data from the command line
let input_endpoint : string = Settings.AWS_IOT_ENDPOINT;
let input_region : string = Settings.AWS_REGION;
let input_clientId : string = Settings.INPUT_CLIENT_ID;
let input_topic : string = Settings.INPUT_TOPIC;
let input_count : number = Settings.INPUT_COUNT;
let input_message : string = Settings.INPUT_MESSAGE;
let input_groupIdentifier : string = Settings.INPUT_GROUP_IDENTIFIER;
let input_cognitoIdentityPoolId = Settings.AWS_COGNITO_IDENTITY_POOL_ID;
// Construct the shared topic
let input_shared_topic : string = "$share/" + input_groupIdentifier + "/" + input_topic;
/** Set up the credentialsProvider */
const provider = new AWSCognitoCredentialsProvider({
IdentityPoolId: input_cognitoIdentityPoolId,
Region: input_region});
/** Make sure the credential provider fetched before setup the connection */
await provider.refreshCredentials();
// Create the MQTT5 clients: one publisher and two subscribers
let publisher : SampleMqtt5Client = new SampleMqtt5Client()
publisher.setupMqtt5Client(provider, input_endpoint, input_region, input_clientId + "1", "Publisher");
let subscriber_one : SampleMqtt5Client = new SampleMqtt5Client()
subscriber_one.setupMqtt5Client(provider, input_endpoint, input_region, input_clientId + "2", "Subscriber One");
let subscriber_two : SampleMqtt5Client = new SampleMqtt5Client()
subscriber_two.setupMqtt5Client(provider, input_endpoint, input_region, input_clientId + "3", "Subscriber Two");
try
{
// Connect all the clients
await publisher.startClient();
await subscriber_one.startClient();
await subscriber_two.startClient();
// Subscribe to the shared topic on the two subscribers
await subscriber_one.client?.subscribe({subscriptions: [{qos: mqtt5.QoS.AtLeastOnce, topicFilter: input_shared_topic }]});
log("[" + subscriber_one.name + "]: Subscribed to topic '" + input_topic + "' in shared subscription group '" + input_groupIdentifier + "'.");
log("[" + subscriber_one.name + "]: Full subscribed topic is '" + input_shared_topic + "'.");
await subscriber_two.client?.subscribe({subscriptions: [{qos: mqtt5.QoS.AtLeastOnce, topicFilter: input_shared_topic }]});
log("[" + subscriber_two.name + "]: Subscribed to topic '" + input_topic + "' in shared subscription group '" + input_groupIdentifier + "'.");
log("[" + subscriber_two.name + "]: Full subscribed topic is '" + input_shared_topic + "'.");
// Publish using the publisher client
let publishPacket : mqtt5.PublishPacket = {
qos: mqtt5.QoS.AtLeastOnce,
topicName: input_topic,
payload: input_message
};
if (input_count > 0) {
let count = 0;
while (count++ < input_count) {
publishPacket.payload = input_message + ": " + count;
await publisher.client?.publish(publishPacket);
log("[" + publisher.name + "]: Published");
await new Promise(resolve => setTimeout(resolve, 1000));
}
// Wait 5 seconds to let the last publish go out before unsubscribing.
await new Promise(resolve => setTimeout(resolve, 5000));
} else {
log("Skipping publishing messages due to message count being zero...");
}
// Unsubscribe from the shared topic on the two subscribers
await subscriber_one.client?.unsubscribe({topicFilters: [ input_shared_topic ]});
log("[" + subscriber_one.name + "]: Unsubscribed to topic '" + input_topic + "' in shared subscription group '" + input_groupIdentifier + "'.");
log("[" + subscriber_one.name + "]: Full unsubscribed topic is '" + input_shared_topic + "'.");
await subscriber_two.client?.unsubscribe({topicFilters: [ input_shared_topic ]});
log("[" + | setupMqtt5Client | identifier_name |
elements.rs | under the License.
*/
use crate::{
BinaryReader, BinaryReaderError, ConstExpr, ExternalKind, Result, SectionIteratorLimited,
SectionReader, SectionWithLimitedItems, ValType,
};
use std::ops::Range;
/// Represents a core WebAssembly element segment.
#[derive(Clone)]
pub struct Element<'a> {
/// The kind of the element segment.
pub kind: ElementKind<'a>,
/// The initial elements of the element segment.
pub items: ElementItems<'a>,
/// The type of the elements.
pub ty: ValType,
/// The range of the the element segment.
pub range: Range<usize>,
}
/// The kind of element segment.
#[derive(Clone)]
pub enum ElementKind<'a> {
/// The element segment is passive.
Passive,
/// The element segment is active.
Active {
/// The index of the table being initialized.
table_index: u32,
/// The initial expression of the element segment.
offset_expr: ConstExpr<'a>,
},
/// The element segment is declared.
Declared,
}
/// Represents the items of an element segment.
#[derive(Debug, Copy, Clone)]
pub struct ElementItems<'a> {
exprs: bool,
offset: usize,
data: &'a [u8],
}
/// Represents an individual item of an element segment.
#[derive(Debug)]
pub enum ElementItem<'a> {
/// The item is a function index.
Func(u32),
/// The item is an initialization expression.
Expr(ConstExpr<'a>),
}
impl<'a> ElementItems<'a> {
/// Gets an items reader for the items in an element segment.
pub fn get_items_reader<'b>(&self) -> Result<ElementItemsReader<'b>>
where
'a: 'b,
{
ElementItemsReader::new(self.data, self.offset, self.exprs)
}
}
/// A reader for element items in an element segment.
pub struct ElementItemsReader<'a> {
reader: BinaryReader<'a>,
count: u32,
exprs: bool,
}
impl<'a> ElementItemsReader<'a> {
/// Constructs a new `ElementItemsReader` for the given data and offset.
pub fn new(data: &[u8], offset: usize, exprs: bool) -> Result<ElementItemsReader> {
let mut reader = BinaryReader::new_with_offset(data, offset);
let count = reader.read_var_u32()?;
Ok(ElementItemsReader {
reader,
count,
exprs,
})
}
/// Gets the original position of the reader.
pub fn original_position(&self) -> usize {
self.reader.original_position()
}
/// Gets the count of element items in the segment.
pub fn get_count(&self) -> u32 {
self.count
}
/// Whether or not initialization expressions are used.
pub fn uses_exprs(&self) -> bool {
self.exprs
}
/// Reads an element item from the segment.
pub fn read(&mut self) -> Result<ElementItem<'a>> {
if self.exprs {
let expr = self.reader.read_const_expr()?;
Ok(ElementItem::Expr(expr))
} else {
let idx = self.reader.read_var_u32()?;
Ok(ElementItem::Func(idx))
}
}
}
impl<'a> IntoIterator for ElementItemsReader<'a> {
type Item = Result<ElementItem<'a>>;
type IntoIter = ElementItemsIterator<'a>;
fn into_iter(self) -> Self::IntoIter {
let count = self.count;
ElementItemsIterator {
reader: self,
left: count,
err: false,
}
}
}
/// An iterator over element items in an element segment.
pub struct ElementItemsIterator<'a> {
reader: ElementItemsReader<'a>,
left: u32,
err: bool,
}
impl<'a> Iterator for ElementItemsIterator<'a> {
type Item = Result<ElementItem<'a>>;
fn next(&mut self) -> Option<Self::Item> |
fn size_hint(&self) -> (usize, Option<usize>) {
let count = self.reader.get_count() as usize;
(count, Some(count))
}
}
/// A reader for the element section of a WebAssembly module.
#[derive(Clone)]
pub struct ElementSectionReader<'a> {
reader: BinaryReader<'a>,
count: u32,
}
impl<'a> ElementSectionReader<'a> {
/// Constructs a new `ElementSectionReader` for the given data and offset.
pub fn new(data: &'a [u8], offset: usize) -> Result<ElementSectionReader<'a>> {
let mut reader = BinaryReader::new_with_offset(data, offset);
let count = reader.read_var_u32()?;
Ok(ElementSectionReader { reader, count })
}
/// Gets the original position of the section reader.
pub fn original_position(&self) -> usize {
self.reader.original_position()
}
/// Gets the count of items in the section.
pub fn get_count(&self) -> u32 {
self.count
}
/// Reads content of the element section.
///
/// # Examples
///
/// ```no_run
/// # let data: &[u8] = &[];
/// use wasmparser::{ElementSectionReader, ElementKind};
/// let mut element_reader = ElementSectionReader::new(data, 0).unwrap();
/// for _ in 0..element_reader.get_count() {
/// let element = element_reader.read().expect("element");
/// if let ElementKind::Active { offset_expr, .. } = element.kind {
/// let mut offset_expr_reader = offset_expr.get_binary_reader();
/// let op = offset_expr_reader.read_operator().expect("op");
/// println!("offset expression: {:?}", op);
/// }
/// let mut items_reader = element.items.get_items_reader().expect("items reader");
/// for _ in 0..items_reader.get_count() {
/// let item = items_reader.read().expect("item");
/// println!(" Item: {:?}", item);
/// }
/// }
/// ```
pub fn read<'b>(&mut self) -> Result<Element<'b>>
where
'a: 'b,
{
let elem_start = self.reader.original_position();
// The current handling of the flags is largely specified in the `bulk-memory` proposal,
// which at the time this commend is written has been merged to the main specification
// draft.
//
// Notably, this proposal allows multiple different encodings of the table index 0. `00`
// and `02 00` are both valid ways to specify the 0-th table. However it also makes
// another encoding of the 0-th memory `80 00` no longer valid.
//
// We, however maintain this support by parsing `flags` as a LEB128 integer. In that case,
// `80 00` encoding is parsed out as `0` and is therefore assigned a `tableidx` 0, even
// though the current specification draft does not allow for this.
//
// See also https://github.com/WebAssembly/spec/issues/1439
let flags = self.reader.read_var_u32()?;
if (flags & !0b111) != 0 {
return Err(BinaryReaderError::new(
"invalid flags byte in element segment",
self.reader.original_position() - 1,
));
}
let kind = if flags & 0b001 != 0 {
if flags & 0b010 != 0 {
ElementKind::Declared
} else {
ElementKind::Passive
}
} else {
let table_index = if flags & 0b010 == 0 {
0
} else {
self.reader.read_var_u32()?
};
let offset_expr = {
let expr_offset = self.reader.position;
self.reader.skip_const_expr()?;
let data = &self.reader.buffer[expr_offset..self.reader.position];
ConstExpr::new(data, self.reader.original_offset + expr_offset)
};
ElementKind::Active {
table_index,
offset_expr,
}
};
let exprs = flags & 0b100 != 0;
let ty = if flags & 0b011 != 0 {
if exprs {
self.reader.read_val_type()?
} else {
match self.reader.read_external_kind()? {
ExternalKind::Func => ValType::FuncRef,
_ => {
return Err(BinaryReaderError::new(
"only the function external type is supported in elem segment",
self.reader.original_position() - 1,
));
}
}
}
} else {
ValType::FuncRef
};
let data_start = self.reader.position;
let items_count = self.reader.read_var_u32()?;
if exprs {
for _ in 0..items_count {
self.reader.skip_const_expr()?;
}
} else {
for _ in 0..items_count {
self.reader.read_var_u | {
if self.err || self.left == 0 {
return None;
}
let result = self.reader.read();
self.err = result.is_err();
self.left -= 1;
Some(result)
} | identifier_body |
elements.rs | under the License.
*/
use crate::{
BinaryReader, BinaryReaderError, ConstExpr, ExternalKind, Result, SectionIteratorLimited,
SectionReader, SectionWithLimitedItems, ValType,
};
use std::ops::Range;
/// Represents a core WebAssembly element segment.
#[derive(Clone)]
pub struct Element<'a> {
/// The kind of the element segment.
pub kind: ElementKind<'a>,
/// The initial elements of the element segment.
pub items: ElementItems<'a>,
/// The type of the elements.
pub ty: ValType,
/// The range of the the element segment.
pub range: Range<usize>,
}
/// The kind of element segment.
#[derive(Clone)]
pub enum ElementKind<'a> {
/// The element segment is passive.
Passive,
/// The element segment is active.
Active {
/// The index of the table being initialized.
table_index: u32,
/// The initial expression of the element segment.
offset_expr: ConstExpr<'a>,
},
/// The element segment is declared.
Declared,
}
/// Represents the items of an element segment.
#[derive(Debug, Copy, Clone)]
pub struct ElementItems<'a> {
exprs: bool,
offset: usize,
data: &'a [u8],
}
/// Represents an individual item of an element segment.
#[derive(Debug)]
pub enum ElementItem<'a> {
/// The item is a function index.
Func(u32),
/// The item is an initialization expression.
Expr(ConstExpr<'a>),
}
impl<'a> ElementItems<'a> {
/// Gets an items reader for the items in an element segment.
pub fn get_items_reader<'b>(&self) -> Result<ElementItemsReader<'b>>
where
'a: 'b,
{
ElementItemsReader::new(self.data, self.offset, self.exprs)
}
}
/// A reader for element items in an element segment.
pub struct ElementItemsReader<'a> {
reader: BinaryReader<'a>,
count: u32,
exprs: bool,
}
impl<'a> ElementItemsReader<'a> {
/// Constructs a new `ElementItemsReader` for the given data and offset.
pub fn new(data: &[u8], offset: usize, exprs: bool) -> Result<ElementItemsReader> {
let mut reader = BinaryReader::new_with_offset(data, offset);
let count = reader.read_var_u32()?;
Ok(ElementItemsReader {
reader,
count,
exprs,
})
}
/// Gets the original position of the reader.
pub fn original_position(&self) -> usize {
self.reader.original_position()
}
/// Gets the count of element items in the segment.
pub fn get_count(&self) -> u32 {
self.count
}
/// Whether or not initialization expressions are used.
pub fn uses_exprs(&self) -> bool {
self.exprs
}
/// Reads an element item from the segment.
pub fn read(&mut self) -> Result<ElementItem<'a>> {
if self.exprs {
let expr = self.reader.read_const_expr()?;
Ok(ElementItem::Expr(expr))
} else {
let idx = self.reader.read_var_u32()?;
Ok(ElementItem::Func(idx))
}
}
}
impl<'a> IntoIterator for ElementItemsReader<'a> {
type Item = Result<ElementItem<'a>>;
type IntoIter = ElementItemsIterator<'a>;
fn into_iter(self) -> Self::IntoIter {
let count = self.count;
ElementItemsIterator {
reader: self,
left: count,
err: false,
}
}
}
/// An iterator over element items in an element segment.
pub struct ElementItemsIterator<'a> {
reader: ElementItemsReader<'a>,
left: u32,
err: bool,
}
impl<'a> Iterator for ElementItemsIterator<'a> {
type Item = Result<ElementItem<'a>>;
fn next(&mut self) -> Option<Self::Item> {
if self.err || self.left == 0 {
return None;
}
let result = self.reader.read();
self.err = result.is_err();
self.left -= 1;
Some(result)
}
fn size_hint(&self) -> (usize, Option<usize>) {
let count = self.reader.get_count() as usize;
(count, Some(count))
}
}
/// A reader for the element section of a WebAssembly module.
#[derive(Clone)]
pub struct ElementSectionReader<'a> {
reader: BinaryReader<'a>,
count: u32,
}
impl<'a> ElementSectionReader<'a> {
/// Constructs a new `ElementSectionReader` for the given data and offset.
pub fn new(data: &'a [u8], offset: usize) -> Result<ElementSectionReader<'a>> {
let mut reader = BinaryReader::new_with_offset(data, offset);
let count = reader.read_var_u32()?;
Ok(ElementSectionReader { reader, count })
}
/// Gets the original position of the section reader.
pub fn original_position(&self) -> usize {
self.reader.original_position()
}
/// Gets the count of items in the section.
pub fn get_count(&self) -> u32 {
self.count
}
/// Reads content of the element section.
///
/// # Examples
///
/// ```no_run
/// # let data: &[u8] = &[];
/// use wasmparser::{ElementSectionReader, ElementKind};
/// let mut element_reader = ElementSectionReader::new(data, 0).unwrap();
/// for _ in 0..element_reader.get_count() {
/// let element = element_reader.read().expect("element");
/// if let ElementKind::Active { offset_expr, .. } = element.kind {
/// let mut offset_expr_reader = offset_expr.get_binary_reader();
/// let op = offset_expr_reader.read_operator().expect("op");
/// println!("offset expression: {:?}", op);
/// }
/// let mut items_reader = element.items.get_items_reader().expect("items reader"); | /// ```
pub fn read<'b>(&mut self) -> Result<Element<'b>>
where
'a: 'b,
{
let elem_start = self.reader.original_position();
// The current handling of the flags is largely specified in the `bulk-memory` proposal,
// which at the time this commend is written has been merged to the main specification
// draft.
//
// Notably, this proposal allows multiple different encodings of the table index 0. `00`
// and `02 00` are both valid ways to specify the 0-th table. However it also makes
// another encoding of the 0-th memory `80 00` no longer valid.
//
// We, however maintain this support by parsing `flags` as a LEB128 integer. In that case,
// `80 00` encoding is parsed out as `0` and is therefore assigned a `tableidx` 0, even
// though the current specification draft does not allow for this.
//
// See also https://github.com/WebAssembly/spec/issues/1439
let flags = self.reader.read_var_u32()?;
if (flags & !0b111) != 0 {
return Err(BinaryReaderError::new(
"invalid flags byte in element segment",
self.reader.original_position() - 1,
));
}
let kind = if flags & 0b001 != 0 {
if flags & 0b010 != 0 {
ElementKind::Declared
} else {
ElementKind::Passive
}
} else {
let table_index = if flags & 0b010 == 0 {
0
} else {
self.reader.read_var_u32()?
};
let offset_expr = {
let expr_offset = self.reader.position;
self.reader.skip_const_expr()?;
let data = &self.reader.buffer[expr_offset..self.reader.position];
ConstExpr::new(data, self.reader.original_offset + expr_offset)
};
ElementKind::Active {
table_index,
offset_expr,
}
};
let exprs = flags & 0b100 != 0;
let ty = if flags & 0b011 != 0 {
if exprs {
self.reader.read_val_type()?
} else {
match self.reader.read_external_kind()? {
ExternalKind::Func => ValType::FuncRef,
_ => {
return Err(BinaryReaderError::new(
"only the function external type is supported in elem segment",
self.reader.original_position() - 1,
));
}
}
}
} else {
ValType::FuncRef
};
let data_start = self.reader.position;
let items_count = self.reader.read_var_u32()?;
if exprs {
for _ in 0..items_count {
self.reader.skip_const_expr()?;
}
} else {
for _ in 0..items_count {
self.reader.read_var_u32 | /// for _ in 0..items_reader.get_count() {
/// let item = items_reader.read().expect("item");
/// println!(" Item: {:?}", item);
/// }
/// } | random_line_split |
elements.rs | under the License.
*/
use crate::{
BinaryReader, BinaryReaderError, ConstExpr, ExternalKind, Result, SectionIteratorLimited,
SectionReader, SectionWithLimitedItems, ValType,
};
use std::ops::Range;
/// Represents a core WebAssembly element segment.
#[derive(Clone)]
pub struct Element<'a> {
/// The kind of the element segment.
pub kind: ElementKind<'a>,
/// The initial elements of the element segment.
pub items: ElementItems<'a>,
/// The type of the elements.
pub ty: ValType,
/// The range of the the element segment.
pub range: Range<usize>,
}
/// The kind of element segment.
#[derive(Clone)]
pub enum ElementKind<'a> {
/// The element segment is passive.
Passive,
/// The element segment is active.
Active {
/// The index of the table being initialized.
table_index: u32,
/// The initial expression of the element segment.
offset_expr: ConstExpr<'a>,
},
/// The element segment is declared.
Declared,
}
/// Represents the items of an element segment.
#[derive(Debug, Copy, Clone)]
pub struct ElementItems<'a> {
exprs: bool,
offset: usize,
data: &'a [u8],
}
/// Represents an individual item of an element segment.
#[derive(Debug)]
pub enum ElementItem<'a> {
/// The item is a function index.
Func(u32),
/// The item is an initialization expression.
Expr(ConstExpr<'a>),
}
impl<'a> ElementItems<'a> {
/// Gets an items reader for the items in an element segment.
pub fn get_items_reader<'b>(&self) -> Result<ElementItemsReader<'b>>
where
'a: 'b,
{
ElementItemsReader::new(self.data, self.offset, self.exprs)
}
}
/// A reader for element items in an element segment.
pub struct ElementItemsReader<'a> {
reader: BinaryReader<'a>,
count: u32,
exprs: bool,
}
impl<'a> ElementItemsReader<'a> {
/// Constructs a new `ElementItemsReader` for the given data and offset.
pub fn new(data: &[u8], offset: usize, exprs: bool) -> Result<ElementItemsReader> {
let mut reader = BinaryReader::new_with_offset(data, offset);
let count = reader.read_var_u32()?;
Ok(ElementItemsReader {
reader,
count,
exprs,
})
}
/// Gets the original position of the reader.
pub fn original_position(&self) -> usize {
self.reader.original_position()
}
/// Gets the count of element items in the segment.
pub fn get_count(&self) -> u32 {
self.count
}
/// Whether or not initialization expressions are used.
pub fn uses_exprs(&self) -> bool {
self.exprs
}
/// Reads an element item from the segment.
pub fn read(&mut self) -> Result<ElementItem<'a>> {
if self.exprs {
let expr = self.reader.read_const_expr()?;
Ok(ElementItem::Expr(expr))
} else {
let idx = self.reader.read_var_u32()?;
Ok(ElementItem::Func(idx))
}
}
}
impl<'a> IntoIterator for ElementItemsReader<'a> {
type Item = Result<ElementItem<'a>>;
type IntoIter = ElementItemsIterator<'a>;
fn into_iter(self) -> Self::IntoIter {
let count = self.count;
ElementItemsIterator {
reader: self,
left: count,
err: false,
}
}
}
/// An iterator over element items in an element segment.
pub struct ElementItemsIterator<'a> {
reader: ElementItemsReader<'a>,
left: u32,
err: bool,
}
impl<'a> Iterator for ElementItemsIterator<'a> {
type Item = Result<ElementItem<'a>>;
fn next(&mut self) -> Option<Self::Item> {
if self.err || self.left == 0 {
return None;
}
let result = self.reader.read();
self.err = result.is_err();
self.left -= 1;
Some(result)
}
fn size_hint(&self) -> (usize, Option<usize>) {
let count = self.reader.get_count() as usize;
(count, Some(count))
}
}
/// A reader for the element section of a WebAssembly module.
#[derive(Clone)]
pub struct ElementSectionReader<'a> {
reader: BinaryReader<'a>,
count: u32,
}
impl<'a> ElementSectionReader<'a> {
/// Constructs a new `ElementSectionReader` for the given data and offset.
pub fn new(data: &'a [u8], offset: usize) -> Result<ElementSectionReader<'a>> {
let mut reader = BinaryReader::new_with_offset(data, offset);
let count = reader.read_var_u32()?;
Ok(ElementSectionReader { reader, count })
}
/// Gets the original position of the section reader.
pub fn original_position(&self) -> usize {
self.reader.original_position()
}
/// Gets the count of items in the section.
pub fn get_count(&self) -> u32 {
self.count
}
/// Reads content of the element section.
///
/// # Examples
///
/// ```no_run
/// # let data: &[u8] = &[];
/// use wasmparser::{ElementSectionReader, ElementKind};
/// let mut element_reader = ElementSectionReader::new(data, 0).unwrap();
/// for _ in 0..element_reader.get_count() {
/// let element = element_reader.read().expect("element");
/// if let ElementKind::Active { offset_expr, .. } = element.kind {
/// let mut offset_expr_reader = offset_expr.get_binary_reader();
/// let op = offset_expr_reader.read_operator().expect("op");
/// println!("offset expression: {:?}", op);
/// }
/// let mut items_reader = element.items.get_items_reader().expect("items reader");
/// for _ in 0..items_reader.get_count() {
/// let item = items_reader.read().expect("item");
/// println!(" Item: {:?}", item);
/// }
/// }
/// ```
pub fn read<'b>(&mut self) -> Result<Element<'b>>
where
'a: 'b,
{
let elem_start = self.reader.original_position();
// The current handling of the flags is largely specified in the `bulk-memory` proposal,
// which at the time this commend is written has been merged to the main specification
// draft.
//
// Notably, this proposal allows multiple different encodings of the table index 0. `00`
// and `02 00` are both valid ways to specify the 0-th table. However it also makes
// another encoding of the 0-th memory `80 00` no longer valid.
//
// We, however maintain this support by parsing `flags` as a LEB128 integer. In that case,
// `80 00` encoding is parsed out as `0` and is therefore assigned a `tableidx` 0, even
// though the current specification draft does not allow for this.
//
// See also https://github.com/WebAssembly/spec/issues/1439
let flags = self.reader.read_var_u32()?;
if (flags & !0b111) != 0 |
let kind = if flags & 0b001 != 0 {
if flags & 0b010 != 0 {
ElementKind::Declared
} else {
ElementKind::Passive
}
} else {
let table_index = if flags & 0b010 == 0 {
0
} else {
self.reader.read_var_u32()?
};
let offset_expr = {
let expr_offset = self.reader.position;
self.reader.skip_const_expr()?;
let data = &self.reader.buffer[expr_offset..self.reader.position];
ConstExpr::new(data, self.reader.original_offset + expr_offset)
};
ElementKind::Active {
table_index,
offset_expr,
}
};
let exprs = flags & 0b100 != 0;
let ty = if flags & 0b011 != 0 {
if exprs {
self.reader.read_val_type()?
} else {
match self.reader.read_external_kind()? {
ExternalKind::Func => ValType::FuncRef,
_ => {
return Err(BinaryReaderError::new(
"only the function external type is supported in elem segment",
self.reader.original_position() - 1,
));
}
}
}
} else {
ValType::FuncRef
};
let data_start = self.reader.position;
let items_count = self.reader.read_var_u32()?;
if exprs {
for _ in 0..items_count {
self.reader.skip_const_expr()?;
}
} else {
for _ in 0..items_count {
self.reader.read_var_u | {
return Err(BinaryReaderError::new(
"invalid flags byte in element segment",
self.reader.original_position() - 1,
));
} | conditional_block |
elements.rs | under the License.
*/
use crate::{
BinaryReader, BinaryReaderError, ConstExpr, ExternalKind, Result, SectionIteratorLimited,
SectionReader, SectionWithLimitedItems, ValType,
};
use std::ops::Range;
/// Represents a core WebAssembly element segment.
#[derive(Clone)]
pub struct Element<'a> {
/// The kind of the element segment.
pub kind: ElementKind<'a>,
/// The initial elements of the element segment.
pub items: ElementItems<'a>,
/// The type of the elements.
pub ty: ValType,
/// The range of the the element segment.
pub range: Range<usize>,
}
/// The kind of element segment.
#[derive(Clone)]
pub enum ElementKind<'a> {
/// The element segment is passive.
Passive,
/// The element segment is active.
Active {
/// The index of the table being initialized.
table_index: u32,
/// The initial expression of the element segment.
offset_expr: ConstExpr<'a>,
},
/// The element segment is declared.
Declared,
}
/// Represents the items of an element segment.
#[derive(Debug, Copy, Clone)]
pub struct ElementItems<'a> {
exprs: bool,
offset: usize,
data: &'a [u8],
}
/// Represents an individual item of an element segment.
#[derive(Debug)]
pub enum ElementItem<'a> {
/// The item is a function index.
Func(u32),
/// The item is an initialization expression.
Expr(ConstExpr<'a>),
}
impl<'a> ElementItems<'a> {
/// Gets an items reader for the items in an element segment.
pub fn get_items_reader<'b>(&self) -> Result<ElementItemsReader<'b>>
where
'a: 'b,
{
ElementItemsReader::new(self.data, self.offset, self.exprs)
}
}
/// A reader for element items in an element segment.
pub struct ElementItemsReader<'a> {
reader: BinaryReader<'a>,
count: u32,
exprs: bool,
}
impl<'a> ElementItemsReader<'a> {
/// Constructs a new `ElementItemsReader` for the given data and offset.
pub fn new(data: &[u8], offset: usize, exprs: bool) -> Result<ElementItemsReader> {
let mut reader = BinaryReader::new_with_offset(data, offset);
let count = reader.read_var_u32()?;
Ok(ElementItemsReader {
reader,
count,
exprs,
})
}
/// Gets the original position of the reader.
pub fn original_position(&self) -> usize {
self.reader.original_position()
}
/// Gets the count of element items in the segment.
pub fn get_count(&self) -> u32 {
self.count
}
/// Whether or not initialization expressions are used.
pub fn uses_exprs(&self) -> bool {
self.exprs
}
/// Reads an element item from the segment.
pub fn read(&mut self) -> Result<ElementItem<'a>> {
if self.exprs {
let expr = self.reader.read_const_expr()?;
Ok(ElementItem::Expr(expr))
} else {
let idx = self.reader.read_var_u32()?;
Ok(ElementItem::Func(idx))
}
}
}
impl<'a> IntoIterator for ElementItemsReader<'a> {
type Item = Result<ElementItem<'a>>;
type IntoIter = ElementItemsIterator<'a>;
fn into_iter(self) -> Self::IntoIter {
let count = self.count;
ElementItemsIterator {
reader: self,
left: count,
err: false,
}
}
}
/// An iterator over element items in an element segment.
pub struct ElementItemsIterator<'a> {
reader: ElementItemsReader<'a>,
left: u32,
err: bool,
}
impl<'a> Iterator for ElementItemsIterator<'a> {
type Item = Result<ElementItem<'a>>;
fn next(&mut self) -> Option<Self::Item> {
if self.err || self.left == 0 {
return None;
}
let result = self.reader.read();
self.err = result.is_err();
self.left -= 1;
Some(result)
}
fn size_hint(&self) -> (usize, Option<usize>) {
let count = self.reader.get_count() as usize;
(count, Some(count))
}
}
/// A reader for the element section of a WebAssembly module.
#[derive(Clone)]
pub struct ElementSectionReader<'a> {
reader: BinaryReader<'a>,
count: u32,
}
impl<'a> ElementSectionReader<'a> {
/// Constructs a new `ElementSectionReader` for the given data and offset.
pub fn new(data: &'a [u8], offset: usize) -> Result<ElementSectionReader<'a>> {
let mut reader = BinaryReader::new_with_offset(data, offset);
let count = reader.read_var_u32()?;
Ok(ElementSectionReader { reader, count })
}
/// Gets the original position of the section reader.
pub fn original_position(&self) -> usize {
self.reader.original_position()
}
/// Gets the count of items in the section.
pub fn | (&self) -> u32 {
self.count
}
/// Reads content of the element section.
///
/// # Examples
///
/// ```no_run
/// # let data: &[u8] = &[];
/// use wasmparser::{ElementSectionReader, ElementKind};
/// let mut element_reader = ElementSectionReader::new(data, 0).unwrap();
/// for _ in 0..element_reader.get_count() {
/// let element = element_reader.read().expect("element");
/// if let ElementKind::Active { offset_expr, .. } = element.kind {
/// let mut offset_expr_reader = offset_expr.get_binary_reader();
/// let op = offset_expr_reader.read_operator().expect("op");
/// println!("offset expression: {:?}", op);
/// }
/// let mut items_reader = element.items.get_items_reader().expect("items reader");
/// for _ in 0..items_reader.get_count() {
/// let item = items_reader.read().expect("item");
/// println!(" Item: {:?}", item);
/// }
/// }
/// ```
pub fn read<'b>(&mut self) -> Result<Element<'b>>
where
'a: 'b,
{
let elem_start = self.reader.original_position();
// The current handling of the flags is largely specified in the `bulk-memory` proposal,
// which at the time this commend is written has been merged to the main specification
// draft.
//
// Notably, this proposal allows multiple different encodings of the table index 0. `00`
// and `02 00` are both valid ways to specify the 0-th table. However it also makes
// another encoding of the 0-th memory `80 00` no longer valid.
//
// We, however maintain this support by parsing `flags` as a LEB128 integer. In that case,
// `80 00` encoding is parsed out as `0` and is therefore assigned a `tableidx` 0, even
// though the current specification draft does not allow for this.
//
// See also https://github.com/WebAssembly/spec/issues/1439
let flags = self.reader.read_var_u32()?;
if (flags & !0b111) != 0 {
return Err(BinaryReaderError::new(
"invalid flags byte in element segment",
self.reader.original_position() - 1,
));
}
let kind = if flags & 0b001 != 0 {
if flags & 0b010 != 0 {
ElementKind::Declared
} else {
ElementKind::Passive
}
} else {
let table_index = if flags & 0b010 == 0 {
0
} else {
self.reader.read_var_u32()?
};
let offset_expr = {
let expr_offset = self.reader.position;
self.reader.skip_const_expr()?;
let data = &self.reader.buffer[expr_offset..self.reader.position];
ConstExpr::new(data, self.reader.original_offset + expr_offset)
};
ElementKind::Active {
table_index,
offset_expr,
}
};
let exprs = flags & 0b100 != 0;
let ty = if flags & 0b011 != 0 {
if exprs {
self.reader.read_val_type()?
} else {
match self.reader.read_external_kind()? {
ExternalKind::Func => ValType::FuncRef,
_ => {
return Err(BinaryReaderError::new(
"only the function external type is supported in elem segment",
self.reader.original_position() - 1,
));
}
}
}
} else {
ValType::FuncRef
};
let data_start = self.reader.position;
let items_count = self.reader.read_var_u32()?;
if exprs {
for _ in 0..items_count {
self.reader.skip_const_expr()?;
}
} else {
for _ in 0..items_count {
self.reader.read_var_u3 | get_count | identifier_name |
graph_runner.ts | const DEFAULT_INFERENCE_EXAMPLE_INTERVAL_MS = 3000;
export interface GraphRunnerEventObserver {
batchesTrainedCallback?: (totalBatchesTrained: number) => void;
avgCostCallback?: (avgCost: Scalar) => void;
metricCallback?: (metric: Tensor) => void;
inferenceExamplesCallback?:
(feeds: FeedEntry[][], inferenceValues: Tensor[]) => void;
inferenceExamplesPerSecCallback?: (examplesPerSec: number) => void;
trainExamplesPerSecCallback?: (examplesPerSec: number) => void;
totalTimeCallback?: (totalTimeSec: number) => void;
doneTrainingCallback?: () => void;
}
export enum MetricReduction {
SUM,
MEAN
}
/**
* A class that drives the training of a graph model given a dataset. It allows
* the user to provide a set of callbacks for measurements like cost, accuracy,
* and speed of training.
*/
export class GraphRunner {
private costTensor: SymbolicTensor;
private trainFeedEntries: FeedEntry[];
private batchSize: number;
private optimizer: Optimizer;
private currentTrainLoopNumBatches: number|undefined;
private costIntervalMs: number;
private metricTensor: SymbolicTensor|undefined;
private metricFeedEntries: FeedEntry[]|undefined;
private metricBatchSize: number|undefined;
private metricReduction: MetricReduction;
private metricIntervalMs: number;
private inferenceTensor: SymbolicTensor;
private inferenceFeedEntries: FeedEntry[]|undefined;
private inferenceExampleIntervalMs: number;
private inferenceExampleCount: number;
// Runtime information.
private isTraining: boolean;
private totalBatchesTrained: number;
private batchesTrainedThisRun: number;
private lastComputedMetric: Scalar;
private isInferring: boolean;
private lastInferTimeoutID: number;
private currentInferenceLoopNumPasses: number|undefined;
private inferencePassesThisRun: number;
private trainStartTimestamp: number;
private lastCostTimestamp = 0;
private lastEvalTimestamp = 0;
private zeroScalar: Scalar;
private metricBatchSizeScalar: Scalar;
constructor(
private math: NDArrayMath, private session: Session,
private eventObserver: GraphRunnerEventObserver) {
this.resetStatistics();
this.zeroScalar = Scalar.new(0);
}
resetStatistics() {
this.totalBatchesTrained = 0;
}
/**
* Start the training loop with an optional number of batches to train for.
* Optionally takes a metric tensor and feed entries to compute periodically.
* This can be used for computing accuracy, or a similar metric.
*/
train(
costTensor: SymbolicTensor, trainFeedEntries: FeedEntry[],
batchSize: number, optimizer: Optimizer, numBatches?: number,
metricTensor?: SymbolicTensor, metricFeedEntries?: FeedEntry[],
metricBatchSize?: number, metricReduction = MetricReduction.MEAN,
evalIntervalMs = DEFAULT_EVAL_INTERVAL_MS,
costIntervalMs = DEFAULT_COST_INTERVAL_MS) {
this.costTensor = costTensor;
this.trainFeedEntries = trainFeedEntries;
this.metricTensor = metricTensor;
this.metricFeedEntries = metricFeedEntries;
if (metricBatchSize != null && this.metricBatchSize !== metricBatchSize) {
if (this.metricBatchSizeScalar != null) {
this.metricBatchSizeScalar.dispose();
}
this.metricBatchSizeScalar = Scalar.new(metricBatchSize);
}
this.metricBatchSize = metricBatchSize;
this.metricReduction = metricReduction;
this.batchSize = batchSize;
this.optimizer = optimizer;
this.metricIntervalMs = evalIntervalMs;
this.costIntervalMs = costIntervalMs;
this.currentTrainLoopNumBatches = numBatches;
this.batchesTrainedThisRun = 0;
this.isTraining = true;
this.trainStartTimestamp = performance.now();
this.trainNetwork();
}
stopTraining() {
this.isTraining = false;
}
resumeTraining() {
this.isTraining = true;
this.trainNetwork();
}
private trainNetwork() {
if (this.batchesTrainedThisRun === this.currentTrainLoopNumBatches) {
this.stopTraining();
}
if (!this.isTraining) {
if (this.eventObserver.doneTrainingCallback != null) {
this.eventObserver.doneTrainingCallback();
}
return;
}
const start = performance.now();
const shouldComputeCost = this.eventObserver.avgCostCallback != null &&
(start - this.lastCostTimestamp > this.costIntervalMs);
if (shouldComputeCost) {
this.lastCostTimestamp = start;
}
const costReduction =
shouldComputeCost ? CostReduction.MEAN : CostReduction.NONE;
tidy(() => {
const avgCost = this.session.train(
this.costTensor, this.trainFeedEntries, this.batchSize,
this.optimizer, costReduction);
if (shouldComputeCost) {
const trainTime = performance.now() - start;
this.eventObserver.avgCostCallback(avgCost);
if (this.eventObserver.trainExamplesPerSecCallback != null) {
const examplesPerSec = (this.batchSize * 1000 / trainTime);
this.eventObserver.trainExamplesPerSecCallback(examplesPerSec);
}
}
if (this.eventObserver.metricCallback != null &&
this.metricFeedEntries != null &&
start - this.lastEvalTimestamp > this.metricIntervalMs) {
this.lastEvalTimestamp = start;
if (this.lastComputedMetric != null) {
this.lastComputedMetric.dispose();
}
this.lastComputedMetric = this.computeMetric();
this.eventObserver.metricCallback(this.lastComputedMetric);
}
if (this.eventObserver.totalTimeCallback != null) {
this.eventObserver.totalTimeCallback(
(start - this.trainStartTimestamp) / 1000);
}
this.batchesTrainedThisRun++;
this.totalBatchesTrained++;
if (this.eventObserver.batchesTrainedCallback != null) {
this.eventObserver.batchesTrainedCallback(this.totalBatchesTrained);
}
});
requestAnimationFrame(() => this.trainNetwork());
}
infer(
inferenceTensor: SymbolicTensor, inferenceFeedEntries: FeedEntry[],
inferenceExampleIntervalMs = DEFAULT_INFERENCE_EXAMPLE_INTERVAL_MS,
inferenceExampleCount = 5, numPasses?: number) {
if (this.eventObserver.inferenceExamplesCallback == null &&
this.eventObserver.inferenceExamplesPerSecCallback == null) {
throw new Error(
'Cannot start inference loop, no inference example or ' +
'examples/sec observer provided.');
}
// Make sure the feed values are providers, and not NDArrays.
for (let i = 0; i < inferenceFeedEntries.length; i++) {
const feedEntry = inferenceFeedEntries[i];
if (feedEntry.data instanceof Tensor) {
throw new Error(
'Cannot start inference on the model runner with feed entries of ' +
'type NDArray. Please use InputProviders.');
}
}
this.inferenceExampleIntervalMs = inferenceExampleIntervalMs;
this.inferenceTensor = inferenceTensor;
this.inferenceFeedEntries = inferenceFeedEntries;
this.inferenceExampleCount = inferenceExampleCount;
this.currentInferenceLoopNumPasses = numPasses;
if (!this.isInferring) {
this.inferencePassesThisRun = 0;
requestAnimationFrame(() => this.inferNetwork());
}
this.isInferring = true;
}
private inferNetwork() {
if (!this.isInferring ||
this.inferencePassesThisRun === this.currentInferenceLoopNumPasses) {
return;
}
tidy(() => {
const feeds: FeedEntry[][] = [];
const inferenceValues: Tensor[] = [];
const start = performance.now();
for (let i = 0; i < this.inferenceExampleCount; i++) {
// Populate a new FeedEntry[] populated with NDArrays.
const ndarrayFeedEntries: FeedEntry[] = [];
for (let j = 0; j < this.inferenceFeedEntries.length; j++) {
const feedEntry = this.inferenceFeedEntries[j];
const nextCopy = (feedEntry.data as InputProvider).getNextCopy();
ndarrayFeedEntries.push({tensor: feedEntry.tensor, data: nextCopy});
}
feeds.push(ndarrayFeedEntries);
inferenceValues.push(
this.session.eval(this.inferenceTensor, ndarrayFeedEntries));
}
if (this.eventObserver.inferenceExamplesPerSecCallback != null) {
// Force a GPU download, since inference results are generally needed on
// the CPU and it's more fair to include blocking on the GPU to complete
// its work for the inference measurement.
inferenceValues[inferenceValues.length - 1].dataSync();
const inferenceExamplesPerSecTime = performance.now() - start;
const examplesPerSec =
(this.inferenceExampleCount * 1000 / inferenceExamplesPerSecTime);
this.eventObserver.inferenceExamplesPerSecCallback(examplesPerSec);
}
if (this.eventObserver.inferenceExamplesCallback != null) {
this.eventObserver.inferenceExamplesCallback(feeds, inferenceValues);
}
this.inferencePassesThisRun++;
});
this.lastInferTimeoutID = window.setTimeout(
() => this.inferNetwork(), this.inferenceExampleIntervalMs);
}
| stopInferring | identifier_name | |
graph_runner.ts | It allows
* the user to provide a set of callbacks for measurements like cost, accuracy,
* and speed of training.
*/
export class GraphRunner {
private costTensor: SymbolicTensor;
private trainFeedEntries: FeedEntry[];
private batchSize: number;
private optimizer: Optimizer;
private currentTrainLoopNumBatches: number|undefined;
private costIntervalMs: number;
private metricTensor: SymbolicTensor|undefined;
private metricFeedEntries: FeedEntry[]|undefined;
private metricBatchSize: number|undefined;
private metricReduction: MetricReduction;
private metricIntervalMs: number;
private inferenceTensor: SymbolicTensor;
private inferenceFeedEntries: FeedEntry[]|undefined;
private inferenceExampleIntervalMs: number;
private inferenceExampleCount: number;
// Runtime information.
private isTraining: boolean;
private totalBatchesTrained: number;
private batchesTrainedThisRun: number;
private lastComputedMetric: Scalar;
private isInferring: boolean;
private lastInferTimeoutID: number;
private currentInferenceLoopNumPasses: number|undefined;
private inferencePassesThisRun: number;
private trainStartTimestamp: number;
private lastCostTimestamp = 0;
private lastEvalTimestamp = 0;
private zeroScalar: Scalar;
private metricBatchSizeScalar: Scalar;
constructor(
private math: NDArrayMath, private session: Session,
private eventObserver: GraphRunnerEventObserver) {
this.resetStatistics();
this.zeroScalar = Scalar.new(0);
}
resetStatistics() {
this.totalBatchesTrained = 0;
}
/**
* Start the training loop with an optional number of batches to train for.
* Optionally takes a metric tensor and feed entries to compute periodically.
* This can be used for computing accuracy, or a similar metric.
*/
train(
costTensor: SymbolicTensor, trainFeedEntries: FeedEntry[],
batchSize: number, optimizer: Optimizer, numBatches?: number,
metricTensor?: SymbolicTensor, metricFeedEntries?: FeedEntry[],
metricBatchSize?: number, metricReduction = MetricReduction.MEAN,
evalIntervalMs = DEFAULT_EVAL_INTERVAL_MS,
costIntervalMs = DEFAULT_COST_INTERVAL_MS) {
this.costTensor = costTensor;
this.trainFeedEntries = trainFeedEntries;
this.metricTensor = metricTensor;
this.metricFeedEntries = metricFeedEntries;
if (metricBatchSize != null && this.metricBatchSize !== metricBatchSize) {
if (this.metricBatchSizeScalar != null) {
this.metricBatchSizeScalar.dispose();
}
this.metricBatchSizeScalar = Scalar.new(metricBatchSize);
}
this.metricBatchSize = metricBatchSize;
this.metricReduction = metricReduction;
this.batchSize = batchSize;
this.optimizer = optimizer;
this.metricIntervalMs = evalIntervalMs;
this.costIntervalMs = costIntervalMs;
this.currentTrainLoopNumBatches = numBatches;
this.batchesTrainedThisRun = 0;
this.isTraining = true;
this.trainStartTimestamp = performance.now();
this.trainNetwork();
}
stopTraining() {
this.isTraining = false;
}
resumeTraining() {
this.isTraining = true;
this.trainNetwork();
}
private trainNetwork() {
if (this.batchesTrainedThisRun === this.currentTrainLoopNumBatches) {
this.stopTraining();
}
if (!this.isTraining) {
if (this.eventObserver.doneTrainingCallback != null) {
this.eventObserver.doneTrainingCallback();
}
return;
}
const start = performance.now();
const shouldComputeCost = this.eventObserver.avgCostCallback != null &&
(start - this.lastCostTimestamp > this.costIntervalMs);
if (shouldComputeCost) {
this.lastCostTimestamp = start;
}
const costReduction =
shouldComputeCost ? CostReduction.MEAN : CostReduction.NONE;
tidy(() => {
const avgCost = this.session.train(
this.costTensor, this.trainFeedEntries, this.batchSize,
this.optimizer, costReduction);
if (shouldComputeCost) {
const trainTime = performance.now() - start;
this.eventObserver.avgCostCallback(avgCost);
if (this.eventObserver.trainExamplesPerSecCallback != null) {
const examplesPerSec = (this.batchSize * 1000 / trainTime);
this.eventObserver.trainExamplesPerSecCallback(examplesPerSec);
}
}
if (this.eventObserver.metricCallback != null &&
this.metricFeedEntries != null &&
start - this.lastEvalTimestamp > this.metricIntervalMs) {
this.lastEvalTimestamp = start;
if (this.lastComputedMetric != null) {
this.lastComputedMetric.dispose();
}
this.lastComputedMetric = this.computeMetric();
this.eventObserver.metricCallback(this.lastComputedMetric);
}
if (this.eventObserver.totalTimeCallback != null) {
this.eventObserver.totalTimeCallback(
(start - this.trainStartTimestamp) / 1000);
}
this.batchesTrainedThisRun++;
this.totalBatchesTrained++;
if (this.eventObserver.batchesTrainedCallback != null) {
this.eventObserver.batchesTrainedCallback(this.totalBatchesTrained);
}
});
requestAnimationFrame(() => this.trainNetwork());
}
infer(
inferenceTensor: SymbolicTensor, inferenceFeedEntries: FeedEntry[],
inferenceExampleIntervalMs = DEFAULT_INFERENCE_EXAMPLE_INTERVAL_MS,
inferenceExampleCount = 5, numPasses?: number) {
if (this.eventObserver.inferenceExamplesCallback == null &&
this.eventObserver.inferenceExamplesPerSecCallback == null) {
throw new Error(
'Cannot start inference loop, no inference example or ' +
'examples/sec observer provided.');
}
// Make sure the feed values are providers, and not NDArrays.
for (let i = 0; i < inferenceFeedEntries.length; i++) {
const feedEntry = inferenceFeedEntries[i];
if (feedEntry.data instanceof Tensor) {
throw new Error(
'Cannot start inference on the model runner with feed entries of ' +
'type NDArray. Please use InputProviders.');
}
}
this.inferenceExampleIntervalMs = inferenceExampleIntervalMs;
this.inferenceTensor = inferenceTensor;
this.inferenceFeedEntries = inferenceFeedEntries;
this.inferenceExampleCount = inferenceExampleCount;
this.currentInferenceLoopNumPasses = numPasses;
if (!this.isInferring) {
this.inferencePassesThisRun = 0;
requestAnimationFrame(() => this.inferNetwork());
}
this.isInferring = true;
}
private inferNetwork() {
if (!this.isInferring ||
this.inferencePassesThisRun === this.currentInferenceLoopNumPasses) {
return;
}
tidy(() => {
const feeds: FeedEntry[][] = [];
const inferenceValues: Tensor[] = [];
const start = performance.now();
for (let i = 0; i < this.inferenceExampleCount; i++) {
// Populate a new FeedEntry[] populated with NDArrays.
const ndarrayFeedEntries: FeedEntry[] = [];
for (let j = 0; j < this.inferenceFeedEntries.length; j++) {
const feedEntry = this.inferenceFeedEntries[j];
const nextCopy = (feedEntry.data as InputProvider).getNextCopy();
ndarrayFeedEntries.push({tensor: feedEntry.tensor, data: nextCopy});
}
feeds.push(ndarrayFeedEntries);
inferenceValues.push(
this.session.eval(this.inferenceTensor, ndarrayFeedEntries));
}
if (this.eventObserver.inferenceExamplesPerSecCallback != null) {
// Force a GPU download, since inference results are generally needed on
// the CPU and it's more fair to include blocking on the GPU to complete
// its work for the inference measurement.
inferenceValues[inferenceValues.length - 1].dataSync();
const inferenceExamplesPerSecTime = performance.now() - start;
const examplesPerSec =
(this.inferenceExampleCount * 1000 / inferenceExamplesPerSecTime);
this.eventObserver.inferenceExamplesPerSecCallback(examplesPerSec);
}
if (this.eventObserver.inferenceExamplesCallback != null) {
this.eventObserver.inferenceExamplesCallback(feeds, inferenceValues);
}
this.inferencePassesThisRun++;
});
this.lastInferTimeoutID = window.setTimeout(
() => this.inferNetwork(), this.inferenceExampleIntervalMs);
}
stopInferring() {
this.isInferring = false;
window.clearTimeout(this.lastInferTimeoutID);
}
isInferenceRunning(): boolean {
return this.isInferring;
}
computeMetric(): Scalar {
if (this.metricFeedEntries == null) {
throw new Error('Cannot compute metric, no metric FeedEntries provided.');
}
let metric = this.zeroScalar;
return tidy(() => {
for (let i = 0; i < this.metricBatchSize; i++) {
const metricValue =
this.session.eval(this.metricTensor, this.metricFeedEntries) as
Tensor;
metric = this.math.add(metric, metricValue.toFloat());
}
if (this.metricReduction === MetricReduction.MEAN) | {
metric = this.math.divide(metric, this.metricBatchSizeScalar);
} | conditional_block | |
graph_runner.ts | Entries: FeedEntry[];
private batchSize: number;
private optimizer: Optimizer;
private currentTrainLoopNumBatches: number|undefined;
private costIntervalMs: number;
private metricTensor: SymbolicTensor|undefined;
private metricFeedEntries: FeedEntry[]|undefined;
private metricBatchSize: number|undefined;
private metricReduction: MetricReduction;
private metricIntervalMs: number;
private inferenceTensor: SymbolicTensor;
private inferenceFeedEntries: FeedEntry[]|undefined;
private inferenceExampleIntervalMs: number;
private inferenceExampleCount: number;
// Runtime information.
private isTraining: boolean;
private totalBatchesTrained: number;
private batchesTrainedThisRun: number;
private lastComputedMetric: Scalar;
private isInferring: boolean;
private lastInferTimeoutID: number;
private currentInferenceLoopNumPasses: number|undefined;
private inferencePassesThisRun: number;
private trainStartTimestamp: number;
private lastCostTimestamp = 0;
private lastEvalTimestamp = 0;
private zeroScalar: Scalar;
private metricBatchSizeScalar: Scalar;
constructor(
private math: NDArrayMath, private session: Session,
private eventObserver: GraphRunnerEventObserver) {
this.resetStatistics();
this.zeroScalar = Scalar.new(0);
}
resetStatistics() {
this.totalBatchesTrained = 0;
}
/**
* Start the training loop with an optional number of batches to train for.
* Optionally takes a metric tensor and feed entries to compute periodically.
* This can be used for computing accuracy, or a similar metric.
*/
train(
costTensor: SymbolicTensor, trainFeedEntries: FeedEntry[],
batchSize: number, optimizer: Optimizer, numBatches?: number,
metricTensor?: SymbolicTensor, metricFeedEntries?: FeedEntry[],
metricBatchSize?: number, metricReduction = MetricReduction.MEAN,
evalIntervalMs = DEFAULT_EVAL_INTERVAL_MS,
costIntervalMs = DEFAULT_COST_INTERVAL_MS) {
this.costTensor = costTensor;
this.trainFeedEntries = trainFeedEntries;
this.metricTensor = metricTensor;
this.metricFeedEntries = metricFeedEntries;
if (metricBatchSize != null && this.metricBatchSize !== metricBatchSize) {
if (this.metricBatchSizeScalar != null) {
this.metricBatchSizeScalar.dispose();
}
this.metricBatchSizeScalar = Scalar.new(metricBatchSize);
}
this.metricBatchSize = metricBatchSize;
this.metricReduction = metricReduction;
this.batchSize = batchSize;
this.optimizer = optimizer;
this.metricIntervalMs = evalIntervalMs;
this.costIntervalMs = costIntervalMs;
this.currentTrainLoopNumBatches = numBatches;
this.batchesTrainedThisRun = 0;
this.isTraining = true;
this.trainStartTimestamp = performance.now();
this.trainNetwork();
}
stopTraining() {
this.isTraining = false;
}
resumeTraining() {
this.isTraining = true;
this.trainNetwork();
}
private trainNetwork() {
if (this.batchesTrainedThisRun === this.currentTrainLoopNumBatches) {
this.stopTraining();
}
if (!this.isTraining) {
if (this.eventObserver.doneTrainingCallback != null) {
this.eventObserver.doneTrainingCallback();
}
return;
}
const start = performance.now();
const shouldComputeCost = this.eventObserver.avgCostCallback != null &&
(start - this.lastCostTimestamp > this.costIntervalMs);
if (shouldComputeCost) {
this.lastCostTimestamp = start;
}
const costReduction =
shouldComputeCost ? CostReduction.MEAN : CostReduction.NONE;
tidy(() => {
const avgCost = this.session.train(
this.costTensor, this.trainFeedEntries, this.batchSize,
this.optimizer, costReduction);
if (shouldComputeCost) {
const trainTime = performance.now() - start;
this.eventObserver.avgCostCallback(avgCost);
if (this.eventObserver.trainExamplesPerSecCallback != null) {
const examplesPerSec = (this.batchSize * 1000 / trainTime);
this.eventObserver.trainExamplesPerSecCallback(examplesPerSec);
}
}
if (this.eventObserver.metricCallback != null &&
this.metricFeedEntries != null &&
start - this.lastEvalTimestamp > this.metricIntervalMs) {
this.lastEvalTimestamp = start;
if (this.lastComputedMetric != null) {
this.lastComputedMetric.dispose();
}
this.lastComputedMetric = this.computeMetric();
this.eventObserver.metricCallback(this.lastComputedMetric);
}
if (this.eventObserver.totalTimeCallback != null) {
this.eventObserver.totalTimeCallback(
(start - this.trainStartTimestamp) / 1000);
}
this.batchesTrainedThisRun++;
this.totalBatchesTrained++;
if (this.eventObserver.batchesTrainedCallback != null) {
this.eventObserver.batchesTrainedCallback(this.totalBatchesTrained);
}
});
requestAnimationFrame(() => this.trainNetwork());
}
infer(
inferenceTensor: SymbolicTensor, inferenceFeedEntries: FeedEntry[],
inferenceExampleIntervalMs = DEFAULT_INFERENCE_EXAMPLE_INTERVAL_MS,
inferenceExampleCount = 5, numPasses?: number) {
if (this.eventObserver.inferenceExamplesCallback == null &&
this.eventObserver.inferenceExamplesPerSecCallback == null) {
throw new Error(
'Cannot start inference loop, no inference example or ' +
'examples/sec observer provided.');
}
// Make sure the feed values are providers, and not NDArrays.
for (let i = 0; i < inferenceFeedEntries.length; i++) {
const feedEntry = inferenceFeedEntries[i];
if (feedEntry.data instanceof Tensor) {
throw new Error(
'Cannot start inference on the model runner with feed entries of ' +
'type NDArray. Please use InputProviders.');
}
}
this.inferenceExampleIntervalMs = inferenceExampleIntervalMs;
this.inferenceTensor = inferenceTensor;
this.inferenceFeedEntries = inferenceFeedEntries;
this.inferenceExampleCount = inferenceExampleCount;
this.currentInferenceLoopNumPasses = numPasses;
if (!this.isInferring) {
this.inferencePassesThisRun = 0;
requestAnimationFrame(() => this.inferNetwork());
}
this.isInferring = true;
}
private inferNetwork() {
if (!this.isInferring ||
this.inferencePassesThisRun === this.currentInferenceLoopNumPasses) {
return;
}
tidy(() => {
const feeds: FeedEntry[][] = [];
const inferenceValues: Tensor[] = [];
const start = performance.now();
for (let i = 0; i < this.inferenceExampleCount; i++) {
// Populate a new FeedEntry[] populated with NDArrays.
const ndarrayFeedEntries: FeedEntry[] = [];
for (let j = 0; j < this.inferenceFeedEntries.length; j++) {
const feedEntry = this.inferenceFeedEntries[j];
const nextCopy = (feedEntry.data as InputProvider).getNextCopy();
ndarrayFeedEntries.push({tensor: feedEntry.tensor, data: nextCopy});
}
feeds.push(ndarrayFeedEntries);
inferenceValues.push(
this.session.eval(this.inferenceTensor, ndarrayFeedEntries));
}
if (this.eventObserver.inferenceExamplesPerSecCallback != null) {
// Force a GPU download, since inference results are generally needed on
// the CPU and it's more fair to include blocking on the GPU to complete
// its work for the inference measurement.
inferenceValues[inferenceValues.length - 1].dataSync();
const inferenceExamplesPerSecTime = performance.now() - start;
const examplesPerSec =
(this.inferenceExampleCount * 1000 / inferenceExamplesPerSecTime);
this.eventObserver.inferenceExamplesPerSecCallback(examplesPerSec);
}
if (this.eventObserver.inferenceExamplesCallback != null) {
this.eventObserver.inferenceExamplesCallback(feeds, inferenceValues);
}
this.inferencePassesThisRun++;
});
this.lastInferTimeoutID = window.setTimeout(
() => this.inferNetwork(), this.inferenceExampleIntervalMs);
}
stopInferring() {
this.isInferring = false;
window.clearTimeout(this.lastInferTimeoutID);
}
isInferenceRunning(): boolean {
return this.isInferring;
}
computeMetric(): Scalar {
if (this.metricFeedEntries == null) {
throw new Error('Cannot compute metric, no metric FeedEntries provided.');
}
let metric = this.zeroScalar;
return tidy(() => {
for (let i = 0; i < this.metricBatchSize; i++) {
const metricValue =
this.session.eval(this.metricTensor, this.metricFeedEntries) as
Tensor;
metric = this.math.add(metric, metricValue.toFloat());
}
if (this.metricReduction === MetricReduction.MEAN) {
metric = this.math.divide(metric, this.metricBatchSizeScalar);
}
return metric;
});
}
getTotalBatchesTrained(): number {
return this.totalBatchesTrained;
}
getLastComputedMetric(): Scalar | {
return this.lastComputedMetric;
} | identifier_body | |
graph_runner.ts | Entries: FeedEntry[];
private batchSize: number;
private optimizer: Optimizer;
private currentTrainLoopNumBatches: number|undefined;
private costIntervalMs: number;
private metricTensor: SymbolicTensor|undefined;
private metricFeedEntries: FeedEntry[]|undefined;
private metricBatchSize: number|undefined;
private metricReduction: MetricReduction;
private metricIntervalMs: number;
private inferenceTensor: SymbolicTensor;
private inferenceFeedEntries: FeedEntry[]|undefined;
private inferenceExampleIntervalMs: number;
private inferenceExampleCount: number;
// Runtime information.
private isTraining: boolean;
private totalBatchesTrained: number;
private batchesTrainedThisRun: number;
private lastComputedMetric: Scalar;
private isInferring: boolean;
private lastInferTimeoutID: number;
private currentInferenceLoopNumPasses: number|undefined;
private inferencePassesThisRun: number;
private trainStartTimestamp: number;
private lastCostTimestamp = 0;
private lastEvalTimestamp = 0;
private zeroScalar: Scalar;
private metricBatchSizeScalar: Scalar;
constructor(
private math: NDArrayMath, private session: Session,
private eventObserver: GraphRunnerEventObserver) {
this.resetStatistics();
this.zeroScalar = Scalar.new(0);
}
resetStatistics() {
this.totalBatchesTrained = 0;
}
/**
* Start the training loop with an optional number of batches to train for.
* Optionally takes a metric tensor and feed entries to compute periodically.
* This can be used for computing accuracy, or a similar metric.
*/
train(
costTensor: SymbolicTensor, trainFeedEntries: FeedEntry[],
batchSize: number, optimizer: Optimizer, numBatches?: number,
metricTensor?: SymbolicTensor, metricFeedEntries?: FeedEntry[],
metricBatchSize?: number, metricReduction = MetricReduction.MEAN,
evalIntervalMs = DEFAULT_EVAL_INTERVAL_MS,
costIntervalMs = DEFAULT_COST_INTERVAL_MS) {
this.costTensor = costTensor;
this.trainFeedEntries = trainFeedEntries;
this.metricTensor = metricTensor;
this.metricFeedEntries = metricFeedEntries;
if (metricBatchSize != null && this.metricBatchSize !== metricBatchSize) {
if (this.metricBatchSizeScalar != null) {
this.metricBatchSizeScalar.dispose();
}
this.metricBatchSizeScalar = Scalar.new(metricBatchSize);
}
this.metricBatchSize = metricBatchSize;
this.metricReduction = metricReduction;
this.batchSize = batchSize;
this.optimizer = optimizer;
this.metricIntervalMs = evalIntervalMs;
this.costIntervalMs = costIntervalMs;
this.currentTrainLoopNumBatches = numBatches;
this.batchesTrainedThisRun = 0;
this.isTraining = true;
this.trainStartTimestamp = performance.now();
this.trainNetwork();
}
stopTraining() {
this.isTraining = false;
}
resumeTraining() {
this.isTraining = true;
this.trainNetwork();
}
private trainNetwork() {
if (this.batchesTrainedThisRun === this.currentTrainLoopNumBatches) {
this.stopTraining();
}
if (!this.isTraining) {
if (this.eventObserver.doneTrainingCallback != null) {
this.eventObserver.doneTrainingCallback();
}
return;
}
const start = performance.now();
const shouldComputeCost = this.eventObserver.avgCostCallback != null &&
(start - this.lastCostTimestamp > this.costIntervalMs);
if (shouldComputeCost) {
this.lastCostTimestamp = start;
}
const costReduction =
shouldComputeCost ? CostReduction.MEAN : CostReduction.NONE;
tidy(() => {
const avgCost = this.session.train(
this.costTensor, this.trainFeedEntries, this.batchSize,
this.optimizer, costReduction);
if (shouldComputeCost) {
const trainTime = performance.now() - start;
this.eventObserver.avgCostCallback(avgCost);
if (this.eventObserver.trainExamplesPerSecCallback != null) {
const examplesPerSec = (this.batchSize * 1000 / trainTime);
this.eventObserver.trainExamplesPerSecCallback(examplesPerSec);
}
}
if (this.eventObserver.metricCallback != null &&
this.metricFeedEntries != null &&
start - this.lastEvalTimestamp > this.metricIntervalMs) {
this.lastEvalTimestamp = start;
if (this.lastComputedMetric != null) {
this.lastComputedMetric.dispose();
}
this.lastComputedMetric = this.computeMetric();
this.eventObserver.metricCallback(this.lastComputedMetric);
}
if (this.eventObserver.totalTimeCallback != null) {
this.eventObserver.totalTimeCallback(
(start - this.trainStartTimestamp) / 1000);
}
this.batchesTrainedThisRun++;
this.totalBatchesTrained++;
if (this.eventObserver.batchesTrainedCallback != null) {
this.eventObserver.batchesTrainedCallback(this.totalBatchesTrained);
}
});
requestAnimationFrame(() => this.trainNetwork());
}
infer(
inferenceTensor: SymbolicTensor, inferenceFeedEntries: FeedEntry[],
inferenceExampleIntervalMs = DEFAULT_INFERENCE_EXAMPLE_INTERVAL_MS,
inferenceExampleCount = 5, numPasses?: number) {
if (this.eventObserver.inferenceExamplesCallback == null &&
this.eventObserver.inferenceExamplesPerSecCallback == null) {
throw new Error(
'Cannot start inference loop, no inference example or ' +
'examples/sec observer provided.');
}
// Make sure the feed values are providers, and not NDArrays.
for (let i = 0; i < inferenceFeedEntries.length; i++) {
const feedEntry = inferenceFeedEntries[i];
if (feedEntry.data instanceof Tensor) {
throw new Error(
'Cannot start inference on the model runner with feed entries of ' +
'type NDArray. Please use InputProviders.');
}
}
this.inferenceExampleIntervalMs = inferenceExampleIntervalMs;
this.inferenceTensor = inferenceTensor;
this.inferenceFeedEntries = inferenceFeedEntries;
this.inferenceExampleCount = inferenceExampleCount;
this.currentInferenceLoopNumPasses = numPasses;
if (!this.isInferring) {
this.inferencePassesThisRun = 0;
requestAnimationFrame(() => this.inferNetwork());
}
this.isInferring = true;
}
private inferNetwork() {
if (!this.isInferring ||
this.inferencePassesThisRun === this.currentInferenceLoopNumPasses) {
return;
}
tidy(() => {
const feeds: FeedEntry[][] = [];
const inferenceValues: Tensor[] = [];
const start = performance.now();
for (let i = 0; i < this.inferenceExampleCount; i++) {
// Populate a new FeedEntry[] populated with NDArrays.
const ndarrayFeedEntries: FeedEntry[] = [];
for (let j = 0; j < this.inferenceFeedEntries.length; j++) {
const feedEntry = this.inferenceFeedEntries[j];
const nextCopy = (feedEntry.data as InputProvider).getNextCopy();
ndarrayFeedEntries.push({tensor: feedEntry.tensor, data: nextCopy});
}
feeds.push(ndarrayFeedEntries);
inferenceValues.push(
this.session.eval(this.inferenceTensor, ndarrayFeedEntries));
}
if (this.eventObserver.inferenceExamplesPerSecCallback != null) {
// Force a GPU download, since inference results are generally needed on
// the CPU and it's more fair to include blocking on the GPU to complete
// its work for the inference measurement.
inferenceValues[inferenceValues.length - 1].dataSync();
const inferenceExamplesPerSecTime = performance.now() - start;
const examplesPerSec =
(this.inferenceExampleCount * 1000 / inferenceExamplesPerSecTime);
this.eventObserver.inferenceExamplesPerSecCallback(examplesPerSec);
}
if (this.eventObserver.inferenceExamplesCallback != null) {
this.eventObserver.inferenceExamplesCallback(feeds, inferenceValues);
}
this.inferencePassesThisRun++;
});
this.lastInferTimeoutID = window.setTimeout(
() => this.inferNetwork(), this.inferenceExampleIntervalMs);
}
stopInferring() {
this.isInferring = false;
window.clearTimeout(this.lastInferTimeoutID);
}
isInferenceRunning(): boolean {
return this.isInferring;
}
computeMetric(): Scalar {
if (this.metricFeedEntries == null) {
throw new Error('Cannot compute metric, no metric FeedEntries provided.');
}
let metric = this.zeroScalar;
return tidy(() => {
for (let i = 0; i < this.metricBatchSize; i++) {
const metricValue =
this.session.eval(this.metricTensor, this.metricFeedEntries) as
Tensor;
metric = this.math.add(metric, metricValue.toFloat());
}
if (this.metricReduction === MetricReduction.MEAN) {
metric = this.math.divide(metric, this.metricBatchSizeScalar);
}
return metric;
});
}
getTotalBatchesTrained(): number {
return this.totalBatchesTrained;
} |
getLastComputedMetric(): Scalar {
return this.lastComputedMetric;
} | random_line_split | |
training_modified.py | batch includes a training image, its mask and the 7 augmented versions of it, which are generated on the fly
# batch size means in this case how many original images are loaded for 1 batch,
# the actual batch size is 8 times higher.
# based on https://www.kaggle.com/mukulkr/camvid-segmentation-using-unet
class DataGenerator(Sequence):
def __init__(self, pair, batch_size, shuffle):
self.pair = pair
self.batch_size = batch_size
self.shuffle = shuffle
self.on_epoch_end()
self.map = dict()
# returns the length of the data set
def __len__(self):
return int(tf.math.floor(len(self.pair) / self.batch_size)) * 2
# returns a single batch
def __getitem__(self, index):
# a list that has the indexes of the pairs from which we want to generate images and masks for the batch
index = index // 2
indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
list_IDs_temp = [k for k in indexes]
if index in self.map:
return self.__data_generation(self.map[index], second_half=True)
self.map[index] = list_IDs_temp
X, Y = self.__data_generation(list_IDs_temp)
return X, Y
# resets the pair indexes after each epoch and shuffles the indexes so that the batches
# are in different order for every epoch
def on_epoch_end(self):
self.indexes = tf.range(len(self.pair))
self.map = dict()
if self.shuffle == True:
tf.random.shuffle(self.indexes)
# generates a batch
def __data_generation(self, list_IDs_temp, second_half=False):
batch_images = []
batch_masks = []
for i in list_IDs_temp:
# parses the image and the mask of the current pair and then generates the augmented versions
# it wasn't possible to generate the augmented images beforehand and have completely
# random images in every batch
image1 = parse_image(self.pair[i][0])
batch_images.append(image1)
mask1 = parse_mask(self.pair[i][1])
mask1 = to_one_hot(mask1)
batch_masks.append(mask1)
image2 = tf.image.flip_left_right(image1)
batch_images.append(image2)
mask2 = tf.image.flip_left_right(mask1)
batch_masks.append(mask2)
image3 = tf.image.flip_up_down(image1)
batch_images.append(image3)
mask3 = tf.image.flip_up_down(mask1)
batch_masks.append(mask3)
image4 = tf.image.flip_up_down(image1)
image4 = tf.image.flip_left_right(image4)
batch_images.append(image4)
mask4 = tf.image.flip_up_down(mask1)
mask4 = tf.image.flip_left_right(mask4)
batch_masks.append(mask4)
if second_half:
# images and masks 1 to 4 but with randomly changed brightness
delta = tf.random.uniform(shape=[], minval=-0.5, maxval=0.51)
image5 = tf.image.adjust_brightness(image1, delta)
batch_images.append(image5)
batch_masks.append(mask1)
delta = tf.random.uniform(shape=[], minval=-0.5, maxval=0.51)
image6 = tf.image.flip_left_right(image1)
image6 = tf.image.adjust_brightness(image6, delta)
batch_images.append(image6)
mask6 = tf.image.flip_left_right(mask1)
batch_masks.append(mask6)
delta = tf.random.uniform(shape=[], minval=-0.5, maxval=0.51)
image7 = tf.image.flip_up_down(image1)
image7 = tf.image.adjust_brightness(image7, delta)
batch_images.append(image7)
mask7 = tf.image.flip_up_down(mask1)
batch_masks.append(mask7)
delta = tf.random.uniform(shape=[], minval=-0.5, maxval=0.51)
image8 = tf.image.flip_up_down(image1)
image8 = tf.image.flip_left_right(image8)
image8 = tf.image.adjust_brightness(image8, delta)
batch_images.append(image8)
mask8 = tf.image.flip_up_down(mask1)
mask8 = tf.image.flip_left_right(mask8)
batch_masks.append(mask8)
return tf.stack(batch_images[4:]), tf.stack(batch_masks[4:])
# stack the images and masks of the batch into two tensors
return tf.stack(batch_images[:4]), tf.stack(batch_masks[:4])
# Data generator that does not augment images, i.e. used for validation and test set
class ValDataGenerator(Sequence):
def __init__(self, pair, batch_size, shuffle):
self.pair = pair
self.batch_size = batch_size
self.shuffle = shuffle
self.on_epoch_end()
# returns the length of the data set
def __len__(self):
return int(tf.math.floor(len(self.pair) / self.batch_size))
# returns a single batch
def __getitem__(self, index):
# a list that has the indexes of the pairs from which we want to generate images and masks for the batch
indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
list_IDs_temp = [k for k in indexes]
X, Y = self.__data_generation(list_IDs_temp)
return X, Y
# resets the pair indexes after each epoch and shuffles the indexes so that the batches
# are in different order for every epoch
def on_epoch_end(self):
self.indexes = tf.range(len(self.pair))
if self.shuffle == True:
|
# generates a batch
def __data_generation(self, list_IDs_temp, second_half=False):
batch_images = []
batch_masks = []
for i in list_IDs_temp:
# parses the image and the mask of the current pair and then generates the augmented versions
# it wasn't possible to generate the augmented images beforehand and have completely
# random images in every batch
image1 = parse_image(self.pair[i][0])
batch_images.append(image1)
mask1 = parse_mask(self.pair[i][1])
mask1 = to_one_hot(mask1)
batch_masks.append(mask1)
# stack the images and masks of the batch into two tensors
return tf.stack(batch_images), tf.stack(batch_masks)
# takes a path to a directory with two sub folders for training images and masks
# and returns a list of pairs of paths for images and the corresponding masks
def make_pairs(path, set):
pairs = []
# sorted is very important since os.path.join somehow shuffles the paths and we need
# the image and mask paths to have the exact same order
image_paths = sorted(glob(os.path.join(path, set + "_images/*")))
mask_paths = sorted(glob(os.path.join(path, set + "_masks/*")))
#image_paths = sorted(glob(os.path.join(path, "test_images2/*")))
#mask_paths = sorted(glob(os.path.join(path, "test_masks2/*")))
for i in range(len(image_paths)):
pairs.append((image_paths[i], mask_paths[i]))
return pairs
########## LOSS FUNCTION ##########
# based on https://github.com/aruns2120/Semantic-Segmentation-Severstal/blob/master/U-Net/CS2_firstCut.ipynb
# the dice coefficient calculates how much the predicted mask and the correct mask overlap
def dice_coef(y_true, y_predict, smooth=1):
y_true_flat = tf.keras.backend.flatten(y_true)
y_pred_flat = tf.keras.backend.flatten(y_predict)
intersection = tf.keras.backend.sum(y_true_flat * y_pred_flat)
return (2. * intersection + smooth) / (tf.keras.backend.sum(y_true_flat) + tf.keras.backend.sum(y_pred_flat) + smooth)
def dice_loss(y_true, y_predict):
return (1 - dice_coef(y_true, y_predict))
# weighted variant of pixelwise_crossentropy
# based on https://www.gitmemory.com/issue/keras-team/keras/6261/569715992
def pixelwise_crossentropy(y_true, y_predicted):#
# weights that scale the error for each class such that they all have equal impact on the loss
# important since the data set is very unbalanced
# weights represent the inverse of the proportion of pixels corresponding to that class in the whole data set
# needs to be divided by 100.0 to keep the error at a similar magnitude during training
weight_proton = 132.0 / 100.0
weight_alpha = 91.0 / 100.0
weight_V = 311.0 / 100.0
weight_electron = 71.0 / 100.0
# weight_proton = 1.0 # for local testing
# weight_alpha = 1.0
# weight_V = 1.0
# weight_electron = 1.0
weights = [weight_proton, weight_alpha, weight_V, weight_electron]
# predicted values get scaled such that they are never exactly 0 or 1 since then the logarithm diverges
| tf.random.shuffle(self.indexes) | conditional_block |
training_modified.py | batch includes a training image, its mask and the 7 augmented versions of it, which are generated on the fly
# batch size means in this case how many original images are loaded for 1 batch,
# the actual batch size is 8 times higher.
# based on https://www.kaggle.com/mukulkr/camvid-segmentation-using-unet
class DataGenerator(Sequence):
def __init__(self, pair, batch_size, shuffle):
self.pair = pair
self.batch_size = batch_size
self.shuffle = shuffle
self.on_epoch_end()
self.map = dict()
# returns the length of the data set
def __len__(self):
return int(tf.math.floor(len(self.pair) / self.batch_size)) * 2
# returns a single batch
def __getitem__(self, index):
# a list that has the indexes of the pairs from which we want to generate images and masks for the batch
index = index // 2
indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
list_IDs_temp = [k for k in indexes]
if index in self.map:
return self.__data_generation(self.map[index], second_half=True)
self.map[index] = list_IDs_temp
X, Y = self.__data_generation(list_IDs_temp)
return X, Y
# resets the pair indexes after each epoch and shuffles the indexes so that the batches
# are in different order for every epoch
def on_epoch_end(self):
self.indexes = tf.range(len(self.pair))
self.map = dict() | def __data_generation(self, list_IDs_temp, second_half=False):
batch_images = []
batch_masks = []
for i in list_IDs_temp:
# parses the image and the mask of the current pair and then generates the augmented versions
# it wasn't possible to generate the augmented images beforehand and have completely
# random images in every batch
image1 = parse_image(self.pair[i][0])
batch_images.append(image1)
mask1 = parse_mask(self.pair[i][1])
mask1 = to_one_hot(mask1)
batch_masks.append(mask1)
image2 = tf.image.flip_left_right(image1)
batch_images.append(image2)
mask2 = tf.image.flip_left_right(mask1)
batch_masks.append(mask2)
image3 = tf.image.flip_up_down(image1)
batch_images.append(image3)
mask3 = tf.image.flip_up_down(mask1)
batch_masks.append(mask3)
image4 = tf.image.flip_up_down(image1)
image4 = tf.image.flip_left_right(image4)
batch_images.append(image4)
mask4 = tf.image.flip_up_down(mask1)
mask4 = tf.image.flip_left_right(mask4)
batch_masks.append(mask4)
if second_half:
# images and masks 1 to 4 but with randomly changed brightness
delta = tf.random.uniform(shape=[], minval=-0.5, maxval=0.51)
image5 = tf.image.adjust_brightness(image1, delta)
batch_images.append(image5)
batch_masks.append(mask1)
delta = tf.random.uniform(shape=[], minval=-0.5, maxval=0.51)
image6 = tf.image.flip_left_right(image1)
image6 = tf.image.adjust_brightness(image6, delta)
batch_images.append(image6)
mask6 = tf.image.flip_left_right(mask1)
batch_masks.append(mask6)
delta = tf.random.uniform(shape=[], minval=-0.5, maxval=0.51)
image7 = tf.image.flip_up_down(image1)
image7 = tf.image.adjust_brightness(image7, delta)
batch_images.append(image7)
mask7 = tf.image.flip_up_down(mask1)
batch_masks.append(mask7)
delta = tf.random.uniform(shape=[], minval=-0.5, maxval=0.51)
image8 = tf.image.flip_up_down(image1)
image8 = tf.image.flip_left_right(image8)
image8 = tf.image.adjust_brightness(image8, delta)
batch_images.append(image8)
mask8 = tf.image.flip_up_down(mask1)
mask8 = tf.image.flip_left_right(mask8)
batch_masks.append(mask8)
return tf.stack(batch_images[4:]), tf.stack(batch_masks[4:])
# stack the images and masks of the batch into two tensors
return tf.stack(batch_images[:4]), tf.stack(batch_masks[:4])
# Data generator that does not augment images, i.e. used for validation and test set
class ValDataGenerator(Sequence):
def __init__(self, pair, batch_size, shuffle):
self.pair = pair
self.batch_size = batch_size
self.shuffle = shuffle
self.on_epoch_end()
# returns the length of the data set
def __len__(self):
return int(tf.math.floor(len(self.pair) / self.batch_size))
# returns a single batch
def __getitem__(self, index):
# a list that has the indexes of the pairs from which we want to generate images and masks for the batch
indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
list_IDs_temp = [k for k in indexes]
X, Y = self.__data_generation(list_IDs_temp)
return X, Y
# resets the pair indexes after each epoch and shuffles the indexes so that the batches
# are in different order for every epoch
def on_epoch_end(self):
self.indexes = tf.range(len(self.pair))
if self.shuffle == True:
tf.random.shuffle(self.indexes)
# generates a batch
def __data_generation(self, list_IDs_temp, second_half=False):
batch_images = []
batch_masks = []
for i in list_IDs_temp:
# parses the image and the mask of the current pair and then generates the augmented versions
# it wasn't possible to generate the augmented images beforehand and have completely
# random images in every batch
image1 = parse_image(self.pair[i][0])
batch_images.append(image1)
mask1 = parse_mask(self.pair[i][1])
mask1 = to_one_hot(mask1)
batch_masks.append(mask1)
# stack the images and masks of the batch into two tensors
return tf.stack(batch_images), tf.stack(batch_masks)
# takes a path to a directory with two sub folders for training images and masks
# and returns a list of pairs of paths for images and the corresponding masks
def make_pairs(path, set):
pairs = []
# sorted is very important since os.path.join somehow shuffles the paths and we need
# the image and mask paths to have the exact same order
image_paths = sorted(glob(os.path.join(path, set + "_images/*")))
mask_paths = sorted(glob(os.path.join(path, set + "_masks/*")))
#image_paths = sorted(glob(os.path.join(path, "test_images2/*")))
#mask_paths = sorted(glob(os.path.join(path, "test_masks2/*")))
for i in range(len(image_paths)):
pairs.append((image_paths[i], mask_paths[i]))
return pairs
########## LOSS FUNCTION ##########
# based on https://github.com/aruns2120/Semantic-Segmentation-Severstal/blob/master/U-Net/CS2_firstCut.ipynb
# the dice coefficient calculates how much the predicted mask and the correct mask overlap
def dice_coef(y_true, y_predict, smooth=1):
y_true_flat = tf.keras.backend.flatten(y_true)
y_pred_flat = tf.keras.backend.flatten(y_predict)
intersection = tf.keras.backend.sum(y_true_flat * y_pred_flat)
return (2. * intersection + smooth) / (tf.keras.backend.sum(y_true_flat) + tf.keras.backend.sum(y_pred_flat) + smooth)
def dice_loss(y_true, y_predict):
return (1 - dice_coef(y_true, y_predict))
# weighted variant of pixelwise_crossentropy
# based on https://www.gitmemory.com/issue/keras-team/keras/6261/569715992
def pixelwise_crossentropy(y_true, y_predicted):#
# weights that scale the error for each class such that they all have equal impact on the loss
# important since the data set is very unbalanced
# weights represent the inverse of the proportion of pixels corresponding to that class in the whole data set
# needs to be divided by 100.0 to keep the error at a similar magnitude during training
weight_proton = 132.0 / 100.0
weight_alpha = 91.0 / 100.0
weight_V = 311.0 / 100.0
weight_electron = 71.0 / 100.0
# weight_proton = 1.0 # for local testing
# weight_alpha = 1.0
# weight_V = 1.0
# weight_electron = 1.0
weights = [weight_proton, weight_alpha, weight_V, weight_electron]
# predicted values get scaled such that they are never exactly 0 or 1 since then the logarithm diverges
y_predicted | if self.shuffle == True:
tf.random.shuffle(self.indexes)
# generates a batch | random_line_split |
training_modified.py | batch includes a training image, its mask and the 7 augmented versions of it, which are generated on the fly
# batch size means in this case how many original images are loaded for 1 batch,
# the actual batch size is 8 times higher.
# based on https://www.kaggle.com/mukulkr/camvid-segmentation-using-unet
class DataGenerator(Sequence):
def __init__(self, pair, batch_size, shuffle):
self.pair = pair
self.batch_size = batch_size
self.shuffle = shuffle
self.on_epoch_end()
self.map = dict()
# returns the length of the data set
def __len__(self):
return int(tf.math.floor(len(self.pair) / self.batch_size)) * 2
# returns a single batch
def __getitem__(self, index):
# a list that has the indexes of the pairs from which we want to generate images and masks for the batch
index = index // 2
indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
list_IDs_temp = [k for k in indexes]
if index in self.map:
return self.__data_generation(self.map[index], second_half=True)
self.map[index] = list_IDs_temp
X, Y = self.__data_generation(list_IDs_temp)
return X, Y
# resets the pair indexes after each epoch and shuffles the indexes so that the batches
# are in different order for every epoch
def on_epoch_end(self):
self.indexes = tf.range(len(self.pair))
self.map = dict()
if self.shuffle == True:
tf.random.shuffle(self.indexes)
# generates a batch
def __data_generation(self, list_IDs_temp, second_half=False):
batch_images = []
batch_masks = []
for i in list_IDs_temp:
# parses the image and the mask of the current pair and then generates the augmented versions
# it wasn't possible to generate the augmented images beforehand and have completely
# random images in every batch
image1 = parse_image(self.pair[i][0])
batch_images.append(image1)
mask1 = parse_mask(self.pair[i][1])
mask1 = to_one_hot(mask1)
batch_masks.append(mask1)
image2 = tf.image.flip_left_right(image1)
batch_images.append(image2)
mask2 = tf.image.flip_left_right(mask1)
batch_masks.append(mask2)
image3 = tf.image.flip_up_down(image1)
batch_images.append(image3)
mask3 = tf.image.flip_up_down(mask1)
batch_masks.append(mask3)
image4 = tf.image.flip_up_down(image1)
image4 = tf.image.flip_left_right(image4)
batch_images.append(image4)
mask4 = tf.image.flip_up_down(mask1)
mask4 = tf.image.flip_left_right(mask4)
batch_masks.append(mask4)
if second_half:
# images and masks 1 to 4 but with randomly changed brightness
delta = tf.random.uniform(shape=[], minval=-0.5, maxval=0.51)
image5 = tf.image.adjust_brightness(image1, delta)
batch_images.append(image5)
batch_masks.append(mask1)
delta = tf.random.uniform(shape=[], minval=-0.5, maxval=0.51)
image6 = tf.image.flip_left_right(image1)
image6 = tf.image.adjust_brightness(image6, delta)
batch_images.append(image6)
mask6 = tf.image.flip_left_right(mask1)
batch_masks.append(mask6)
delta = tf.random.uniform(shape=[], minval=-0.5, maxval=0.51)
image7 = tf.image.flip_up_down(image1)
image7 = tf.image.adjust_brightness(image7, delta)
batch_images.append(image7)
mask7 = tf.image.flip_up_down(mask1)
batch_masks.append(mask7)
delta = tf.random.uniform(shape=[], minval=-0.5, maxval=0.51)
image8 = tf.image.flip_up_down(image1)
image8 = tf.image.flip_left_right(image8)
image8 = tf.image.adjust_brightness(image8, delta)
batch_images.append(image8)
mask8 = tf.image.flip_up_down(mask1)
mask8 = tf.image.flip_left_right(mask8)
batch_masks.append(mask8)
return tf.stack(batch_images[4:]), tf.stack(batch_masks[4:])
# stack the images and masks of the batch into two tensors
return tf.stack(batch_images[:4]), tf.stack(batch_masks[:4])
# Data generator that does not augment images, i.e. used for validation and test set
class ValDataGenerator(Sequence):
def __init__(self, pair, batch_size, shuffle):
self.pair = pair
self.batch_size = batch_size
self.shuffle = shuffle
self.on_epoch_end()
# returns the length of the data set
def | (self):
return int(tf.math.floor(len(self.pair) / self.batch_size))
# returns a single batch
def __getitem__(self, index):
# a list that has the indexes of the pairs from which we want to generate images and masks for the batch
indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
list_IDs_temp = [k for k in indexes]
X, Y = self.__data_generation(list_IDs_temp)
return X, Y
# resets the pair indexes after each epoch and shuffles the indexes so that the batches
# are in different order for every epoch
def on_epoch_end(self):
self.indexes = tf.range(len(self.pair))
if self.shuffle == True:
tf.random.shuffle(self.indexes)
# generates a batch
def __data_generation(self, list_IDs_temp, second_half=False):
batch_images = []
batch_masks = []
for i in list_IDs_temp:
# parses the image and the mask of the current pair and then generates the augmented versions
# it wasn't possible to generate the augmented images beforehand and have completely
# random images in every batch
image1 = parse_image(self.pair[i][0])
batch_images.append(image1)
mask1 = parse_mask(self.pair[i][1])
mask1 = to_one_hot(mask1)
batch_masks.append(mask1)
# stack the images and masks of the batch into two tensors
return tf.stack(batch_images), tf.stack(batch_masks)
# takes a path to a directory with two sub folders for training images and masks
# and returns a list of pairs of paths for images and the corresponding masks
def make_pairs(path, set):
pairs = []
# sorted is very important since os.path.join somehow shuffles the paths and we need
# the image and mask paths to have the exact same order
image_paths = sorted(glob(os.path.join(path, set + "_images/*")))
mask_paths = sorted(glob(os.path.join(path, set + "_masks/*")))
#image_paths = sorted(glob(os.path.join(path, "test_images2/*")))
#mask_paths = sorted(glob(os.path.join(path, "test_masks2/*")))
for i in range(len(image_paths)):
pairs.append((image_paths[i], mask_paths[i]))
return pairs
########## LOSS FUNCTION ##########
# based on https://github.com/aruns2120/Semantic-Segmentation-Severstal/blob/master/U-Net/CS2_firstCut.ipynb
# the dice coefficient calculates how much the predicted mask and the correct mask overlap
def dice_coef(y_true, y_predict, smooth=1):
y_true_flat = tf.keras.backend.flatten(y_true)
y_pred_flat = tf.keras.backend.flatten(y_predict)
intersection = tf.keras.backend.sum(y_true_flat * y_pred_flat)
return (2. * intersection + smooth) / (tf.keras.backend.sum(y_true_flat) + tf.keras.backend.sum(y_pred_flat) + smooth)
def dice_loss(y_true, y_predict):
return (1 - dice_coef(y_true, y_predict))
# weighted variant of pixelwise_crossentropy
# based on https://www.gitmemory.com/issue/keras-team/keras/6261/569715992
def pixelwise_crossentropy(y_true, y_predicted):#
# weights that scale the error for each class such that they all have equal impact on the loss
# important since the data set is very unbalanced
# weights represent the inverse of the proportion of pixels corresponding to that class in the whole data set
# needs to be divided by 100.0 to keep the error at a similar magnitude during training
weight_proton = 132.0 / 100.0
weight_alpha = 91.0 / 100.0
weight_V = 311.0 / 100.0
weight_electron = 71.0 / 100.0
# weight_proton = 1.0 # for local testing
# weight_alpha = 1.0
# weight_V = 1.0
# weight_electron = 1.0
weights = [weight_proton, weight_alpha, weight_V, weight_electron]
# predicted values get scaled such that they are never exactly 0 or 1 since then the logarithm diverges
| __len__ | identifier_name |
training_modified.py | returns the length of the data set
def __len__(self):
return int(tf.math.floor(len(self.pair) / self.batch_size)) * 2
# returns a single batch
def __getitem__(self, index):
# a list that has the indexes of the pairs from which we want to generate images and masks for the batch
index = index // 2
indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
list_IDs_temp = [k for k in indexes]
if index in self.map:
return self.__data_generation(self.map[index], second_half=True)
self.map[index] = list_IDs_temp
X, Y = self.__data_generation(list_IDs_temp)
return X, Y
# resets the pair indexes after each epoch and shuffles the indexes so that the batches
# are in different order for every epoch
def on_epoch_end(self):
self.indexes = tf.range(len(self.pair))
self.map = dict()
if self.shuffle == True:
tf.random.shuffle(self.indexes)
# generates a batch
def __data_generation(self, list_IDs_temp, second_half=False):
batch_images = []
batch_masks = []
for i in list_IDs_temp:
# parses the image and the mask of the current pair and then generates the augmented versions
# it wasn't possible to generate the augmented images beforehand and have completely
# random images in every batch
image1 = parse_image(self.pair[i][0])
batch_images.append(image1)
mask1 = parse_mask(self.pair[i][1])
mask1 = to_one_hot(mask1)
batch_masks.append(mask1)
image2 = tf.image.flip_left_right(image1)
batch_images.append(image2)
mask2 = tf.image.flip_left_right(mask1)
batch_masks.append(mask2)
image3 = tf.image.flip_up_down(image1)
batch_images.append(image3)
mask3 = tf.image.flip_up_down(mask1)
batch_masks.append(mask3)
image4 = tf.image.flip_up_down(image1)
image4 = tf.image.flip_left_right(image4)
batch_images.append(image4)
mask4 = tf.image.flip_up_down(mask1)
mask4 = tf.image.flip_left_right(mask4)
batch_masks.append(mask4)
if second_half:
# images and masks 1 to 4 but with randomly changed brightness
delta = tf.random.uniform(shape=[], minval=-0.5, maxval=0.51)
image5 = tf.image.adjust_brightness(image1, delta)
batch_images.append(image5)
batch_masks.append(mask1)
delta = tf.random.uniform(shape=[], minval=-0.5, maxval=0.51)
image6 = tf.image.flip_left_right(image1)
image6 = tf.image.adjust_brightness(image6, delta)
batch_images.append(image6)
mask6 = tf.image.flip_left_right(mask1)
batch_masks.append(mask6)
delta = tf.random.uniform(shape=[], minval=-0.5, maxval=0.51)
image7 = tf.image.flip_up_down(image1)
image7 = tf.image.adjust_brightness(image7, delta)
batch_images.append(image7)
mask7 = tf.image.flip_up_down(mask1)
batch_masks.append(mask7)
delta = tf.random.uniform(shape=[], minval=-0.5, maxval=0.51)
image8 = tf.image.flip_up_down(image1)
image8 = tf.image.flip_left_right(image8)
image8 = tf.image.adjust_brightness(image8, delta)
batch_images.append(image8)
mask8 = tf.image.flip_up_down(mask1)
mask8 = tf.image.flip_left_right(mask8)
batch_masks.append(mask8)
return tf.stack(batch_images[4:]), tf.stack(batch_masks[4:])
# stack the images and masks of the batch into two tensors
return tf.stack(batch_images[:4]), tf.stack(batch_masks[:4])
# Data generator that does not augment images, i.e. used for validation and test set
class ValDataGenerator(Sequence):
def __init__(self, pair, batch_size, shuffle):
self.pair = pair
self.batch_size = batch_size
self.shuffle = shuffle
self.on_epoch_end()
# returns the length of the data set
def __len__(self):
return int(tf.math.floor(len(self.pair) / self.batch_size))
# returns a single batch
def __getitem__(self, index):
# a list that has the indexes of the pairs from which we want to generate images and masks for the batch
indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
list_IDs_temp = [k for k in indexes]
X, Y = self.__data_generation(list_IDs_temp)
return X, Y
# resets the pair indexes after each epoch and shuffles the indexes so that the batches
# are in different order for every epoch
def on_epoch_end(self):
self.indexes = tf.range(len(self.pair))
if self.shuffle == True:
tf.random.shuffle(self.indexes)
# generates a batch
def __data_generation(self, list_IDs_temp, second_half=False):
batch_images = []
batch_masks = []
for i in list_IDs_temp:
# parses the image and the mask of the current pair and then generates the augmented versions
# it wasn't possible to generate the augmented images beforehand and have completely
# random images in every batch
image1 = parse_image(self.pair[i][0])
batch_images.append(image1)
mask1 = parse_mask(self.pair[i][1])
mask1 = to_one_hot(mask1)
batch_masks.append(mask1)
# stack the images and masks of the batch into two tensors
return tf.stack(batch_images), tf.stack(batch_masks)
# takes a path to a directory with two sub folders for training images and masks
# and returns a list of pairs of paths for images and the corresponding masks
def make_pairs(path, set):
pairs = []
# sorted is very important since os.path.join somehow shuffles the paths and we need
# the image and mask paths to have the exact same order
image_paths = sorted(glob(os.path.join(path, set + "_images/*")))
mask_paths = sorted(glob(os.path.join(path, set + "_masks/*")))
#image_paths = sorted(glob(os.path.join(path, "test_images2/*")))
#mask_paths = sorted(glob(os.path.join(path, "test_masks2/*")))
for i in range(len(image_paths)):
pairs.append((image_paths[i], mask_paths[i]))
return pairs
########## LOSS FUNCTION ##########
# based on https://github.com/aruns2120/Semantic-Segmentation-Severstal/blob/master/U-Net/CS2_firstCut.ipynb
# the dice coefficient calculates how much the predicted mask and the correct mask overlap
def dice_coef(y_true, y_predict, smooth=1):
y_true_flat = tf.keras.backend.flatten(y_true)
y_pred_flat = tf.keras.backend.flatten(y_predict)
intersection = tf.keras.backend.sum(y_true_flat * y_pred_flat)
return (2. * intersection + smooth) / (tf.keras.backend.sum(y_true_flat) + tf.keras.backend.sum(y_pred_flat) + smooth)
def dice_loss(y_true, y_predict):
return (1 - dice_coef(y_true, y_predict))
# weighted variant of pixelwise_crossentropy
# based on https://www.gitmemory.com/issue/keras-team/keras/6261/569715992
def pixelwise_crossentropy(y_true, y_predicted):#
# weights that scale the error for each class such that they all have equal impact on the loss
# important since the data set is very unbalanced
# weights represent the inverse of the proportion of pixels corresponding to that class in the whole data set
# needs to be divided by 100.0 to keep the error at a similar magnitude during training
weight_proton = 132.0 / 100.0
weight_alpha = 91.0 / 100.0
weight_V = 311.0 / 100.0
weight_electron = 71.0 / 100.0
# weight_proton = 1.0 # for local testing
# weight_alpha = 1.0
# weight_V = 1.0
# weight_electron = 1.0
weights = [weight_proton, weight_alpha, weight_V, weight_electron]
# predicted values get scaled such that they are never exactly 0 or 1 since then the logarithm diverges
y_predicted /= tf.keras.backend.sum(y_predicted, axis=-1, keepdims=True)
y_predicted = tf.keras.backend.clip(y_predicted,
tf.keras.backend.epsilon(),
1. - tf.keras.backend.epsilon())
# compute the weighted cross_entropy
loss = y_true * tf.keras.backend.log(y_predicted)
loss = -tf.keras.backend.sum(loss * weights, -1)
return loss
# defines the custom loss function, sum of dice_loss and pixelwise_crossentropy
def pce_dice_loss(y_true, y_predict):
| return pixelwise_crossentropy(y_true, y_predict) + dice_loss(y_true, y_predict) | identifier_body | |
framebuffer.rs | PassAbstract + Send + Sync> = return;
/// # let my_image: Arc<vulkano::image::ImageViewAccess> = return;
/// // let render_pass: Arc<RenderPassAbstract + Send + Sync> = ...;
/// let framebuffer = Framebuffer::new(render_pass.clone(), [1024, 768, 1],
/// vec![my_image.clone() as Arc<_>]).unwrap();
/// ```
///
/// ## With a specialized list of attachments
///
/// The list of attachments can also be of any type `T`, as long as the render pass description
/// implements the trait `RenderPassDescAttachmentsList<T>`.
///
/// For example if you pass a render pass object that implements
/// `RenderPassDescAttachmentsList<Foo>`, then you can pass a `Foo` as the list of attachments.
///
/// > **Note**: The reason why `Vec<Arc<ImageView + Send + Sync>>` always works (see previous section) is that
/// > render pass descriptions are required to always implement
/// > `RenderPassDescAttachmentsList<Vec<Arc<ImageViewAccess + Send + Sync>>>`.
///
/// When it comes to the `single_pass_renderpass!` and `ordered_passes_renderpass!` macros, you can
/// build a list of attachments by calling `start_attachments()` on the render pass description,
/// which will return an object that has a method whose name is the name of the first attachment
/// and that can be used to specify it. This method will return another object that has a method
/// whose name is the name of the second attachment, and so on. See the documentation of the macros
/// for more details. TODO: put link here
///
/// ```ignore // FIXME: unignore
/// # #[macro_use] extern crate vulkano;
/// # fn main() {
/// # let device: std::sync::Arc<vulkano::device::Device> = return;
/// use std::sync::Arc;
/// use vulkano::format::Format;
/// use vulkano::framebuffer::Framebuffer;
///
/// let render_pass = single_pass_renderpass!(device.clone(),
/// attachments: {
/// // `foo` is a custom name we give to the first and only attachment.
/// foo: {
/// load: Clear,
/// store: Store,
/// format: Format::R8G8B8A8Unorm,
/// samples: 1,
/// }
/// },
/// pass: {
/// color: [foo], // Repeat the attachment name here.
/// depth_stencil: {}
/// }
/// ).unwrap();
///
/// # let my_image: Arc<vulkano::image::ImageViewAccess> = return;
/// let framebuffer = {
/// let atch = render_pass.desc().start_attachments().foo(my_image.clone() as Arc<_>);
/// Framebuffer::new(render_pass, [1024, 768, 1], atch).unwrap()
/// };
/// # }
/// ```
#[derive(Debug)]
pub struct Framebuffer<Rp, A> {
device: Arc<Device>,
render_pass: Rp,
framebuffer: vk::Framebuffer,
dimensions: [u32; 3],
resources: A,
}
impl<Rp> Framebuffer<Rp, Box<AttachmentsList + Send + Sync>> {
/// Builds a new framebuffer.
///
/// The `attachments` parameter depends on which render pass implementation is used.
// TODO: allow ImageView
pub fn new<Ia>(render_pass: Rp, dimensions: [u32; 3], attachments: Ia)
-> Result<Arc<Framebuffer<Rp, Box<AttachmentsList + Send + Sync>>>, FramebufferCreationError>
where Rp: RenderPassAbstract + RenderPassDescAttachmentsList<Ia>
{
let device = render_pass.device().clone();
// This function call is supposed to check whether the attachments are valid.
// For more safety, we do some additional `debug_assert`s below.
let attachments = try!(render_pass.check_attachments_list(attachments));
// TODO: add a debug assertion that checks whether the attachments are compatible
// with the RP ; this should be checked by the RenderPassDescAttachmentsList trait
// impl, but we can double-check in debug mode
// Checking the dimensions against the limits.
{
let limits = render_pass.device().physical_device().limits();
let limits = [limits.max_framebuffer_width(), limits.max_framebuffer_height(),
limits.max_framebuffer_layers()];
if dimensions[0] > limits[0] || dimensions[1] > limits[1] ||
dimensions[2] > limits[2]
{
return Err(FramebufferCreationError::DimensionsTooLarge);
}
}
// Checking the dimensions against the attachments.
if let Some(dims_constraints) = attachments.intersection_dimensions() {
if dims_constraints[0] < dimensions[0] || dims_constraints[1] < dimensions[1] ||
dims_constraints[2] < dimensions[2]
{
return Err(FramebufferCreationError::AttachmentTooSmall);
}
}
let ids: SmallVec<[vk::ImageView; 8]> =
attachments.raw_image_view_handles().into_iter().map(|v| v.internal_object()).collect();
let framebuffer = unsafe {
let vk = render_pass.device().pointers();
let infos = vk::FramebufferCreateInfo {
sType: vk::STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
pNext: ptr::null(),
flags: 0, // reserved
renderPass: render_pass.inner().internal_object(),
attachmentCount: ids.len() as u32,
pAttachments: ids.as_ptr(),
width: dimensions[0],
height: dimensions[1],
layers: dimensions[2],
};
let mut output = mem::uninitialized();
try!(check_errors(vk.CreateFramebuffer(device.internal_object(), &infos,
ptr::null(), &mut output)));
output
};
Ok(Arc::new(Framebuffer {
device: device,
render_pass: render_pass,
framebuffer: framebuffer,
dimensions: dimensions,
resources: attachments,
}))
}
}
impl<Rp, A> Framebuffer<Rp, A> {
/// Returns the width, height and layers of this framebuffer.
#[inline]
pub fn dimensions(&self) -> [u32; 3] {
self.dimensions
}
/// Returns the width of the framebuffer in pixels.
#[inline]
pub fn width(&self) -> u32 {
self.dimensions[0]
}
/// Returns the height of the framebuffer in pixels.
#[inline]
pub fn height(&self) -> u32 {
self.dimensions[1]
}
/// Returns the number of layers (or depth) of the framebuffer.
#[inline]
pub fn layers(&self) -> u32 {
self.dimensions[2]
}
/// Returns the device that was used to create this framebuffer.
#[inline]
pub fn device(&self) -> &Arc<Device> {
&self.device
}
/// Returns the renderpass that was used to create this framebuffer.
#[inline]
pub fn render_pass(&self) -> &Rp {
&self.render_pass
}
}
unsafe impl<Rp, A> FramebufferAbstract for Framebuffer<Rp, A>
where Rp: RenderPassAbstract,
A: AttachmentsList
{
#[inline]
fn inner(&self) -> FramebufferSys {
FramebufferSys(self.framebuffer, PhantomData)
}
#[inline]
fn dimensions(&self) -> [u32; 3] {
self.dimensions
}
#[inline]
fn attachments(&self) -> Vec<&ImageViewAccess> {
self.resources.as_image_view_accesses()
}
}
unsafe impl<Rp, A> RenderPassDesc for Framebuffer<Rp, A> where Rp: RenderPassDesc {
#[inline]
fn num_attachments(&self) -> usize {
self.render_pass.num_attachments()
}
#[inline]
fn attachment_desc(&self, num: usize) -> Option<LayoutAttachmentDescription> {
self.render_pass.attachment_desc(num)
}
#[inline]
fn num_subpasses(&self) -> usize {
self.render_pass.num_subpasses()
}
#[inline]
fn subpass_desc(&self, num: usize) -> Option<LayoutPassDescription> {
self.render_pass.subpass_desc(num)
}
#[inline]
fn num_dependencies(&self) -> usize {
self.render_pass.num_dependencies()
}
#[inline]
fn dependency_desc(&self, num: usize) -> Option<LayoutPassDependencyDescription> {
self.render_pass.dependency_desc(num) | where Rp: RenderPassDescAttachmentsList<At>
{
#[inline]
fn check_attachments_list(&self, atch: At) -> Result<Box<AttachmentsList + Send + Sync>, FramebufferCreationError> {
self.render_pass.check_attachments_list(atch)
}
}
unsafe impl<C, Rp, A> RenderPassDescClearValues<C> for Framebuffer<Rp, A>
where Rp: RenderPassDescClearValues<C>
{
#[inline]
fn convert_clear_values(&self, vals: C) -> Box<Iterator<Item = ClearValue>> {
self.render_pass.convert_clear_values(vals)
}
}
unsafe impl<Rp, A> RenderPassAbstract for Framebuffer<Rp, | }
}
unsafe impl<At, Rp, A> RenderPassDescAttachmentsList<At> for Framebuffer<Rp, A> | random_line_split |
framebuffer.rs | PassAbstract + Send + Sync> = return;
/// # let my_image: Arc<vulkano::image::ImageViewAccess> = return;
/// // let render_pass: Arc<RenderPassAbstract + Send + Sync> = ...;
/// let framebuffer = Framebuffer::new(render_pass.clone(), [1024, 768, 1],
/// vec![my_image.clone() as Arc<_>]).unwrap();
/// ```
///
/// ## With a specialized list of attachments
///
/// The list of attachments can also be of any type `T`, as long as the render pass description
/// implements the trait `RenderPassDescAttachmentsList<T>`.
///
/// For example if you pass a render pass object that implements
/// `RenderPassDescAttachmentsList<Foo>`, then you can pass a `Foo` as the list of attachments.
///
/// > **Note**: The reason why `Vec<Arc<ImageView + Send + Sync>>` always works (see previous section) is that
/// > render pass descriptions are required to always implement
/// > `RenderPassDescAttachmentsList<Vec<Arc<ImageViewAccess + Send + Sync>>>`.
///
/// When it comes to the `single_pass_renderpass!` and `ordered_passes_renderpass!` macros, you can
/// build a list of attachments by calling `start_attachments()` on the render pass description,
/// which will return an object that has a method whose name is the name of the first attachment
/// and that can be used to specify it. This method will return another object that has a method
/// whose name is the name of the second attachment, and so on. See the documentation of the macros
/// for more details. TODO: put link here
///
/// ```ignore // FIXME: unignore
/// # #[macro_use] extern crate vulkano;
/// # fn main() {
/// # let device: std::sync::Arc<vulkano::device::Device> = return;
/// use std::sync::Arc;
/// use vulkano::format::Format;
/// use vulkano::framebuffer::Framebuffer;
///
/// let render_pass = single_pass_renderpass!(device.clone(),
/// attachments: {
/// // `foo` is a custom name we give to the first and only attachment.
/// foo: {
/// load: Clear,
/// store: Store,
/// format: Format::R8G8B8A8Unorm,
/// samples: 1,
/// }
/// },
/// pass: {
/// color: [foo], // Repeat the attachment name here.
/// depth_stencil: {}
/// }
/// ).unwrap();
///
/// # let my_image: Arc<vulkano::image::ImageViewAccess> = return;
/// let framebuffer = {
/// let atch = render_pass.desc().start_attachments().foo(my_image.clone() as Arc<_>);
/// Framebuffer::new(render_pass, [1024, 768, 1], atch).unwrap()
/// };
/// # }
/// ```
#[derive(Debug)]
pub struct Framebuffer<Rp, A> {
device: Arc<Device>,
render_pass: Rp,
framebuffer: vk::Framebuffer,
dimensions: [u32; 3],
resources: A,
}
impl<Rp> Framebuffer<Rp, Box<AttachmentsList + Send + Sync>> {
/// Builds a new framebuffer.
///
/// The `attachments` parameter depends on which render pass implementation is used.
// TODO: allow ImageView
pub fn | <Ia>(render_pass: Rp, dimensions: [u32; 3], attachments: Ia)
-> Result<Arc<Framebuffer<Rp, Box<AttachmentsList + Send + Sync>>>, FramebufferCreationError>
where Rp: RenderPassAbstract + RenderPassDescAttachmentsList<Ia>
{
let device = render_pass.device().clone();
// This function call is supposed to check whether the attachments are valid.
// For more safety, we do some additional `debug_assert`s below.
let attachments = try!(render_pass.check_attachments_list(attachments));
// TODO: add a debug assertion that checks whether the attachments are compatible
// with the RP ; this should be checked by the RenderPassDescAttachmentsList trait
// impl, but we can double-check in debug mode
// Checking the dimensions against the limits.
{
let limits = render_pass.device().physical_device().limits();
let limits = [limits.max_framebuffer_width(), limits.max_framebuffer_height(),
limits.max_framebuffer_layers()];
if dimensions[0] > limits[0] || dimensions[1] > limits[1] ||
dimensions[2] > limits[2]
{
return Err(FramebufferCreationError::DimensionsTooLarge);
}
}
// Checking the dimensions against the attachments.
if let Some(dims_constraints) = attachments.intersection_dimensions() {
if dims_constraints[0] < dimensions[0] || dims_constraints[1] < dimensions[1] ||
dims_constraints[2] < dimensions[2]
{
return Err(FramebufferCreationError::AttachmentTooSmall);
}
}
let ids: SmallVec<[vk::ImageView; 8]> =
attachments.raw_image_view_handles().into_iter().map(|v| v.internal_object()).collect();
let framebuffer = unsafe {
let vk = render_pass.device().pointers();
let infos = vk::FramebufferCreateInfo {
sType: vk::STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
pNext: ptr::null(),
flags: 0, // reserved
renderPass: render_pass.inner().internal_object(),
attachmentCount: ids.len() as u32,
pAttachments: ids.as_ptr(),
width: dimensions[0],
height: dimensions[1],
layers: dimensions[2],
};
let mut output = mem::uninitialized();
try!(check_errors(vk.CreateFramebuffer(device.internal_object(), &infos,
ptr::null(), &mut output)));
output
};
Ok(Arc::new(Framebuffer {
device: device,
render_pass: render_pass,
framebuffer: framebuffer,
dimensions: dimensions,
resources: attachments,
}))
}
}
impl<Rp, A> Framebuffer<Rp, A> {
/// Returns the width, height and layers of this framebuffer.
#[inline]
pub fn dimensions(&self) -> [u32; 3] {
self.dimensions
}
/// Returns the width of the framebuffer in pixels.
#[inline]
pub fn width(&self) -> u32 {
self.dimensions[0]
}
/// Returns the height of the framebuffer in pixels.
#[inline]
pub fn height(&self) -> u32 {
self.dimensions[1]
}
/// Returns the number of layers (or depth) of the framebuffer.
#[inline]
pub fn layers(&self) -> u32 {
self.dimensions[2]
}
/// Returns the device that was used to create this framebuffer.
#[inline]
pub fn device(&self) -> &Arc<Device> {
&self.device
}
/// Returns the renderpass that was used to create this framebuffer.
#[inline]
pub fn render_pass(&self) -> &Rp {
&self.render_pass
}
}
unsafe impl<Rp, A> FramebufferAbstract for Framebuffer<Rp, A>
where Rp: RenderPassAbstract,
A: AttachmentsList
{
#[inline]
fn inner(&self) -> FramebufferSys {
FramebufferSys(self.framebuffer, PhantomData)
}
#[inline]
fn dimensions(&self) -> [u32; 3] {
self.dimensions
}
#[inline]
fn attachments(&self) -> Vec<&ImageViewAccess> {
self.resources.as_image_view_accesses()
}
}
unsafe impl<Rp, A> RenderPassDesc for Framebuffer<Rp, A> where Rp: RenderPassDesc {
#[inline]
fn num_attachments(&self) -> usize {
self.render_pass.num_attachments()
}
#[inline]
fn attachment_desc(&self, num: usize) -> Option<LayoutAttachmentDescription> {
self.render_pass.attachment_desc(num)
}
#[inline]
fn num_subpasses(&self) -> usize {
self.render_pass.num_subpasses()
}
#[inline]
fn subpass_desc(&self, num: usize) -> Option<LayoutPassDescription> {
self.render_pass.subpass_desc(num)
}
#[inline]
fn num_dependencies(&self) -> usize {
self.render_pass.num_dependencies()
}
#[inline]
fn dependency_desc(&self, num: usize) -> Option<LayoutPassDependencyDescription> {
self.render_pass.dependency_desc(num)
}
}
unsafe impl<At, Rp, A> RenderPassDescAttachmentsList<At> for Framebuffer<Rp, A>
where Rp: RenderPassDescAttachmentsList<At>
{
#[inline]
fn check_attachments_list(&self, atch: At) -> Result<Box<AttachmentsList + Send + Sync>, FramebufferCreationError> {
self.render_pass.check_attachments_list(atch)
}
}
unsafe impl<C, Rp, A> RenderPassDescClearValues<C> for Framebuffer<Rp, A>
where Rp: RenderPassDescClearValues<C>
{
#[inline]
fn convert_clear_values(&self, vals: C) -> Box<Iterator<Item = ClearValue>> {
self.render_pass.convert_clear_values(vals)
}
}
unsafe impl<Rp, A> RenderPassAbstract for Framebuffer<Rp | new | identifier_name |
framebuffer.rs | PassAbstract + Send + Sync> = return;
/// # let my_image: Arc<vulkano::image::ImageViewAccess> = return;
/// // let render_pass: Arc<RenderPassAbstract + Send + Sync> = ...;
/// let framebuffer = Framebuffer::new(render_pass.clone(), [1024, 768, 1],
/// vec![my_image.clone() as Arc<_>]).unwrap();
/// ```
///
/// ## With a specialized list of attachments
///
/// The list of attachments can also be of any type `T`, as long as the render pass description
/// implements the trait `RenderPassDescAttachmentsList<T>`.
///
/// For example if you pass a render pass object that implements
/// `RenderPassDescAttachmentsList<Foo>`, then you can pass a `Foo` as the list of attachments.
///
/// > **Note**: The reason why `Vec<Arc<ImageView + Send + Sync>>` always works (see previous section) is that
/// > render pass descriptions are required to always implement
/// > `RenderPassDescAttachmentsList<Vec<Arc<ImageViewAccess + Send + Sync>>>`.
///
/// When it comes to the `single_pass_renderpass!` and `ordered_passes_renderpass!` macros, you can
/// build a list of attachments by calling `start_attachments()` on the render pass description,
/// which will return an object that has a method whose name is the name of the first attachment
/// and that can be used to specify it. This method will return another object that has a method
/// whose name is the name of the second attachment, and so on. See the documentation of the macros
/// for more details. TODO: put link here
///
/// ```ignore // FIXME: unignore
/// # #[macro_use] extern crate vulkano;
/// # fn main() {
/// # let device: std::sync::Arc<vulkano::device::Device> = return;
/// use std::sync::Arc;
/// use vulkano::format::Format;
/// use vulkano::framebuffer::Framebuffer;
///
/// let render_pass = single_pass_renderpass!(device.clone(),
/// attachments: {
/// // `foo` is a custom name we give to the first and only attachment.
/// foo: {
/// load: Clear,
/// store: Store,
/// format: Format::R8G8B8A8Unorm,
/// samples: 1,
/// }
/// },
/// pass: {
/// color: [foo], // Repeat the attachment name here.
/// depth_stencil: {}
/// }
/// ).unwrap();
///
/// # let my_image: Arc<vulkano::image::ImageViewAccess> = return;
/// let framebuffer = {
/// let atch = render_pass.desc().start_attachments().foo(my_image.clone() as Arc<_>);
/// Framebuffer::new(render_pass, [1024, 768, 1], atch).unwrap()
/// };
/// # }
/// ```
#[derive(Debug)]
pub struct Framebuffer<Rp, A> {
device: Arc<Device>,
render_pass: Rp,
framebuffer: vk::Framebuffer,
dimensions: [u32; 3],
resources: A,
}
impl<Rp> Framebuffer<Rp, Box<AttachmentsList + Send + Sync>> {
/// Builds a new framebuffer.
///
/// The `attachments` parameter depends on which render pass implementation is used.
// TODO: allow ImageView
pub fn new<Ia>(render_pass: Rp, dimensions: [u32; 3], attachments: Ia)
-> Result<Arc<Framebuffer<Rp, Box<AttachmentsList + Send + Sync>>>, FramebufferCreationError>
where Rp: RenderPassAbstract + RenderPassDescAttachmentsList<Ia>
{
let device = render_pass.device().clone();
// This function call is supposed to check whether the attachments are valid.
// For more safety, we do some additional `debug_assert`s below.
let attachments = try!(render_pass.check_attachments_list(attachments));
// TODO: add a debug assertion that checks whether the attachments are compatible
// with the RP ; this should be checked by the RenderPassDescAttachmentsList trait
// impl, but we can double-check in debug mode
// Checking the dimensions against the limits.
{
let limits = render_pass.device().physical_device().limits();
let limits = [limits.max_framebuffer_width(), limits.max_framebuffer_height(),
limits.max_framebuffer_layers()];
if dimensions[0] > limits[0] || dimensions[1] > limits[1] ||
dimensions[2] > limits[2]
|
}
// Checking the dimensions against the attachments.
if let Some(dims_constraints) = attachments.intersection_dimensions() {
if dims_constraints[0] < dimensions[0] || dims_constraints[1] < dimensions[1] ||
dims_constraints[2] < dimensions[2]
{
return Err(FramebufferCreationError::AttachmentTooSmall);
}
}
let ids: SmallVec<[vk::ImageView; 8]> =
attachments.raw_image_view_handles().into_iter().map(|v| v.internal_object()).collect();
let framebuffer = unsafe {
let vk = render_pass.device().pointers();
let infos = vk::FramebufferCreateInfo {
sType: vk::STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
pNext: ptr::null(),
flags: 0, // reserved
renderPass: render_pass.inner().internal_object(),
attachmentCount: ids.len() as u32,
pAttachments: ids.as_ptr(),
width: dimensions[0],
height: dimensions[1],
layers: dimensions[2],
};
let mut output = mem::uninitialized();
try!(check_errors(vk.CreateFramebuffer(device.internal_object(), &infos,
ptr::null(), &mut output)));
output
};
Ok(Arc::new(Framebuffer {
device: device,
render_pass: render_pass,
framebuffer: framebuffer,
dimensions: dimensions,
resources: attachments,
}))
}
}
impl<Rp, A> Framebuffer<Rp, A> {
/// Returns the width, height and layers of this framebuffer.
#[inline]
pub fn dimensions(&self) -> [u32; 3] {
self.dimensions
}
/// Returns the width of the framebuffer in pixels.
#[inline]
pub fn width(&self) -> u32 {
self.dimensions[0]
}
/// Returns the height of the framebuffer in pixels.
#[inline]
pub fn height(&self) -> u32 {
self.dimensions[1]
}
/// Returns the number of layers (or depth) of the framebuffer.
#[inline]
pub fn layers(&self) -> u32 {
self.dimensions[2]
}
/// Returns the device that was used to create this framebuffer.
#[inline]
pub fn device(&self) -> &Arc<Device> {
&self.device
}
/// Returns the renderpass that was used to create this framebuffer.
#[inline]
pub fn render_pass(&self) -> &Rp {
&self.render_pass
}
}
unsafe impl<Rp, A> FramebufferAbstract for Framebuffer<Rp, A>
where Rp: RenderPassAbstract,
A: AttachmentsList
{
#[inline]
fn inner(&self) -> FramebufferSys {
FramebufferSys(self.framebuffer, PhantomData)
}
#[inline]
fn dimensions(&self) -> [u32; 3] {
self.dimensions
}
#[inline]
fn attachments(&self) -> Vec<&ImageViewAccess> {
self.resources.as_image_view_accesses()
}
}
unsafe impl<Rp, A> RenderPassDesc for Framebuffer<Rp, A> where Rp: RenderPassDesc {
#[inline]
fn num_attachments(&self) -> usize {
self.render_pass.num_attachments()
}
#[inline]
fn attachment_desc(&self, num: usize) -> Option<LayoutAttachmentDescription> {
self.render_pass.attachment_desc(num)
}
#[inline]
fn num_subpasses(&self) -> usize {
self.render_pass.num_subpasses()
}
#[inline]
fn subpass_desc(&self, num: usize) -> Option<LayoutPassDescription> {
self.render_pass.subpass_desc(num)
}
#[inline]
fn num_dependencies(&self) -> usize {
self.render_pass.num_dependencies()
}
#[inline]
fn dependency_desc(&self, num: usize) -> Option<LayoutPassDependencyDescription> {
self.render_pass.dependency_desc(num)
}
}
unsafe impl<At, Rp, A> RenderPassDescAttachmentsList<At> for Framebuffer<Rp, A>
where Rp: RenderPassDescAttachmentsList<At>
{
#[inline]
fn check_attachments_list(&self, atch: At) -> Result<Box<AttachmentsList + Send + Sync>, FramebufferCreationError> {
self.render_pass.check_attachments_list(atch)
}
}
unsafe impl<C, Rp, A> RenderPassDescClearValues<C> for Framebuffer<Rp, A>
where Rp: RenderPassDescClearValues<C>
{
#[inline]
fn convert_clear_values(&self, vals: C) -> Box<Iterator<Item = ClearValue>> {
self.render_pass.convert_clear_values(vals)
}
}
unsafe impl<Rp, A> RenderPassAbstract for Framebuffer<Rp | {
return Err(FramebufferCreationError::DimensionsTooLarge);
} | conditional_block |
utils.go | 02 Jan 2006 15:04:05 -07:00",
"Mon, 02 Jan 2006 15:04:05 00",
"Mon, 02 Jan 2006 15:04:05 MST -0700",
"Mon, 02 Jan 2006 15:04:05 MST",
"Mon, 02 Jan 2006 15:04:05 MST-07:00",
"Mon, 02 Jan 2006 15:04:05 UT",
"Mon, 02 Jan 2006 15:04:05 Z",
"Mon, 02 Jan 2006 15:04:05",
"Mon, 02 Jan 2006 15:04:05MST",
"Mon, 02 Jan 2006 3:04:05 PM MST",
"Mon, 02 Jan 2006",
"Mon, 02 January 2006",
"Mon, 2 Jan 06 15:04:05 -0700",
"Mon, 2 Jan 06 15:04:05 MST",
"Mon, 2 Jan 15:04:05 MST",
"Mon, 2 Jan 2006 15:04",
"Mon, 2 Jan 2006 15:04:05 -0700 MST",
"Mon, 2 Jan 2006 15:04:05 -0700",
"Mon, 2 Jan 2006 15:04:05 MST",
"Mon, 2 Jan 2006 15:04:05 UT",
"Mon, 2 Jan 2006 15:04:05",
"Mon, 2 Jan 2006 15:04:05-0700",
"Mon, 2 Jan 2006 15:04:05MST",
"Mon, 2 Jan 2006 15:4:5 MST",
"Mon, 2 Jan 2006",
"Mon, 2 Jan 2006, 15:04 -0700",
"Mon, 2 January 2006 15:04:05 -0700",
"Mon, 2 January 2006 15:04:05 MST",
"Mon, 2 January 2006, 15:04 -0700",
"Mon, 2 January 2006, 15:04:05 MST",
"Mon, 2, Jan 2006 15:4",
"Mon, Jan 2 2006 15:04:05 -0700",
"Mon, Jan 2 2006 15:04:05 -700",
"Mon, January 02, 2006, 15:04:05 MST",
"Mon, January 2 2006 15:04:05 -0700",
"Mon,02 Jan 2006 15:04:05 -0700",
"Mon,02 January 2006 14:04:05 MST",
"Monday, 02 January 2006 15:04:05 -0700",
"Monday, 02 January 2006 15:04:05 MST",
"Monday, 02 January 2006 15:04:05",
"Monday, 2 Jan 2006 15:04:05 -0700",
"Monday, 2 Jan 2006 15:04:05 MST",
"Monday, 2 January 2006 15:04:05 -0700",
"Monday, 2 January 2006 15:04:05 MST",
"Monday, January 02, 2006",
"Monday, January 2, 2006 03:04 PM",
"Monday, January 2, 2006 15:04:05 MST",
"Monday, January 2, 2006",
"Updated January 2, 2006",
"mon,2 Jan 2006 15:04:05 MST",
time.ANSIC,
time.RFC1123,
time.RFC1123Z,
time.RFC3339,
time.RFC822,
time.RFC822Z,
time.RFC850,
time.RubyDate,
time.UnixDate,
}
func parseDate(c appengine.Context, feed *Feed, ds ...string) (t time.Time, err error) {
for _, d := range ds {
d = strings.TrimSpace(d)
if d == "" {
continue
}
for _, f := range dateFormats {
if t, err = time.Parse(f, d); err == nil {
return
}
}
gn := goon.FromContext(c)
gn.Put(&DateFormat{
Id: d,
Parent: gn.Key(feed),
})
}
err = fmt.Errorf("could not parse date: %v", strings.Join(ds, ", "))
return
}
func ParseFeed(c appengine.Context, u string, b []byte) (*Feed, []*Story) {
f := Feed{Url: u}
var s []*Story
a := atom.Feed{}
var atomerr, rsserr, rdferr, err error
var fb, eb *url.URL
d := xml.NewDecoder(bytes.NewReader(b))
d.CharsetReader = charset.NewReader
if atomerr = d.Decode(&a); atomerr == nil {
f.Title = a.Title
if t, err := parseDate(c, &f, string(a.Updated)); err == nil {
f.Updated = t
}
if fb, err = url.Parse(a.XMLBase); err != nil {
fb, _ = url.Parse("")
}
if len(a.Link) > 0 {
f.Link = findBestAtomLink(c, a.Link).Href
if l, err := fb.Parse(f.Link); err == nil {
f.Link = l.String()
}
}
for _, i := range a.Entry {
if eb, err = fb.Parse(i.XMLBase); err != nil {
eb = fb
}
st := Story{
Id: i.ID,
Title: i.Title,
}
if t, err := parseDate(c, &f, string(i.Updated)); err == nil {
st.Updated = t
}
if t, err := parseDate(c, &f, string(i.Published)); err == nil {
st.Published = t
}
if len(i.Link) > 0 {
st.Link = findBestAtomLink(c, i.Link).Href
if l, err := eb.Parse(st.Link); err == nil {
st.Link = l.String()
}
}
if i.Author != nil {
st.Author = i.Author.Name
}
if i.Content != nil {
if len(strings.TrimSpace(i.Content.Body)) != 0 {
st.content = i.Content.Body
} else if len(i.Content.InnerXML) != 0 {
st.content = i.Content.InnerXML
}
} else if i.Summary != nil {
st.content = i.Summary.Body
}
s = append(s, &st)
}
return parseFix(c, &f, s)
}
r := rss.Rss{}
d = xml.NewDecoder(bytes.NewReader(b))
d.CharsetReader = charset.NewReader
d.DefaultSpace = "DefaultSpace"
if rsserr = d.Decode(&r); rsserr == nil {
f.Title = r.Title
f.Link = r.Link
if t, err := parseDate(c, &f, r.LastBuildDate, r.PubDate); err == nil {
f.Updated = t
} else {
c.Warningf("no rss feed date: %v", f.Link)
}
for _, i := range r.Items {
st := Story{
Link: i.Link,
Author: i.Author,
}
if i.Title != "" {
st.Title = i.Title
} else if i.Description != "" | {
i.Title = i.Description
} | conditional_block | |
utils.go | 6 15:04:05",
"Monday, 2 Jan 2006 15:04:05 -0700",
"Monday, 2 Jan 2006 15:04:05 MST",
"Monday, 2 January 2006 15:04:05 -0700",
"Monday, 2 January 2006 15:04:05 MST",
"Monday, January 02, 2006",
"Monday, January 2, 2006 03:04 PM",
"Monday, January 2, 2006 15:04:05 MST",
"Monday, January 2, 2006",
"Updated January 2, 2006",
"mon,2 Jan 2006 15:04:05 MST",
time.ANSIC,
time.RFC1123,
time.RFC1123Z,
time.RFC3339,
time.RFC822,
time.RFC822Z,
time.RFC850,
time.RubyDate,
time.UnixDate,
}
func parseDate(c appengine.Context, feed *Feed, ds ...string) (t time.Time, err error) {
for _, d := range ds {
d = strings.TrimSpace(d)
if d == "" {
continue
}
for _, f := range dateFormats {
if t, err = time.Parse(f, d); err == nil {
return
}
}
gn := goon.FromContext(c)
gn.Put(&DateFormat{
Id: d,
Parent: gn.Key(feed),
})
}
err = fmt.Errorf("could not parse date: %v", strings.Join(ds, ", "))
return
}
func ParseFeed(c appengine.Context, u string, b []byte) (*Feed, []*Story) {
f := Feed{Url: u}
var s []*Story
a := atom.Feed{}
var atomerr, rsserr, rdferr, err error
var fb, eb *url.URL
d := xml.NewDecoder(bytes.NewReader(b))
d.CharsetReader = charset.NewReader
if atomerr = d.Decode(&a); atomerr == nil {
f.Title = a.Title
if t, err := parseDate(c, &f, string(a.Updated)); err == nil {
f.Updated = t
}
if fb, err = url.Parse(a.XMLBase); err != nil {
fb, _ = url.Parse("")
}
if len(a.Link) > 0 {
f.Link = findBestAtomLink(c, a.Link).Href
if l, err := fb.Parse(f.Link); err == nil {
f.Link = l.String()
}
}
for _, i := range a.Entry {
if eb, err = fb.Parse(i.XMLBase); err != nil {
eb = fb
}
st := Story{
Id: i.ID,
Title: i.Title,
}
if t, err := parseDate(c, &f, string(i.Updated)); err == nil {
st.Updated = t
}
if t, err := parseDate(c, &f, string(i.Published)); err == nil {
st.Published = t
}
if len(i.Link) > 0 {
st.Link = findBestAtomLink(c, i.Link).Href
if l, err := eb.Parse(st.Link); err == nil {
st.Link = l.String()
}
}
if i.Author != nil {
st.Author = i.Author.Name
}
if i.Content != nil {
if len(strings.TrimSpace(i.Content.Body)) != 0 {
st.content = i.Content.Body
} else if len(i.Content.InnerXML) != 0 {
st.content = i.Content.InnerXML
}
} else if i.Summary != nil {
st.content = i.Summary.Body
}
s = append(s, &st)
}
return parseFix(c, &f, s)
}
r := rss.Rss{}
d = xml.NewDecoder(bytes.NewReader(b))
d.CharsetReader = charset.NewReader
d.DefaultSpace = "DefaultSpace"
if rsserr = d.Decode(&r); rsserr == nil {
f.Title = r.Title
f.Link = r.Link
if t, err := parseDate(c, &f, r.LastBuildDate, r.PubDate); err == nil {
f.Updated = t
} else {
c.Warningf("no rss feed date: %v", f.Link)
}
for _, i := range r.Items {
st := Story{
Link: i.Link,
Author: i.Author,
}
if i.Title != "" {
st.Title = i.Title
} else if i.Description != "" {
i.Title = i.Description
}
if i.Content != "" {
st.content = i.Content
} else if i.Title != "" && i.Description != "" {
st.content = i.Description
}
if i.Guid != nil {
st.Id = i.Guid.Guid
}
if i.Media != nil {
st.MediaContent = i.Media.URL
}
if t, err := parseDate(c, &f, i.PubDate, i.Date, i.Published); err == nil {
st.Published = t
st.Updated = t
}
s = append(s, &st)
}
return parseFix(c, &f, s)
}
rd := rdf.RDF{}
d = xml.NewDecoder(bytes.NewReader(b))
d.CharsetReader = charset.NewReader
if rdferr = d.Decode(&rd); rdferr == nil {
if rd.Channel != nil {
f.Title = rd.Channel.Title
f.Link = rd.Channel.Link
if t, err := parseDate(c, &f, rd.Channel.Date); err == nil {
f.Updated = t
}
}
for _, i := range rd.Item {
st := Story{
Id: i.About,
Title: i.Title,
Link: i.Link,
Author: i.Creator,
}
st.content = html.UnescapeString(i.Description)
if t, err := parseDate(c, &f, i.Date); err == nil {
st.Published = t
st.Updated = t
}
s = append(s, &st)
}
return parseFix(c, &f, s)
}
c.Warningf("atom parse error: %s", atomerr.Error())
c.Warningf("xml parse error: %s", rsserr.Error())
c.Warningf("rdf parse error: %s", rdferr.Error())
return nil, nil
}
func findBestAtomLink(c appengine.Context, links []atom.Link) atom.Link {
getScore := func(l atom.Link) int {
switch {
case l.Rel == "hub":
return 0
case l.Type == "text/html":
return 3
case l.Rel != "self":
return 2
default:
return 1
}
}
var bestlink atom.Link
bestscore := -1
for _, l := range links {
score := getScore(l)
if score > bestscore {
bestlink = l
bestscore = score
}
}
return bestlink
}
func parseFix(c appengine.Context, f *Feed, ss []*Story) (*Feed, []*Story) {
g := goon.FromContext(c)
f.Checked = time.Now()
fk := g.Key(f)
f.Image = loadImage(c, f)
if u, err := url.Parse(f.Url); err == nil {
if ul, err := u.Parse(f.Link); err == nil {
f.Link = ul.String()
}
}
base, err := url.Parse(f.Link)
if err != nil {
c.Warningf("unable to parse link: %v", f.Link)
}
for _, s := range ss {
s.Parent = fk
s.Created = f.Checked
if !s.Updated.IsZero() && s.Published.IsZero() {
s.Published = s.Updated
}
if s.Published.IsZero() || f.Checked.Before(s.Published) {
s.Published = f.Checked
}
if !s.Updated.IsZero() {
s.Date = s.Updated.Unix()
} else {
s.Date = s.Published.Unix()
}
if s.Id == "" {
if s.Link != "" {
s.Id = s.Link
} else if s.Title != "" {
s.Id = s.Title
} else {
c.Errorf("story has no id: %v", s)
return nil, nil
}
}
// if a story doesn't have a link, see if its id is a URL
if s.Link == "" {
if u, err := url.Parse(s.Id); err == nil {
s.Link = u.String()
}
}
if base != nil && s.Link != "" { | link, err := base.Parse(s.Link) | random_line_split | |
utils.go | 04:05 MST",
"Monday, January 2, 2006",
"Updated January 2, 2006",
"mon,2 Jan 2006 15:04:05 MST",
time.ANSIC,
time.RFC1123,
time.RFC1123Z,
time.RFC3339,
time.RFC822,
time.RFC822Z,
time.RFC850,
time.RubyDate,
time.UnixDate,
}
func parseDate(c appengine.Context, feed *Feed, ds ...string) (t time.Time, err error) {
for _, d := range ds {
d = strings.TrimSpace(d)
if d == "" {
continue
}
for _, f := range dateFormats {
if t, err = time.Parse(f, d); err == nil {
return
}
}
gn := goon.FromContext(c)
gn.Put(&DateFormat{
Id: d,
Parent: gn.Key(feed),
})
}
err = fmt.Errorf("could not parse date: %v", strings.Join(ds, ", "))
return
}
func ParseFeed(c appengine.Context, u string, b []byte) (*Feed, []*Story) {
f := Feed{Url: u}
var s []*Story
a := atom.Feed{}
var atomerr, rsserr, rdferr, err error
var fb, eb *url.URL
d := xml.NewDecoder(bytes.NewReader(b))
d.CharsetReader = charset.NewReader
if atomerr = d.Decode(&a); atomerr == nil {
f.Title = a.Title
if t, err := parseDate(c, &f, string(a.Updated)); err == nil {
f.Updated = t
}
if fb, err = url.Parse(a.XMLBase); err != nil {
fb, _ = url.Parse("")
}
if len(a.Link) > 0 {
f.Link = findBestAtomLink(c, a.Link).Href
if l, err := fb.Parse(f.Link); err == nil {
f.Link = l.String()
}
}
for _, i := range a.Entry {
if eb, err = fb.Parse(i.XMLBase); err != nil {
eb = fb
}
st := Story{
Id: i.ID,
Title: i.Title,
}
if t, err := parseDate(c, &f, string(i.Updated)); err == nil {
st.Updated = t
}
if t, err := parseDate(c, &f, string(i.Published)); err == nil {
st.Published = t
}
if len(i.Link) > 0 {
st.Link = findBestAtomLink(c, i.Link).Href
if l, err := eb.Parse(st.Link); err == nil {
st.Link = l.String()
}
}
if i.Author != nil {
st.Author = i.Author.Name
}
if i.Content != nil {
if len(strings.TrimSpace(i.Content.Body)) != 0 {
st.content = i.Content.Body
} else if len(i.Content.InnerXML) != 0 {
st.content = i.Content.InnerXML
}
} else if i.Summary != nil {
st.content = i.Summary.Body
}
s = append(s, &st)
}
return parseFix(c, &f, s)
}
r := rss.Rss{}
d = xml.NewDecoder(bytes.NewReader(b))
d.CharsetReader = charset.NewReader
d.DefaultSpace = "DefaultSpace"
if rsserr = d.Decode(&r); rsserr == nil {
f.Title = r.Title
f.Link = r.Link
if t, err := parseDate(c, &f, r.LastBuildDate, r.PubDate); err == nil {
f.Updated = t
} else {
c.Warningf("no rss feed date: %v", f.Link)
}
for _, i := range r.Items {
st := Story{
Link: i.Link,
Author: i.Author,
}
if i.Title != "" {
st.Title = i.Title
} else if i.Description != "" {
i.Title = i.Description
}
if i.Content != "" {
st.content = i.Content
} else if i.Title != "" && i.Description != "" {
st.content = i.Description
}
if i.Guid != nil {
st.Id = i.Guid.Guid
}
if i.Media != nil {
st.MediaContent = i.Media.URL
}
if t, err := parseDate(c, &f, i.PubDate, i.Date, i.Published); err == nil {
st.Published = t
st.Updated = t
}
s = append(s, &st)
}
return parseFix(c, &f, s)
}
rd := rdf.RDF{}
d = xml.NewDecoder(bytes.NewReader(b))
d.CharsetReader = charset.NewReader
if rdferr = d.Decode(&rd); rdferr == nil {
if rd.Channel != nil {
f.Title = rd.Channel.Title
f.Link = rd.Channel.Link
if t, err := parseDate(c, &f, rd.Channel.Date); err == nil {
f.Updated = t
}
}
for _, i := range rd.Item {
st := Story{
Id: i.About,
Title: i.Title,
Link: i.Link,
Author: i.Creator,
}
st.content = html.UnescapeString(i.Description)
if t, err := parseDate(c, &f, i.Date); err == nil {
st.Published = t
st.Updated = t
}
s = append(s, &st)
}
return parseFix(c, &f, s)
}
c.Warningf("atom parse error: %s", atomerr.Error())
c.Warningf("xml parse error: %s", rsserr.Error())
c.Warningf("rdf parse error: %s", rdferr.Error())
return nil, nil
}
func findBestAtomLink(c appengine.Context, links []atom.Link) atom.Link {
getScore := func(l atom.Link) int {
switch {
case l.Rel == "hub":
return 0
case l.Type == "text/html":
return 3
case l.Rel != "self":
return 2
default:
return 1
}
}
var bestlink atom.Link
bestscore := -1
for _, l := range links {
score := getScore(l)
if score > bestscore {
bestlink = l
bestscore = score
}
}
return bestlink
}
func parseFix(c appengine.Context, f *Feed, ss []*Story) (*Feed, []*Story) {
g := goon.FromContext(c)
f.Checked = time.Now()
fk := g.Key(f)
f.Image = loadImage(c, f)
if u, err := url.Parse(f.Url); err == nil {
if ul, err := u.Parse(f.Link); err == nil {
f.Link = ul.String()
}
}
base, err := url.Parse(f.Link)
if err != nil {
c.Warningf("unable to parse link: %v", f.Link)
}
for _, s := range ss {
s.Parent = fk
s.Created = f.Checked
if !s.Updated.IsZero() && s.Published.IsZero() {
s.Published = s.Updated
}
if s.Published.IsZero() || f.Checked.Before(s.Published) {
s.Published = f.Checked
}
if !s.Updated.IsZero() {
s.Date = s.Updated.Unix()
} else {
s.Date = s.Published.Unix()
}
if s.Id == "" {
if s.Link != "" {
s.Id = s.Link
} else if s.Title != "" {
s.Id = s.Title
} else {
c.Errorf("story has no id: %v", s)
return nil, nil
}
}
// if a story doesn't have a link, see if its id is a URL
if s.Link == "" {
if u, err := url.Parse(s.Id); err == nil {
s.Link = u.String()
}
}
if base != nil && s.Link != "" {
link, err := base.Parse(s.Link)
if err == nil {
s.Link = link.String()
} else {
c.Warningf("unable to resolve link: %v", s.Link)
}
}
const keySize = 500
sk := g.Key(s)
if kl := len(sk.Encode()); kl > keySize {
c.Errorf("key too long: %v, %v, %v", kl, f.Url, s.Id)
return nil, nil
}
su, serr := url.Parse(s.Link)
if serr != nil {
su = &url.URL{}
s.Link = ""
}
s.content, s.Summary = Sanitize(s.content, su)
}
return f, ss
}
func | loadImage | identifier_name | |
utils.go | i.User = user
i.IsAdmin = cu.Admin
if len(user.Messages) > 0 {
i.Messages = user.Messages
user.Messages = nil
gn.Put(user)
}
/*
if _, err := r.Cookie("update-bug"); err != nil {
i.Messages = append(i.Messages, "Go Read had some problems updating feeds. It may take a while for new stories to appear again. Sorry about that.")
http.SetCookie(w, &http.Cookie{
Name: "update-bug",
Value: "done",
Expires: time.Now().Add(time.Hour * 24 * 7),
})
}
*/
}
}
return i
}
var dateFormats = []string{
"01-02-2006",
"01/02/2006 15:04:05 MST",
"02 Jan 2006 15:04 MST",
"02 Jan 2006 15:04:05 -0700",
"02 Jan 2006 15:04:05 MST",
"02 Jan 2006 15:04:05 UT",
"02 Jan 2006",
"02-01-2006 15:04:05 MST",
"02.01.2006 -0700",
"02.01.2006 15:04:05",
"02/01/2006 15:04:05",
"02/01/2006",
"06-1-2 15:04",
"06/1/2 15:04",
"1/2/2006 15:04:05 MST",
"1/2/2006 3:04:05 PM",
"15:04 02.01.2006 -0700",
"2 Jan 2006 15:04:05 MST",
"2 Jan 2006",
"2 January 2006 15:04:05 -0700",
"2 January 2006",
"2006 January 02",
"2006-01-02 00:00:00.0 15:04:05.0 -0700",
"2006-01-02 15:04",
"2006-01-02 15:04:05 -0700",
"2006-01-02 15:04:05 MST",
"2006-01-02 15:04:05-07:00",
"2006-01-02 15:04:05Z",
"2006-01-02",
"2006-01-02T15:04-07:00",
"2006-01-02T15:04:05 -0700",
"2006-01-02T15:04:05",
"2006-01-02T15:04:05-0700",
"2006-01-02T15:04:05-07:00",
"2006-01-02T15:04:05-07:00:00",
"2006-01-02T15:04:05:-0700",
"2006-01-02T15:04:05:00",
"2006-01-02T15:04:05Z",
"2006-1-02T15:04:05Z",
"2006-1-2 15:04:05",
"2006-1-2",
"2006/01/02",
"6-1-2 15:04",
"6/1/2 15:04",
"Jan 02 2006 03:04:05PM",
"Jan 2, 2006 15:04:05 MST",
"Jan 2, 2006 3:04:05 PM MST",
"January 02, 2006 03:04 PM",
"January 02, 2006 15:04",
"January 02, 2006 15:04:05 MST",
"January 02, 2006",
"January 2, 2006 03:04 PM",
"January 2, 2006 15:04:05 MST",
"January 2, 2006 15:04:05",
"January 2, 2006",
"January 2, 2006, 3:04 p.m.",
"Mon 02 Jan 2006 15:04:05 -0700",
"Mon 2 Jan 2006 15:04:05 MST",
"Mon Jan 2 15:04 2006",
"Mon Jan 2 15:04:05 2006 MST",
"Mon, 02 Jan 06 15:04:05 MST",
"Mon, 02 Jan 2006 15:04 -0700",
"Mon, 02 Jan 2006 15:04 MST",
"Mon, 02 Jan 2006 15:04:05 --0700",
"Mon, 02 Jan 2006 15:04:05 -07",
"Mon, 02 Jan 2006 15:04:05 -0700",
"Mon, 02 Jan 2006 15:04:05 -07:00",
"Mon, 02 Jan 2006 15:04:05 00",
"Mon, 02 Jan 2006 15:04:05 MST -0700",
"Mon, 02 Jan 2006 15:04:05 MST",
"Mon, 02 Jan 2006 15:04:05 MST-07:00",
"Mon, 02 Jan 2006 15:04:05 UT",
"Mon, 02 Jan 2006 15:04:05 Z",
"Mon, 02 Jan 2006 15:04:05",
"Mon, 02 Jan 2006 15:04:05MST",
"Mon, 02 Jan 2006 3:04:05 PM MST",
"Mon, 02 Jan 2006",
"Mon, 02 January 2006",
"Mon, 2 Jan 06 15:04:05 -0700",
"Mon, 2 Jan 06 15:04:05 MST",
| {
i := &Includes{
Angular: Angular,
BootstrapCss: BootstrapCss,
BootstrapJs: BootstrapJs,
Jquery: Jquery,
JqueryUI: JqueryUI,
Underscore: Underscore,
MiniProfiler: c.Includes(r),
GoogleAnalyticsId: GOOGLE_ANALYTICS_ID,
GoogleAnalyticsHost: GOOGLE_ANALYTICS_HOST,
IsDev: isDevServer,
StripeKey: STRIPE_KEY,
StripePlans: STRIPE_PLANS,
}
if cu := user.Current(c); cu != nil {
gn := goon.FromContext(c)
user := &User{Id: cu.ID}
if err := gn.Get(user); err == nil { | identifier_body | |
main.rs | : {}, {}", x, y),
}
}
// C-like enum with implicit discriminants (Zero = 0, One = 1, Two = 2),
// cast to `i32` in `test_enum` below.
// NOTE(review): the name violates Rust's UpperCamelCase convention and trips
// the `non_camel_case_types` lint; left unchanged because `test_enum` refers to it.
enum Number_enum {
    Zero,
    One,
    Two,
}
// C-like enum with explicit discriminants: each variant holds the 24-bit
// RGB value of its color, printed as a hex color code in `test_enum`.
enum Color {
    Red = 0xff0000,
    Green = 0x00ff00,
    Blue = 0x0000ff,
}
/// Exercises the enum demos: builds one value per `WebEvent`-style variant,
/// dispatches each through `inspect`, calls a method on a variant, and prints
/// the integer discriminants of `Number_enum` and `Color`.
fn test_enum() {
    let pressed = KeyPress('x');
    // `to_owned()` creates an owned `String` from a string slice.
    let pasted = Paste("my text".to_owned());
    let click = Click { x: 20, y: 80 };
    let load = PageLoad;
    let unload = PageUnload;
    inspect(pressed);
    inspect(pasted);
    inspect(click);
    inspect(load);
    inspect(unload);
    // Renamed from `loadSec`: the old camelCase name trips the
    // `non_snake_case` lint for local bindings.
    let load_sec = WebEvent::PageLoad;
    println!("{}", &load_sec.run(20));
    // C-like enum variants can be cast to their integer discriminants.
    println!("zero is {}", Number_enum::Zero as i32);
    println!("one is {}", Number_enum::One as i32);
    // `{:06x}` renders the discriminant as a six-digit hex color code.
    println!("roses are #{:06x}", Color::Red as i32);
    println!("violets are #{:06x}", Color::Blue as i32);
}
/// Demonstrates shadowing and mutability: an inner `let` temporarily rebinds
/// the name to a `&str` for its scope, after which the outer mutable `i32`
/// binding is reassigned. The function has no observable effect.
fn test_var_bind() {
    let mut value = 2;
    {
        // Shadows the outer `i32` binding with a different type until
        // this scope ends.
        let value = "4";
    }
    value = 4;
}
// Demonstrates numeric casts with `as` (truncation, wrapping to narrower
// types, two's-complement reinterpretation) and literal type inference.
// The exact sequence of `println!`s is the observable behavior.
fn casting() {
    let decimal = 22.8832_f32;
    // float -> integer cast truncates toward zero: 22.8832 -> 22
    let integer = decimal as u8;
    println!("Integer: {}", integer);
    // `u8` is the only integer type that can be cast to `char`
    let character = integer as char;
    println!("character: {}", character);
    println!("1000 as a u16 is: {:b}", 1000 as u16);
    let num = 1000;
    // casting to a narrower type keeps only the low-order bits (mod 256)
    println!("1000 as a u8 is : {:b}", num as u8);
    println!(" -1 as a u8 is : {:b}", (-1i8) as u8);
    println!("1000 mod 256 is : {:b}", 1000 % 256);
    // Unless it already fits, of course.
    println!(" 128 as a i16 is: {:b} ({})", 128 as i16, 128 as i16);
    // 128 as u8 -> 128, whose two's complement in eight bits is:
    let num: i16 = 128;
    println!(" 128 as a i8 is : {:b} ({})", num as i8, num as i8);
    println!("127={:b}", 127_i8);
    println!("-128={:b}", -128_i8);
    println!("255={:b}", 255_u8);
    println!("0={:b}", 0_u8);
    // NOTE(review): the label says 255 but the value cast is `127_u8`
    // (prints "255= 127"); this was likely meant to be `255_u8 as i8`
    // (which prints -1) — confirm intent before changing the output.
    println!("255= {}", 127_u8 as i8);
    println!("0={:b}", 0_u8 as i8);
    // Suffixed literals pin the type exactly.
    let x = 1u8;
    let y = 2u32;
    let z = 3f32;
    // Unsuffixed literal, their types depend on how they are used
    let i = 1;
    let f = 1.0;
    // `size_of_val` returns the size of a variable in bytes
    println!("size of `x` in bytes: {}", std::mem::size_of_val(&x));
    println!("size of `y` in bytes: {}", std::mem::size_of_val(&y));
    println!("size of `z` in bytes: {}", std::mem::size_of_val(&z));
    println!("size of `i` in bytes: {}", std::mem::size_of_val(&i));
    println!("size of `f` in bytes: {}", std::mem::size_of_val(&f));
    let elem = 5u8;
    // Create an empty vector (a growable array).
    let mut vec = Vec::new();
    // At this point the compiler doesn't know the exact type of `vec`, it
    // just knows that it's a vector of something (`Vec<_>`).
    // Insert `elem` in the vector.
    vec.push(elem);
    // Aha! Now the compiler knows that `vec` is a vector of `u8`s (`Vec<u8>`)
    // TODO ^ Try commenting out the `vec.push(elem)` line
    println!("{:?}", vec);
}
use std::convert::From;
use std::convert::Into;
use std::convert::TryFrom;
use std::convert::TryInto;
#[derive(Debug)]
struct Number {
value: i32,
}
impl From<i32> for Number {
fn from(item: i32) -> Self {
Number { value: item }
}
}
impl fmt::Display for Number {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Number is: {}", self.value)
}
}
#[derive(Debug, PartialEq)]
struct EvenNumber(i32);
impl TryFrom<i32> for EvenNumber {
type Error = ();
fn try_from(value: i32) -> Result<Self, Self::Error> {
if value % 2 == 0 {
Ok(EvenNumber(value))
} else {
Err(())
}
}
}
fn conversion() {
let s = "Test";
let myString = String::from(s);
let b = myString.into_boxed_str();
let ptr = b.as_ptr();
println!("b:{:?}", ptr);
let ref_b = b.as_ref();
println!("s:{:?}", ref_b);
let number = Number::from(34_i32);
println!("{}", number);
let n = 5;
let num: i32 = n.into();
println!("My number is {:?}", num);
assert_eq!(EvenNumber::try_from(8), Ok(EvenNumber(8)));
assert_eq!(EvenNumber::try_from(5), Err(()));
println!("{:?}", EvenNumber::try_from(5));
let result: Result<EvenNumber, ()> = 8i32.try_into();
assert_eq!(result, Ok(EvenNumber(8)));
let result: Result<EvenNumber, ()> = 5i32.try_into();
assert_eq!(result, Err(()));
let parsed: i32 = "5".parse().unwrap();
let turbo_parsed = "10".parse::<i32>().unwrap();
let sum = parsed + turbo_parsed;
println!("Sum: {:?}", sum);
}
fn expression() {
let x0 = 2;
let sum = {
let x = 20_i8;
let y = 12;
x + y + x0
};
println!("Sum: {:?}", sum)
}
fn flowControl() {
let mut count = 0;
loop {
count += 1;
if count % 2 == 0 {
continue;
}
println!("Count: {}", count);
if count > 6 {
break;
}
}
let mut optional = Some(20);
match optional {
Some(i) => {
println!("Number: {:?}", i);
}
_ => {
println!("Not present");
}
}
if let Some(i) = Some(20) {
println!("Number is indeed: {}", i);
}
while let Some(i) = optional {
if i == 25 | else {
println!("`i` is `{:?}`. Try again.", i);
optional = Some(i + 1);
}
}
}
fn isDivisibleBy(lhs: u32, rhs: u32) -> bool {
if (rhs == 0) {
return false;
}
lhs % rhs == 0
}
impl Point {
fn origin() -> Point {
Point { x: 0.0, y: 0.0 }
}
fn new(x: f32, y: f32) -> Point {
Point { x: x, y: y }
}
fn distance(&self, p: &Point) -> f32 {
let dx = self.x - p.x;
let dy: f32 = self.y - p.y;
(dx * dy + dy + dy).sqrt()
}
fn translate(&mut self, dx: f32, dy: f32) {
self.x += dx;
self.y += dy;
}
}
fn functions() {
println!("isD: {:?}", isDivisibleBy(20, 4));
println!("isD: {:?}", isDivisibleBy(20, 3));
let mut point = Point::new(22.2, 32.3);
let mut origin = Point::origin();
println!("Distance: {:?}", point.distance(&origin));
point.translate(3.0, -2.0);
println!("Point: {:?}", point);
println!("Distance: {:?}", point.distance(&origin));
let x = 20;
let fun1 = |i: i32| -> i32 { i + 1 + x };
let fun2 = |i| i + | {
println!("Number is: {}", i);
optional = None;
} | conditional_block |
main.rs | let i = 1;
let f = 1.0;
// `size_of_val` returns the size of a variable in bytes
println!("size of `x` in bytes: {}", std::mem::size_of_val(&x));
println!("size of `y` in bytes: {}", std::mem::size_of_val(&y));
println!("size of `z` in bytes: {}", std::mem::size_of_val(&z));
println!("size of `i` in bytes: {}", std::mem::size_of_val(&i));
println!("size of `f` in bytes: {}", std::mem::size_of_val(&f));
let elem = 5u8;
// Create an empty vector (a growable array).
let mut vec = Vec::new();
// At this point the compiler doesn't know the exact type of `vec`, it
// just knows that it's a vector of something (`Vec<_>`).
// Insert `elem` in the vector.
vec.push(elem);
// Aha! Now the compiler knows that `vec` is a vector of `u8`s (`Vec<u8>`)
// TODO ^ Try commenting out the `vec.push(elem)` line
println!("{:?}", vec);
}
use std::convert::From;
use std::convert::Into;
use std::convert::TryFrom;
use std::convert::TryInto;
#[derive(Debug)]
struct Number {
value: i32,
}
impl From<i32> for Number {
fn from(item: i32) -> Self {
Number { value: item }
}
}
impl fmt::Display for Number {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Number is: {}", self.value)
}
}
#[derive(Debug, PartialEq)]
struct EvenNumber(i32);
impl TryFrom<i32> for EvenNumber {
type Error = ();
fn try_from(value: i32) -> Result<Self, Self::Error> {
if value % 2 == 0 {
Ok(EvenNumber(value))
} else {
Err(())
}
}
}
fn conversion() {
let s = "Test";
let myString = String::from(s);
let b = myString.into_boxed_str();
let ptr = b.as_ptr();
println!("b:{:?}", ptr);
let ref_b = b.as_ref();
println!("s:{:?}", ref_b);
let number = Number::from(34_i32);
println!("{}", number);
let n = 5;
let num: i32 = n.into();
println!("My number is {:?}", num);
assert_eq!(EvenNumber::try_from(8), Ok(EvenNumber(8)));
assert_eq!(EvenNumber::try_from(5), Err(()));
println!("{:?}", EvenNumber::try_from(5));
let result: Result<EvenNumber, ()> = 8i32.try_into();
assert_eq!(result, Ok(EvenNumber(8)));
let result: Result<EvenNumber, ()> = 5i32.try_into();
assert_eq!(result, Err(()));
let parsed: i32 = "5".parse().unwrap();
let turbo_parsed = "10".parse::<i32>().unwrap();
let sum = parsed + turbo_parsed;
println!("Sum: {:?}", sum);
}
fn expression() {
let x0 = 2;
let sum = {
let x = 20_i8;
let y = 12;
x + y + x0
};
println!("Sum: {:?}", sum)
}
fn flowControl() {
let mut count = 0;
loop {
count += 1;
if count % 2 == 0 {
continue;
}
println!("Count: {}", count);
if count > 6 {
break;
}
}
let mut optional = Some(20);
match optional {
Some(i) => {
println!("Number: {:?}", i);
}
_ => {
println!("Not present");
}
}
if let Some(i) = Some(20) {
println!("Number is indeed: {}", i);
}
while let Some(i) = optional {
if i == 25 {
println!("Number is: {}", i);
optional = None;
} else {
println!("`i` is `{:?}`. Try again.", i);
optional = Some(i + 1);
}
}
}
fn isDivisibleBy(lhs: u32, rhs: u32) -> bool {
if (rhs == 0) {
return false;
}
lhs % rhs == 0
}
impl Point {
fn origin() -> Point {
Point { x: 0.0, y: 0.0 }
}
fn new(x: f32, y: f32) -> Point {
Point { x: x, y: y }
}
fn distance(&self, p: &Point) -> f32 {
let dx = self.x - p.x;
let dy: f32 = self.y - p.y;
(dx * dy + dy + dy).sqrt()
}
fn translate(&mut self, dx: f32, dy: f32) {
self.x += dx;
self.y += dy;
}
}
fn functions() {
println!("isD: {:?}", isDivisibleBy(20, 4));
println!("isD: {:?}", isDivisibleBy(20, 3));
let mut point = Point::new(22.2, 32.3);
let mut origin = Point::origin();
println!("Distance: {:?}", point.distance(&origin));
point.translate(3.0, -2.0);
println!("Point: {:?}", point);
println!("Distance: {:?}", point.distance(&origin));
let x = 20;
let fun1 = |i: i32| -> i32 { i + 1 + x };
let fun2 = |i| i + 1 + x;
let i = 1;
println!("Inc1: {:?}", fun1(i));
println!("Inc2: {:?}", fun2(i));
let one = || 1;
println!("One: {:?}", one());
let color = "Green";
let print = || println!("Color: {:?}", color);
print();
let _reborow = &color;
print();
let mut count = 0;
let mut inc = || {
count += 1;
println!("Count: {:?}", count);
};
inc();
inc();
let reborrow = &mut count;
let movable = Box::new(3);
let consume = || {
println!("Movable: {:?}", movable);
mem::drop(movable);
};
consume();
let haystack = vec![1, 2, 3];
let contains = move |needle| haystack.contains(needle);
println!("{}", contains(&1));
println!("{}", contains(&2));
}
fn apply<F>(f: F)
where
F: FnOnce(),
{
f();
}
fn apply_to_3<F>(f: F) -> i32
where
F: Fn(i32) -> i32,
{
f(3)
}
fn call_me<F: Fn()>(f: F) {
f();
}
fn function() {
println!("I am function");
}
fn functions2() {
let x = 30;
println!("x: {:?}", x);
let y = apply_to_3(|x| x + 20);
println!("y: {:?}", y);
let greeting = "Hello";
let mut farewel = "Goodby".to_owned();
let diary = || {
println!("I said {}", greeting);
farewel.push_str("!!!!!");
println!("Than I screemed {}", farewel);
println!("Than I sleep");
mem::drop(farewel);
};
apply(diary);
let double = |x| 2 * x;
println!("3 doubled: {}", apply_to_3(double));
let closure = || println!("I am closure");
call_me(function);
call_me(closure);
}
fn create_fn() -> impl Fn() {
let text = "Fn".to_owned();
move || println!("This is a text: {}", text)
}
fn create_fn_mut() -> impl FnMut() {
let text = "FnMut".to_owned();
move || println!("This is a text: {}", text)
}
fn create_fn_once() -> impl FnOnce() {
let text = "FnOnce".to_owned();
move || println!("This is a: {}", text)
}
fn functions3() {
let x = create_fn();
x();
let mut y = create_fn_mut();
y();
let z = create_fn_once();
z();
let v1 = vec![1, 2, 3];
let v2 = vec![4, 5, 6];
println!("2 in v1: {}", v1.iter().any(|&x| x == 2));
println!("2 in v2: {}", v2.iter().any(|&x| x == 2));
let a1 = [1, 2, 3];
let a2 = [4, 5, 6];
| random_line_split | ||
main.rs | );
call_me(closure);
}
fn create_fn() -> impl Fn() {
let text = "Fn".to_owned();
move || println!("This is a text: {}", text)
}
fn create_fn_mut() -> impl FnMut() {
let text = "FnMut".to_owned();
move || println!("This is a text: {}", text)
}
fn create_fn_once() -> impl FnOnce() {
let text = "FnOnce".to_owned();
move || println!("This is a: {}", text)
}
fn functions3() {
let x = create_fn();
x();
let mut y = create_fn_mut();
y();
let z = create_fn_once();
z();
let v1 = vec![1, 2, 3];
let v2 = vec![4, 5, 6];
println!("2 in v1: {}", v1.iter().any(|&x| x == 2));
println!("2 in v2: {}", v2.iter().any(|&x| x == 2));
let a1 = [1, 2, 3];
let a2 = [4, 5, 6];
println!("2 in v1: {}", a1.iter().any(|&x| x == 2));
println!("2 in v2: {}", a2.iter().any(|&x| x == 2));
let mut iter1 = v1.iter();
let mut into_iter = v2.into_iter();
println!("Find 2 in v1: {:?}", iter1.find(|&&x| x == 2));
println!("Find 2 in v1: {:?}", into_iter.find(|&x| x == 2));
let array1 = [1, 2, 3];
let array2 = [4, 5, 6];
println!("Find 2 in v1: {:?}", array1.iter().find(|&&x| x == 2));
println!("Find 2 in v1: {:?}", array2.into_iter().find(|&&x| x == 2));
let index_of_first_even_number = array1.iter().position(|x| x % 2 == 0);
println!(
"index_of_first_even_number: {}",
index_of_first_even_number.unwrap()
);
}
fn is_odd(n: u32) -> bool {
n % 2 == 1
}
fn foo() -> () {
()
}
fn higherOrder() {
let upper = 1000;
let mut acc = 0;
for n in 0.. {
let n_squared = n * n;
if n_squared > upper {
break;
} else if is_odd(n_squared) {
acc += n_squared;
}
}
println!("Sum1: {:?}", acc);
let sum2 = (0..)
.map(|n| n * n)
.take_while(|&n_squared| n_squared < upper)
.filter(|&n_squared| n_squared % 2 == 1)
.fold(0, |acc, n_squared| acc + n_squared);
println!("Sum2: {:?}", sum2);
fn sum_odd_numbers(up_to: u32) -> u32 {
let mut acc = 0;
for i in 0..up_to {
// Notice that the return type of this match expression must be u32
// because of the type of the "addition" variable.
let addition: u32 = match i % 2 == 1 {
// The "i" variable is of type u32, which is perfectly fine.
true => i,
// On the other hand, the "continue" expression does not return
// u32, but it is still fine, because it never returns and therefore
// does not violate the type requirements of the match expression.
false => continue,
};
acc += addition;
}
acc
}
println!(
"Sum of odd numbers up to 9 (excluding): {}",
sum_odd_numbers(9)
);
}
struct S;
struct GenericVal<T>(T);
impl GenericVal<i32> {}
impl GenericVal<S> {}
impl<T> GenericVal<T> {}
struct Val {
val: f64,
}
struct GenVal<T> {
gen_val: T,
}
// impl of Val
impl Val {
fn value(&self) -> &f64 {
&self.val
}
}
// impl of GenVal for a generic type `T`
impl<T> GenVal<T> {
fn value(&self) -> &T {
&self.gen_val
}
}
fn generics() {
let x = Val { val: 3.0 };
let y = GenVal { gen_val: 3i32 };
println!("{}, {}", x.value(), y.value());
}
fn create_box() {
let _box = Box::new(3i32);
}
struct ToDrop;
impl Drop for ToDrop {
fn drop(&mut self) {
println!("ToDrop is being dropped");
}
}
fn destroy_box(c: Box<i32>) {
println!("Destroying a box that contains {}", c);
// `c` is destroyed and the memory freed
}
fn scoping() {
/* create_box();
let _box2 = Box::new(5i32);
{
let _box3 = Box::new(4i32);
}
let x = ToDrop;
{
let y = ToDrop;
}*/
let x = 5u32;
let y = x;
println!("x is {}, and y is {}", x, y);
let a = Box::new(5i32);
let mut b = a;
*b = 30i32;
//destroy_box(b);
println!("{}", b);
}
fn ownership() {
let a = Box::new(5i32);
let mut b = a;
*b = 4;
println!("{}", b);
}
fn eat_box(boxed: Box<i32>) {
println!("{}", boxed);
}
fn borrow(borrowed: &i32) {
println!("{}", borrowed);
}
fn borrowing() {
let boxed = Box::new(5_i32);
let stacked = 6_i32;
borrow(&boxed);
borrow(&stacked);
{
let refTo: &i32 = &boxed;
borrow(refTo);
}
eat_box(boxed);
}
#[derive(Clone, Copy)]
struct Book {
author: &'static str,
title: &'static str,
year: u32,
}
fn borrow_book(book: &Book) {
println!("I immutably borrowed {} - {}", book.author, book.title);
}
fn new_edition(book: &mut Book) {
book.year = 2014;
println!("I mutably borrowed {} - {} edition", book.title, book.year);
}
fn mutability() {
let immutable_book = Book {
author: "Ivan Cankar",
title: "Hlapci",
year: 1910,
};
let mut mutable_book = immutable_book;
borrow_book(&immutable_book);
borrow_book(&mutable_book);
new_edition(&mut mutable_book);
}
struct Location {
x: i32,
y: i32,
z: i32,
}
fn aliasing() {
let mut location = Location { x: 0, y: 0, z: 0 };
let borrow1 = &location;
let borrow2 = &location;
println!("{} {}", location.x, borrow1.x);
//let mut_borow = &mut location;
println!(
"Location has coordinates: ({}, {}, {})",
borrow1.x, borrow2.y, location.z
);
let mut_borow = &mut location;
mut_borow.x = 10;
mut_borow.y = 23;
mut_borow.z = 29;
let borrow3 = &location;
}
#[derive(PartialEq, PartialOrd)]
struct Centimeters(f64);
#[derive(Debug)]
struct Inches(i32);
impl Inches {
fn to_centimeters(&self) -> Centimeters {
let &Inches(inches) = self;
Centimeters(inches as f64 * 2.54)
}
}
struct Seconds(i32);
fn deriveTest() {
let one_second = Seconds(1);
let foot = Inches(12);
println!("One foot equals: {:?}", foot);
let meter = Centimeters(100.0);
let cmp = if foot.to_centimeters() < meter {
"smaller"
} else {
"bigger"
};
println!("One foot is {} than one meter.", cmp);
}
struct Sheep {}
struct Cow {}
trait Animal {
fn noise(&self) -> &'static str;
}
impl Animal for Sheep {
fn noise(&self) -> &'static str {
"baaah"
}
}
impl Animal for Cow {
fn noise(&self) -> &'static str {
"moooooo"
}
}
fn random_animal(random_number: f64) -> Box<dyn Animal> {
if random_number < 0.5 {
Box::new(Sheep {})
} else {
Box::new(Cow {})
}
}
fn | dyReturn | identifier_name | |
main.rs | println!(" -1 as a u8 is : {:b}", (-1i8) as u8);
println!("1000 mod 256 is : {:b}", 1000 % 256);
// Unless it already fits, of course.
println!(" 128 as a i16 is: {:b} ({})", 128 as i16, 128 as i16);
// 128 as u8 -> 128, whose two's complement in eight bits is:
let num: i16 = 128;
println!(" 128 as a i8 is : {:b} ({})", num as i8, num as i8);
println!("127={:b}", 127_i8);
println!("-128={:b}", -128_i8);
println!("255={:b}", 255_u8);
println!("0={:b}", 0_u8);
println!("255= {}", 127_u8 as i8);
println!("0={:b}", 0_u8 as i8);
let x = 1u8;
let y = 2u32;
let z = 3f32;
// Unsuffixed literal, their types depend on how they are used
let i = 1;
let f = 1.0;
// `size_of_val` returns the size of a variable in bytes
println!("size of `x` in bytes: {}", std::mem::size_of_val(&x));
println!("size of `y` in bytes: {}", std::mem::size_of_val(&y));
println!("size of `z` in bytes: {}", std::mem::size_of_val(&z));
println!("size of `i` in bytes: {}", std::mem::size_of_val(&i));
println!("size of `f` in bytes: {}", std::mem::size_of_val(&f));
let elem = 5u8;
// Create an empty vector (a growable array).
let mut vec = Vec::new();
// At this point the compiler doesn't know the exact type of `vec`, it
// just knows that it's a vector of something (`Vec<_>`).
// Insert `elem` in the vector.
vec.push(elem);
// Aha! Now the compiler knows that `vec` is a vector of `u8`s (`Vec<u8>`)
// TODO ^ Try commenting out the `vec.push(elem)` line
println!("{:?}", vec);
}
use std::convert::From;
use std::convert::Into;
use std::convert::TryFrom;
use std::convert::TryInto;
#[derive(Debug)]
struct Number {
value: i32,
}
impl From<i32> for Number {
fn from(item: i32) -> Self {
Number { value: item }
}
}
impl fmt::Display for Number {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Number is: {}", self.value)
}
}
#[derive(Debug, PartialEq)]
struct EvenNumber(i32);
impl TryFrom<i32> for EvenNumber {
type Error = ();
fn try_from(value: i32) -> Result<Self, Self::Error> {
if value % 2 == 0 {
Ok(EvenNumber(value))
} else {
Err(())
}
}
}
fn conversion() {
let s = "Test";
let myString = String::from(s);
let b = myString.into_boxed_str();
let ptr = b.as_ptr();
println!("b:{:?}", ptr);
let ref_b = b.as_ref();
println!("s:{:?}", ref_b);
let number = Number::from(34_i32);
println!("{}", number);
let n = 5;
let num: i32 = n.into();
println!("My number is {:?}", num);
assert_eq!(EvenNumber::try_from(8), Ok(EvenNumber(8)));
assert_eq!(EvenNumber::try_from(5), Err(()));
println!("{:?}", EvenNumber::try_from(5));
let result: Result<EvenNumber, ()> = 8i32.try_into();
assert_eq!(result, Ok(EvenNumber(8)));
let result: Result<EvenNumber, ()> = 5i32.try_into();
assert_eq!(result, Err(()));
let parsed: i32 = "5".parse().unwrap();
let turbo_parsed = "10".parse::<i32>().unwrap();
let sum = parsed + turbo_parsed;
println!("Sum: {:?}", sum);
}
fn expression() {
let x0 = 2;
let sum = {
let x = 20_i8;
let y = 12;
x + y + x0
};
println!("Sum: {:?}", sum)
}
fn flowControl() {
let mut count = 0;
loop {
count += 1;
if count % 2 == 0 {
continue;
}
println!("Count: {}", count);
if count > 6 {
break;
}
}
let mut optional = Some(20);
match optional {
Some(i) => {
println!("Number: {:?}", i);
}
_ => {
println!("Not present");
}
}
if let Some(i) = Some(20) {
println!("Number is indeed: {}", i);
}
while let Some(i) = optional {
if i == 25 {
println!("Number is: {}", i);
optional = None;
} else {
println!("`i` is `{:?}`. Try again.", i);
optional = Some(i + 1);
}
}
}
fn isDivisibleBy(lhs: u32, rhs: u32) -> bool {
if (rhs == 0) {
return false;
}
lhs % rhs == 0
}
impl Point {
fn origin() -> Point {
Point { x: 0.0, y: 0.0 }
}
fn new(x: f32, y: f32) -> Point {
Point { x: x, y: y }
}
fn distance(&self, p: &Point) -> f32 {
let dx = self.x - p.x;
let dy: f32 = self.y - p.y;
(dx * dy + dy + dy).sqrt()
}
fn translate(&mut self, dx: f32, dy: f32) {
self.x += dx;
self.y += dy;
}
}
fn functions() {
println!("isD: {:?}", isDivisibleBy(20, 4));
println!("isD: {:?}", isDivisibleBy(20, 3));
let mut point = Point::new(22.2, 32.3);
let mut origin = Point::origin();
println!("Distance: {:?}", point.distance(&origin));
point.translate(3.0, -2.0);
println!("Point: {:?}", point);
println!("Distance: {:?}", point.distance(&origin));
let x = 20;
let fun1 = |i: i32| -> i32 { i + 1 + x };
let fun2 = |i| i + 1 + x;
let i = 1;
println!("Inc1: {:?}", fun1(i));
println!("Inc2: {:?}", fun2(i));
let one = || 1;
println!("One: {:?}", one());
let color = "Green";
let print = || println!("Color: {:?}", color);
print();
let _reborow = &color;
print();
let mut count = 0;
let mut inc = || {
count += 1;
println!("Count: {:?}", count);
};
inc();
inc();
let reborrow = &mut count;
let movable = Box::new(3);
let consume = || {
println!("Movable: {:?}", movable);
mem::drop(movable);
};
consume();
let haystack = vec![1, 2, 3];
let contains = move |needle| haystack.contains(needle);
println!("{}", contains(&1));
println!("{}", contains(&2));
}
fn apply<F>(f: F)
where
F: FnOnce(),
{
f();
}
fn apply_to_3<F>(f: F) -> i32
where
F: Fn(i32) -> i32,
{
f(3)
}
fn call_me<F: Fn()>(f: F) {
f();
}
fn function() {
println!("I am function");
}
fn functions2() | {
let x = 30;
println!("x: {:?}", x);
let y = apply_to_3(|x| x + 20);
println!("y: {:?}", y);
let greeting = "Hello";
let mut farewel = "Goodby".to_owned();
let diary = || {
println!("I said {}", greeting);
farewel.push_str("!!!!!");
println!("Than I screemed {}", farewel);
println!("Than I sleep");
| identifier_body | |
dec.rs | tuple (lhs, rhs).
Exp::InfixApp(lhs, func, rhs) => {
let val_info = get_val_info(&cx.env, *func)?;
let func_ty = instantiate(st, &val_info.ty_scheme);
let lhs_ty = ck_exp(cx, st, lhs)?;
let rhs_ty = ck_exp(cx, st, rhs)?;
let ret_ty = Ty::Var(st.new_ty_var(false));
let arrow_ty = Ty::Arrow(Ty::pair(lhs_ty, rhs_ty).into(), ret_ty.clone().into());
st.unify(exp.loc, func_ty, arrow_ty)?;
Ok(ret_ty)
}
// SML Definition (9)
Exp::Typed(inner, ty) => {
let exp_ty = ck_exp(cx, st, inner)?;
let ty_ty = ty::ck(cx, &st.tys, ty)?;
st.unify(exp.loc, ty_ty, exp_ty.clone())?;
Ok(exp_ty)
}
// SML Definition Appendix A - boolean operators are sugar for `if`
Exp::Andalso(lhs, rhs) | Exp::Orelse(lhs, rhs) => {
let lhs_ty = ck_exp(cx, st, lhs)?;
let rhs_ty = ck_exp(cx, st, rhs)?;
st.unify(lhs.loc, Ty::BOOL, lhs_ty)?;
st.unify(rhs.loc, Ty::BOOL, rhs_ty)?;
Ok(Ty::BOOL)
}
// SML Definition (10)
Exp::Handle(head, cases) => {
let head_ty = ck_exp(cx, st, head)?;
let (pats, arg_ty, res_ty) = ck_cases(cx, st, cases)?;
exhaustive::ck_handle(pats)?;
st.unify(exp.loc, Ty::EXN, arg_ty)?;
st.unify(exp.loc, head_ty.clone(), res_ty)?;
Ok(head_ty)
}
// SML Definition (11)
Exp::Raise(exp) => {
let exp_ty = ck_exp(cx, st, exp)?;
st.unify(exp.loc, Ty::EXN, exp_ty)?;
Ok(Ty::Var(st.new_ty_var(false)))
}
// SML Definition Appendix A - `if` is sugar for casing
Exp::If(cond, then_e, else_e) => {
let cond_ty = ck_exp(cx, st, cond)?;
let then_ty = ck_exp(cx, st, then_e)?;
let else_ty = ck_exp(cx, st, else_e)?;
st.unify(cond.loc, Ty::BOOL, cond_ty)?;
st.unify(exp.loc, then_ty.clone(), else_ty)?;
Ok(then_ty)
}
Exp::While(..) => Err(exp.loc.wrap(Error::Todo("`while`"))),
// SML Definition Appendix A - `case` is sugar for application to a `fn`
Exp::Case(head, cases) => {
let head_ty = ck_exp(cx, st, head)?;
let (pats, arg_ty, res_ty) = ck_cases(cx, st, cases)?;
exhaustive::ck_match(pats, exp.loc)?;
st.unify(exp.loc, head_ty, arg_ty)?;
Ok(res_ty)
}
// SML Definition (12)
Exp::Fn(cases) => {
let (pats, arg_ty, res_ty) = ck_cases(cx, st, cases)?;
exhaustive::ck_match(pats, exp.loc)?;
Ok(Ty::Arrow(arg_ty.into(), res_ty.into()))
}
}
}
/// SML Definition (13)
fn ck_cases(cx: &Cx, st: &mut State, cases: &Cases<StrRef>) -> Result<(Vec<Located<Pat>>, Ty, Ty)> {
let arg_ty = Ty::Var(st.new_ty_var(false));
let res_ty = Ty::Var(st.new_ty_var(false));
let mut pats = Vec::with_capacity(cases.arms.len());
// SML Definition (14)
for arm in cases.arms.iter() {
let (val_env, pat_ty, pat) = pat::ck(cx, st, &arm.pat)?;
pats.push(arm.pat.loc.wrap(pat));
let mut cx = cx.clone();
cx.env.val_env.extend(val_env);
let exp_ty = ck_exp(&cx, st, &arm.exp)?;
st.unify(arm.pat.loc, arg_ty.clone(), pat_ty)?;
st.unify(arm.exp.loc, res_ty.clone(), exp_ty)?;
}
Ok((pats, arg_ty, res_ty))
}
/// Returns `Ok(())` iff `name` is not a forbidden binding name. TODO there are more of these in
/// certain situations
fn ck_binding(name: Located<StrRef>) -> Result<()> {
let val = name.val;
if val == StrRef::TRUE
|| val == StrRef::FALSE
|| val == StrRef::NIL
|| val == StrRef::CONS
|| val == StrRef::REF
{
return Err(name.loc.wrap(Error::ForbiddenBinding(name.val)));
}
Ok(())
}
struct FunInfo {
args: Vec<TyVar>,
ret: TyVar,
}
fn fun_infos_to_ve(fun_infos: &HashMap<StrRef, FunInfo>) -> ValEnv |
pub fn ck(cx: &Cx, st: &mut State, dec: &Located<Dec<StrRef>>) -> Result<Env> {
match &dec.val {
// SML Definition (15)
Dec::Val(ty_vars, val_binds) => {
let mut cx_cl;
let cx = if ty_vars.is_empty() {
cx
} else {
cx_cl = cx.clone();
insert_ty_vars(&mut cx_cl, st, ty_vars)?;
&cx_cl
};
let mut val_env = ValEnv::new();
// SML Definition (25)
for val_bind in val_binds {
// SML Definition (26)
if val_bind.rec {
return Err(dec.loc.wrap(Error::Todo("recursive val binds")));
}
let (other, pat_ty, pat) = pat::ck(cx, st, &val_bind.pat)?;
for &name in other.keys() {
ck_binding(val_bind.pat.loc.wrap(name))?;
}
let exp_ty = ck_exp(cx, st, &val_bind.exp)?;
st.unify(dec.loc, pat_ty.clone(), exp_ty)?;
exhaustive::ck_bind(pat, val_bind.pat.loc)?;
for (name, mut val_info) in other {
generalize(cx, st, ty_vars, &mut val_info.ty_scheme);
let name = val_bind.pat.loc.wrap(name);
env_ins(&mut val_env, name, val_info, Item::Val)?;
}
}
Ok(val_env.into())
}
// SML Definition Appendix A - `fun` is sugar for `val rec` and `case`
Dec::Fun(ty_vars, fval_binds) => {
let mut cx_cl;
let cx = if ty_vars.is_empty() {
cx
} else {
cx_cl = cx.clone();
insert_ty_vars(&mut cx_cl, st, ty_vars)?;
&cx_cl
};
let mut fun_infos = HashMap::with_capacity(fval_binds.len());
for fval_bind in fval_binds {
let first = fval_bind.cases.first().unwrap();
let info = FunInfo {
args: first.pats.iter().map(|_| st.new_ty_var(false)).collect(),
ret: st.new_ty_var(false),
};
// copied from env_ins in util
if fun_infos.insert(first.vid.val, info).is_some() {
let err = Error::Duplicate(Item::Val, first.vid.val);
return Err(first.vid.loc.wrap(err));
}
}
for fval_bind in fval_binds {
let name = fval_bind.cases.first().unwrap().vid.val;
let info = fun_infos.get(&name).unwrap();
let mut arg_pats = Vec::with_capacity(fval_bind.cases.len());
for case in fval_bind.cases.iter() {
if name != case.vid.val {
let err = Error::FunDecNameMismatch(name, case.vid.val);
return Err(case.vid.loc.wrap(err));
}
if info.args.len() != case.pats.len() {
let err = Error::FunDecWrongNumPats(info.args.len(), case.pats.len());
let begin = case.pats.first().unwrap().loc;
let end = case.pats.last().unwrap().loc;
return Err(begin.span(end).wrap(err));
}
let mut pats_val_env = ValEnv::new();
let mut arg_pat = Vec::with_capacity(info.args.len());
for (pat, &tv) in case.pats.iter().zip(info.args.iter()) {
let (ve, pat_ty, new_pat) = pat::ck(cx, st, pat)?;
st.unify(pat.loc, Ty::Var(tv), pat_ty)?;
env_merge(&mut pats_val_env, | {
fun_infos
.iter()
.map(|(&name, fun_info)| {
let ty = fun_info
.args
.iter()
.rev()
.fold(Ty::Var(fun_info.ret), |ac, &tv| {
Ty::Arrow(Ty::Var(tv).into(), ac.into())
});
(name, ValInfo::val(TyScheme::mono(ty)))
})
.collect()
} | identifier_body |
dec.rs | a tuple (lhs, rhs).
Exp::InfixApp(lhs, func, rhs) => {
let val_info = get_val_info(&cx.env, *func)?;
let func_ty = instantiate(st, &val_info.ty_scheme);
let lhs_ty = ck_exp(cx, st, lhs)?;
let rhs_ty = ck_exp(cx, st, rhs)?;
let ret_ty = Ty::Var(st.new_ty_var(false));
let arrow_ty = Ty::Arrow(Ty::pair(lhs_ty, rhs_ty).into(), ret_ty.clone().into());
st.unify(exp.loc, func_ty, arrow_ty)?;
Ok(ret_ty)
}
// SML Definition (9)
Exp::Typed(inner, ty) => {
let exp_ty = ck_exp(cx, st, inner)?;
let ty_ty = ty::ck(cx, &st.tys, ty)?;
st.unify(exp.loc, ty_ty, exp_ty.clone())?;
Ok(exp_ty)
}
// SML Definition Appendix A - boolean operators are sugar for `if`
Exp::Andalso(lhs, rhs) | Exp::Orelse(lhs, rhs) => {
let lhs_ty = ck_exp(cx, st, lhs)?;
let rhs_ty = ck_exp(cx, st, rhs)?;
st.unify(lhs.loc, Ty::BOOL, lhs_ty)?;
st.unify(rhs.loc, Ty::BOOL, rhs_ty)?;
Ok(Ty::BOOL)
}
// SML Definition (10)
Exp::Handle(head, cases) => {
let head_ty = ck_exp(cx, st, head)?;
let (pats, arg_ty, res_ty) = ck_cases(cx, st, cases)?;
exhaustive::ck_handle(pats)?;
st.unify(exp.loc, Ty::EXN, arg_ty)?;
st.unify(exp.loc, head_ty.clone(), res_ty)?;
Ok(head_ty)
}
// SML Definition (11)
Exp::Raise(exp) => {
let exp_ty = ck_exp(cx, st, exp)?;
st.unify(exp.loc, Ty::EXN, exp_ty)?;
Ok(Ty::Var(st.new_ty_var(false)))
}
// SML Definition Appendix A - `if` is sugar for casing
Exp::If(cond, then_e, else_e) => {
let cond_ty = ck_exp(cx, st, cond)?;
let then_ty = ck_exp(cx, st, then_e)?;
let else_ty = ck_exp(cx, st, else_e)?;
st.unify(cond.loc, Ty::BOOL, cond_ty)?;
st.unify(exp.loc, then_ty.clone(), else_ty)?;
Ok(then_ty)
}
Exp::While(..) => Err(exp.loc.wrap(Error::Todo("`while`"))),
// SML Definition Appendix A - `case` is sugar for application to a `fn`
Exp::Case(head, cases) => {
let head_ty = ck_exp(cx, st, head)?;
let (pats, arg_ty, res_ty) = ck_cases(cx, st, cases)?;
exhaustive::ck_match(pats, exp.loc)?;
st.unify(exp.loc, head_ty, arg_ty)?;
Ok(res_ty)
}
// SML Definition (12)
Exp::Fn(cases) => {
let (pats, arg_ty, res_ty) = ck_cases(cx, st, cases)?;
exhaustive::ck_match(pats, exp.loc)?;
Ok(Ty::Arrow(arg_ty.into(), res_ty.into()))
}
}
}
/// SML Definition (13)
fn ck_cases(cx: &Cx, st: &mut State, cases: &Cases<StrRef>) -> Result<(Vec<Located<Pat>>, Ty, Ty)> {
let arg_ty = Ty::Var(st.new_ty_var(false));
let res_ty = Ty::Var(st.new_ty_var(false));
let mut pats = Vec::with_capacity(cases.arms.len());
// SML Definition (14)
for arm in cases.arms.iter() {
let (val_env, pat_ty, pat) = pat::ck(cx, st, &arm.pat)?;
pats.push(arm.pat.loc.wrap(pat));
let mut cx = cx.clone();
cx.env.val_env.extend(val_env);
let exp_ty = ck_exp(&cx, st, &arm.exp)?;
st.unify(arm.pat.loc, arg_ty.clone(), pat_ty)?;
st.unify(arm.exp.loc, res_ty.clone(), exp_ty)?;
}
Ok((pats, arg_ty, res_ty))
}
/// Returns `Ok(())` iff `name` is not a forbidden binding name. TODO there are more of these in
/// certain situations
fn ck_binding(name: Located<StrRef>) -> Result<()> {
let val = name.val;
if val == StrRef::TRUE
|| val == StrRef::FALSE
|| val == StrRef::NIL
|| val == StrRef::CONS
|| val == StrRef::REF
{
return Err(name.loc.wrap(Error::ForbiddenBinding(name.val)));
}
Ok(())
}
struct FunInfo {
args: Vec<TyVar>,
ret: TyVar,
}
fn fun_infos_to_ve(fun_infos: &HashMap<StrRef, FunInfo>) -> ValEnv {
fun_infos
.iter()
.map(|(&name, fun_info)| {
let ty = fun_info
.args
.iter()
.rev()
.fold(Ty::Var(fun_info.ret), |ac, &tv| {
Ty::Arrow(Ty::Var(tv).into(), ac.into())
});
(name, ValInfo::val(TyScheme::mono(ty)))
})
.collect()
}
pub fn ck(cx: &Cx, st: &mut State, dec: &Located<Dec<StrRef>>) -> Result<Env> {
match &dec.val {
// SML Definition (15)
Dec::Val(ty_vars, val_binds) => {
let mut cx_cl;
let cx = if ty_vars.is_empty() {
cx
} else {
cx_cl = cx.clone();
insert_ty_vars(&mut cx_cl, st, ty_vars)?;
&cx_cl
};
let mut val_env = ValEnv::new();
// SML Definition (25)
for val_bind in val_binds {
// SML Definition (26)
if val_bind.rec {
return Err(dec.loc.wrap(Error::Todo("recursive val binds")));
}
let (other, pat_ty, pat) = pat::ck(cx, st, &val_bind.pat)?;
for &name in other.keys() {
ck_binding(val_bind.pat.loc.wrap(name))?;
}
let exp_ty = ck_exp(cx, st, &val_bind.exp)?;
st.unify(dec.loc, pat_ty.clone(), exp_ty)?;
exhaustive::ck_bind(pat, val_bind.pat.loc)?;
for (name, mut val_info) in other {
generalize(cx, st, ty_vars, &mut val_info.ty_scheme);
let name = val_bind.pat.loc.wrap(name);
env_ins(&mut val_env, name, val_info, Item::Val)?;
}
}
Ok(val_env.into())
}
// SML Definition Appendix A - `fun` is sugar for `val rec` and `case` | Dec::Fun(ty_vars, fval_binds) => {
let mut cx_cl;
let cx = if ty_vars.is_empty() {
cx
} else {
cx_cl = cx.clone();
insert_ty_vars(&mut cx_cl, st, ty_vars)?;
&cx_cl
};
let mut fun_infos = HashMap::with_capacity(fval_binds.len());
for fval_bind in fval_binds {
let first = fval_bind.cases.first().unwrap();
let info = FunInfo {
args: first.pats.iter().map(|_| st.new_ty_var(false)).collect(),
ret: st.new_ty_var(false),
};
// copied from env_ins in util
if fun_infos.insert(first.vid.val, info).is_some() {
let err = Error::Duplicate(Item::Val, first.vid.val);
return Err(first.vid.loc.wrap(err));
}
}
for fval_bind in fval_binds {
let name = fval_bind.cases.first().unwrap().vid.val;
let info = fun_infos.get(&name).unwrap();
let mut arg_pats = Vec::with_capacity(fval_bind.cases.len());
for case in fval_bind.cases.iter() {
if name != case.vid.val {
let err = Error::FunDecNameMismatch(name, case.vid.val);
return Err(case.vid.loc.wrap(err));
}
if info.args.len() != case.pats.len() {
let err = Error::FunDecWrongNumPats(info.args.len(), case.pats.len());
let begin = case.pats.first().unwrap().loc;
let end = case.pats.last().unwrap().loc;
return Err(begin.span(end).wrap(err));
}
let mut pats_val_env = ValEnv::new();
let mut arg_pat = Vec::with_capacity(info.args.len());
for (pat, &tv) in case.pats.iter().zip(info.args.iter()) {
let (ve, pat_ty, new_pat) = pat::ck(cx, st, pat)?;
st.unify(pat.loc, Ty::Var(tv), pat_ty)?;
env_merge(&mut pats_val_env, ve | random_line_split | |
dec.rs | a tuple (lhs, rhs).
Exp::InfixApp(lhs, func, rhs) => {
let val_info = get_val_info(&cx.env, *func)?;
let func_ty = instantiate(st, &val_info.ty_scheme);
let lhs_ty = ck_exp(cx, st, lhs)?;
let rhs_ty = ck_exp(cx, st, rhs)?;
let ret_ty = Ty::Var(st.new_ty_var(false));
let arrow_ty = Ty::Arrow(Ty::pair(lhs_ty, rhs_ty).into(), ret_ty.clone().into());
st.unify(exp.loc, func_ty, arrow_ty)?;
Ok(ret_ty)
}
// SML Definition (9)
Exp::Typed(inner, ty) => {
let exp_ty = ck_exp(cx, st, inner)?;
let ty_ty = ty::ck(cx, &st.tys, ty)?;
st.unify(exp.loc, ty_ty, exp_ty.clone())?;
Ok(exp_ty)
}
// SML Definition Appendix A - boolean operators are sugar for `if`
Exp::Andalso(lhs, rhs) | Exp::Orelse(lhs, rhs) => {
let lhs_ty = ck_exp(cx, st, lhs)?;
let rhs_ty = ck_exp(cx, st, rhs)?;
st.unify(lhs.loc, Ty::BOOL, lhs_ty)?;
st.unify(rhs.loc, Ty::BOOL, rhs_ty)?;
Ok(Ty::BOOL)
}
// SML Definition (10)
Exp::Handle(head, cases) => {
let head_ty = ck_exp(cx, st, head)?;
let (pats, arg_ty, res_ty) = ck_cases(cx, st, cases)?;
exhaustive::ck_handle(pats)?;
st.unify(exp.loc, Ty::EXN, arg_ty)?;
st.unify(exp.loc, head_ty.clone(), res_ty)?;
Ok(head_ty)
}
// SML Definition (11)
Exp::Raise(exp) => {
let exp_ty = ck_exp(cx, st, exp)?;
st.unify(exp.loc, Ty::EXN, exp_ty)?;
Ok(Ty::Var(st.new_ty_var(false)))
}
// SML Definition Appendix A - `if` is sugar for casing
Exp::If(cond, then_e, else_e) => {
let cond_ty = ck_exp(cx, st, cond)?;
let then_ty = ck_exp(cx, st, then_e)?;
let else_ty = ck_exp(cx, st, else_e)?;
st.unify(cond.loc, Ty::BOOL, cond_ty)?;
st.unify(exp.loc, then_ty.clone(), else_ty)?;
Ok(then_ty)
}
Exp::While(..) => Err(exp.loc.wrap(Error::Todo("`while`"))),
// SML Definition Appendix A - `case` is sugar for application to a `fn`
Exp::Case(head, cases) => {
let head_ty = ck_exp(cx, st, head)?;
let (pats, arg_ty, res_ty) = ck_cases(cx, st, cases)?;
exhaustive::ck_match(pats, exp.loc)?;
st.unify(exp.loc, head_ty, arg_ty)?;
Ok(res_ty)
}
// SML Definition (12)
Exp::Fn(cases) => {
let (pats, arg_ty, res_ty) = ck_cases(cx, st, cases)?;
exhaustive::ck_match(pats, exp.loc)?;
Ok(Ty::Arrow(arg_ty.into(), res_ty.into()))
}
}
}
/// SML Definition (13)
fn | (cx: &Cx, st: &mut State, cases: &Cases<StrRef>) -> Result<(Vec<Located<Pat>>, Ty, Ty)> {
let arg_ty = Ty::Var(st.new_ty_var(false));
let res_ty = Ty::Var(st.new_ty_var(false));
let mut pats = Vec::with_capacity(cases.arms.len());
// SML Definition (14)
for arm in cases.arms.iter() {
let (val_env, pat_ty, pat) = pat::ck(cx, st, &arm.pat)?;
pats.push(arm.pat.loc.wrap(pat));
let mut cx = cx.clone();
cx.env.val_env.extend(val_env);
let exp_ty = ck_exp(&cx, st, &arm.exp)?;
st.unify(arm.pat.loc, arg_ty.clone(), pat_ty)?;
st.unify(arm.exp.loc, res_ty.clone(), exp_ty)?;
}
Ok((pats, arg_ty, res_ty))
}
/// Returns `Ok(())` iff `name` is not a forbidden binding name. TODO there are more of these in
/// certain situations
fn ck_binding(name: Located<StrRef>) -> Result<()> {
let val = name.val;
if val == StrRef::TRUE
|| val == StrRef::FALSE
|| val == StrRef::NIL
|| val == StrRef::CONS
|| val == StrRef::REF
{
return Err(name.loc.wrap(Error::ForbiddenBinding(name.val)));
}
Ok(())
}
struct FunInfo {
args: Vec<TyVar>,
ret: TyVar,
}
fn fun_infos_to_ve(fun_infos: &HashMap<StrRef, FunInfo>) -> ValEnv {
fun_infos
.iter()
.map(|(&name, fun_info)| {
let ty = fun_info
.args
.iter()
.rev()
.fold(Ty::Var(fun_info.ret), |ac, &tv| {
Ty::Arrow(Ty::Var(tv).into(), ac.into())
});
(name, ValInfo::val(TyScheme::mono(ty)))
})
.collect()
}
pub fn ck(cx: &Cx, st: &mut State, dec: &Located<Dec<StrRef>>) -> Result<Env> {
match &dec.val {
// SML Definition (15)
Dec::Val(ty_vars, val_binds) => {
let mut cx_cl;
let cx = if ty_vars.is_empty() {
cx
} else {
cx_cl = cx.clone();
insert_ty_vars(&mut cx_cl, st, ty_vars)?;
&cx_cl
};
let mut val_env = ValEnv::new();
// SML Definition (25)
for val_bind in val_binds {
// SML Definition (26)
if val_bind.rec {
return Err(dec.loc.wrap(Error::Todo("recursive val binds")));
}
let (other, pat_ty, pat) = pat::ck(cx, st, &val_bind.pat)?;
for &name in other.keys() {
ck_binding(val_bind.pat.loc.wrap(name))?;
}
let exp_ty = ck_exp(cx, st, &val_bind.exp)?;
st.unify(dec.loc, pat_ty.clone(), exp_ty)?;
exhaustive::ck_bind(pat, val_bind.pat.loc)?;
for (name, mut val_info) in other {
generalize(cx, st, ty_vars, &mut val_info.ty_scheme);
let name = val_bind.pat.loc.wrap(name);
env_ins(&mut val_env, name, val_info, Item::Val)?;
}
}
Ok(val_env.into())
}
// SML Definition Appendix A - `fun` is sugar for `val rec` and `case`
Dec::Fun(ty_vars, fval_binds) => {
let mut cx_cl;
let cx = if ty_vars.is_empty() {
cx
} else {
cx_cl = cx.clone();
insert_ty_vars(&mut cx_cl, st, ty_vars)?;
&cx_cl
};
let mut fun_infos = HashMap::with_capacity(fval_binds.len());
for fval_bind in fval_binds {
let first = fval_bind.cases.first().unwrap();
let info = FunInfo {
args: first.pats.iter().map(|_| st.new_ty_var(false)).collect(),
ret: st.new_ty_var(false),
};
// copied from env_ins in util
if fun_infos.insert(first.vid.val, info).is_some() {
let err = Error::Duplicate(Item::Val, first.vid.val);
return Err(first.vid.loc.wrap(err));
}
}
for fval_bind in fval_binds {
let name = fval_bind.cases.first().unwrap().vid.val;
let info = fun_infos.get(&name).unwrap();
let mut arg_pats = Vec::with_capacity(fval_bind.cases.len());
for case in fval_bind.cases.iter() {
if name != case.vid.val {
let err = Error::FunDecNameMismatch(name, case.vid.val);
return Err(case.vid.loc.wrap(err));
}
if info.args.len() != case.pats.len() {
let err = Error::FunDecWrongNumPats(info.args.len(), case.pats.len());
let begin = case.pats.first().unwrap().loc;
let end = case.pats.last().unwrap().loc;
return Err(begin.span(end).wrap(err));
}
let mut pats_val_env = ValEnv::new();
let mut arg_pat = Vec::with_capacity(info.args.len());
for (pat, &tv) in case.pats.iter().zip(info.args.iter()) {
let (ve, pat_ty, new_pat) = pat::ck(cx, st, pat)?;
st.unify(pat.loc, Ty::Var(tv), pat_ty)?;
env_merge(&mut pats_val_env, | ck_cases | identifier_name |
gittrack_controller.go | itTrack,
namespace: farosflags.Namespace,
clusterGitTrackMode: farosflags.ClusterGitTrack,
}
opts := &reconcileGitTrackOpts{
gitTrackMode: farosflags.GitTrack,
clusterGitTrackMode: farosflags.ClusterGitTrack,
}
return rec, opts
}
// reconcileGitTrackOpts is the mode that we're running the reconciler
// in. Being able to change these options during runtime is helpful during tests.
type reconcileGitTrackOpts struct {
clusterGitTrackMode farosflags.ClusterGitTrackMode
gitTrackMode farosflags.GitTrackMode
}
// add adds a new Controller to mgr with r as the reconcile.Reconciler
func add(mgr manager.Manager, r reconcile.Reconciler, opts *reconcileGitTrackOpts) error {
// Create a new controller
c, err := controller.New("gittrack-controller", mgr, controller.Options{Reconciler: r})
if err != nil {
return err
}
if opts.gitTrackMode == farosflags.GTMEnabled {
// Watch for changes to GitTrack
err = c.Watch(&source.Kind{Type: &farosv1alpha1.GitTrack{}}, &handler.EnqueueRequestForObject{})
if err != nil {
return err
}
err = c.Watch(&source.Kind{Type: &farosv1alpha1.GitTrackObject{}}, &handler.EnqueueRequestForOwner{
IsController: true,
OwnerType: &farosv1alpha1.GitTrack{},
})
if err != nil {
return err
}
}
if opts.clusterGitTrackMode != farosflags.CGTMDisabled {
// Watch for changes to ClusterGitTrack
err = c.Watch(&source.Kind{Type: &farosv1alpha1.ClusterGitTrack{}}, &handler.EnqueueRequestForObject{})
if err != nil {
return err
}
err = c.Watch(&source.Kind{Type: &farosv1alpha1.ClusterGitTrackObject{}}, &handler.EnqueueRequestForOwner{
IsController: true,
OwnerType: &farosv1alpha1.ClusterGitTrack{},
})
if err != nil {
return err
}
if opts.clusterGitTrackMode == farosflags.CGTMIncludeNamespaced {
err = c.Watch(&source.Kind{Type: &farosv1alpha1.GitTrackObject{}}, &handler.EnqueueRequestForOwner{
IsController: true,
OwnerType: &farosv1alpha1.ClusterGitTrack{},
})
if err != nil {
return err
}
}
}
return nil
}
var _ reconcile.Reconciler = &ReconcileGitTrack{}
// ReconcileGitTrack reconciles a GitTrack object
type ReconcileGitTrack struct {
client.Client
scheme *runtime.Scheme
store *gitstore.RepoStore
restMapper meta.RESTMapper
recorder record.EventRecorder
ignoredGVRs map[schema.GroupVersionResource]interface{}
lastUpdateTimes map[string]time.Time
mutex *sync.RWMutex
applier farosclient.Client
log logr.Logger
gitTrackMode farosflags.GitTrackMode
namespace string
clusterGitTrackMode farosflags.ClusterGitTrackMode
}
func (r *ReconcileGitTrack) withValues(keysAndValues ...interface{}) *ReconcileGitTrack {
reconciler := *r
reconciler.log = r.log.WithValues(keysAndValues...)
return &reconciler
}
// fetchInstance attempts to fetch the GitTrack resource by the name in the given Request
func (r *ReconcileGitTrack) fetchInstance(req reconcile.Request) (farosv1alpha1.GitTrackInterface, error) {
var instance farosv1alpha1.GitTrackInterface
if req.Namespace != "" {
instance = &farosv1alpha1.GitTrack{}
} else {
instance = &farosv1alpha1.ClusterGitTrack{}
}
err := r.Get(context.TODO(), req.NamespacedName, instance)
if err != nil {
if errors.IsNotFound(err) {
// Object not found, return. Created objects are automatically garbage collected.
// For additional cleanup logic use finalizers.
return nil, nil
}
// Error reading the object - requeue the request.
return nil, err
}
return instance, nil
}
// listObjectsByName lists and filters GitTrackObjects by the `faros.pusher.com/owned-by` label,
// and returns a map of names to GitTrackObject mappings
func (r *ReconcileGitTrack) listObjectsByName(owner farosv1alpha1.GitTrackInterface) (map[string]farosv1alpha1.GitTrackObjectInterface, error) {
result := make(map[string]farosv1alpha1.GitTrackObjectInterface)
gtos := &farosv1alpha1.GitTrackObjectList{}
err := r.List(context.TODO(), gtos)
if err != nil {
return nil, err
}
for _, gto := range gtos.Items {
if metav1.IsControlledBy(>o, owner) {
result[gto.GetNamespacedName()] = gto.DeepCopy()
}
}
cgtos := &farosv1alpha1.ClusterGitTrackObjectList{}
err = r.List(context.TODO(), cgtos)
if err != nil {
return nil, err
}
for _, cgto := range cgtos.Items {
if metav1.IsControlledBy(&cgto, owner) {
result[cgto.GetNamespacedName()] = cgto.DeepCopy()
}
}
return result, nil
}
// objectResult represents the result of creating or updating a GitTrackObject
type objectResult struct {
NamespacedName string
Error error
Ignored bool
Reason string
InSync bool
TimeToDeploy time.Duration
}
// errorResult is a convenience function for creating an error result
func errorResult(namespacedName string, err error) objectResult {
return objectResult{NamespacedName: namespacedName, Error: err, Ignored: true}
} | }
// successResult is a convenience function for creating a success objectResult
func successResult(namespacedName string, timeToDeploy time.Duration, inSync bool) objectResult {
return objectResult{NamespacedName: namespacedName, TimeToDeploy: timeToDeploy, InSync: inSync}
}
func (r *ReconcileGitTrack) newGitTrackObjectInterface(name string, u *unstructured.Unstructured) (farosv1alpha1.GitTrackObjectInterface, error) {
var instance farosv1alpha1.GitTrackObjectInterface
_, namespaced, err := utils.GetAPIResource(r.restMapper, u.GetObjectKind().GroupVersionKind())
if err != nil {
return nil, fmt.Errorf("error getting API resource: %v", err)
}
if namespaced {
instance = &farosv1alpha1.GitTrackObject{
TypeMeta: farosv1alpha1.GitTrackObjectTypeMeta,
}
} else {
instance = &farosv1alpha1.ClusterGitTrackObject{
TypeMeta: farosv1alpha1.ClusterGitTrackObjectTypeMeta,
}
}
instance.SetName(name)
instance.SetNamespace(u.GetNamespace())
data, err := u.MarshalJSON()
if err != nil {
return nil, fmt.Errorf("error marshalling JSON: %v", err)
}
instance.SetSpec(farosv1alpha1.GitTrackObjectSpec{
Name: u.GetName(),
Kind: u.GetKind(),
Data: data,
})
return instance, nil
}
// objectName constructs a name from an Unstructured object
func objectName(u *unstructured.Unstructured) string {
return strings.ToLower(fmt.Sprintf("%s-%s", u.GetKind(), strings.Replace(u.GetName(), ":", "-", -1)))
}
// handleObject either creates or updates a GitTrackObject
func (r *ReconcileGitTrack) handleObject(u *unstructured.Unstructured, owner farosv1alpha1.GitTrackInterface) objectResult {
name := objectName(u)
gto, err := r.newGitTrackObjectInterface(name, u)
if err != nil {
namespacedName := strings.TrimLeft(fmt.Sprintf("%s/%s", u.GetNamespace(), name), "/")
return errorResult(namespacedName, err)
}
ignored, reason, err := r.ignoreObject(u, owner)
if err != nil {
return errorResult(gto.GetNamespacedName(), err)
}
if ignored {
return ignoreResult(gto.GetNamespacedName(), reason)
}
r.mutex.RLock()
timeToDeploy := time.Now().Sub(r.lastUpdateTimes[owner.GetSpec().Repository])
r.mutex.RUnlock()
if err = controllerutil.SetControllerReference(owner, gto, r.scheme); err != nil {
return errorResult(gto.GetNamespacedName(), err)
}
found := gto.DeepCopyInterface()
err = r.Get(context.TODO(), types.NamespacedName{Name: gto.GetName(), Namespace: g |
// ignoreResult is a convenience function for creating an ignore objectResult
func ignoreResult(namespacedName string, reason string) objectResult {
return objectResult{NamespacedName: namespacedName, Ignored: true, Reason: reason} | random_line_split |
gittrack_controller.go |
applier, err := farosclient.NewApplier(mgr.GetConfig(), farosclient.Options{})
if err != nil {
panic(fmt.Errorf("unable to create applier: %v", err))
}
rec := &ReconcileGitTrack{
Client: mgr.GetClient(),
scheme: mgr.GetScheme(),
store: gitstore.NewRepoStore(farosflags.RepositoryDir),
restMapper: restMapper,
recorder: mgr.GetEventRecorderFor("gittrack-controller"),
ignoredGVRs: gvrs,
lastUpdateTimes: make(map[string]time.Time),
mutex: &sync.RWMutex{},
applier: applier,
log: rlogr.Log.WithName("gittrack-controller"),
gitTrackMode: farosflags.GitTrack,
namespace: farosflags.Namespace,
clusterGitTrackMode: farosflags.ClusterGitTrack,
}
opts := &reconcileGitTrackOpts{
gitTrackMode: farosflags.GitTrack,
clusterGitTrackMode: farosflags.ClusterGitTrack,
}
return rec, opts
}
// reconcileGitTrackOpts is the mode that we're running the reconciler
// in. Being able to change these options during runtime is helpful during tests.
type reconcileGitTrackOpts struct {
clusterGitTrackMode farosflags.ClusterGitTrackMode
gitTrackMode farosflags.GitTrackMode
}
// add adds a new Controller to mgr with r as the reconcile.Reconciler
func add(mgr manager.Manager, r reconcile.Reconciler, opts *reconcileGitTrackOpts) error {
// Create a new controller
c, err := controller.New("gittrack-controller", mgr, controller.Options{Reconciler: r})
if err != nil {
return err
}
if opts.gitTrackMode == farosflags.GTMEnabled {
// Watch for changes to GitTrack
err = c.Watch(&source.Kind{Type: &farosv1alpha1.GitTrack{}}, &handler.EnqueueRequestForObject{})
if err != nil {
return err
}
err = c.Watch(&source.Kind{Type: &farosv1alpha1.GitTrackObject{}}, &handler.EnqueueRequestForOwner{
IsController: true,
OwnerType: &farosv1alpha1.GitTrack{},
})
if err != nil {
return err
}
}
if opts.clusterGitTrackMode != farosflags.CGTMDisabled {
// Watch for changes to ClusterGitTrack
err = c.Watch(&source.Kind{Type: &farosv1alpha1.ClusterGitTrack{}}, &handler.EnqueueRequestForObject{})
if err != nil {
return err
}
err = c.Watch(&source.Kind{Type: &farosv1alpha1.ClusterGitTrackObject{}}, &handler.EnqueueRequestForOwner{
IsController: true,
OwnerType: &farosv1alpha1.ClusterGitTrack{},
})
if err != nil {
return err
}
if opts.clusterGitTrackMode == farosflags.CGTMIncludeNamespaced {
err = c.Watch(&source.Kind{Type: &farosv1alpha1.GitTrackObject{}}, &handler.EnqueueRequestForOwner{
IsController: true,
OwnerType: &farosv1alpha1.ClusterGitTrack{},
})
if err != nil {
return err
}
}
}
return nil
}
var _ reconcile.Reconciler = &ReconcileGitTrack{}
// ReconcileGitTrack reconciles a GitTrack object
type ReconcileGitTrack struct {
client.Client
scheme *runtime.Scheme
store *gitstore.RepoStore
restMapper meta.RESTMapper
recorder record.EventRecorder
ignoredGVRs map[schema.GroupVersionResource]interface{}
lastUpdateTimes map[string]time.Time
mutex *sync.RWMutex
applier farosclient.Client
log logr.Logger
gitTrackMode farosflags.GitTrackMode
namespace string
clusterGitTrackMode farosflags.ClusterGitTrackMode
}
func (r *ReconcileGitTrack) withValues(keysAndValues ...interface{}) *ReconcileGitTrack {
reconciler := *r
reconciler.log = r.log.WithValues(keysAndValues...)
return &reconciler
}
// fetchInstance attempts to fetch the GitTrack resource by the name in the given Request
func (r *ReconcileGitTrack) fetchInstance(req reconcile.Request) (farosv1alpha1.GitTrackInterface, error) {
var instance farosv1alpha1.GitTrackInterface
if req.Namespace != "" {
instance = &farosv1alpha1.GitTrack{}
} else {
instance = &farosv1alpha1.ClusterGitTrack{}
}
err := r.Get(context.TODO(), req.NamespacedName, instance)
if err != nil {
if errors.IsNotFound(err) {
// Object not found, return. Created objects are automatically garbage collected.
// For additional cleanup logic use finalizers.
return nil, nil
}
// Error reading the object - requeue the request.
return nil, err
}
return instance, nil
}
// listObjectsByName lists and filters GitTrackObjects by the `faros.pusher.com/owned-by` label,
// and returns a map of names to GitTrackObject mappings
func (r *ReconcileGitTrack) listObjectsByName(owner farosv1alpha1.GitTrackInterface) (map[string]farosv1alpha1.GitTrackObjectInterface, error) {
result := make(map[string]farosv1alpha1.GitTrackObjectInterface)
gtos := &farosv1alpha1.GitTrackObjectList{}
err := r.List(context.TODO(), gtos)
if err != nil {
return nil, err
}
for _, gto := range gtos.Items {
if metav1.IsControlledBy(>o, owner) {
result[gto.GetNamespacedName()] = gto.DeepCopy()
}
}
cgtos := &farosv1alpha1.ClusterGitTrackObjectList{}
err = r.List(context.TODO(), cgtos)
if err != nil {
return nil, err
}
for _, cgto := range cgtos.Items {
if metav1.IsControlledBy(&cgto, owner) {
result[cgto.GetNamespacedName()] = cgto.DeepCopy()
}
}
return result, nil
}
// objectResult represents the result of creating or updating a GitTrackObject
type objectResult struct {
NamespacedName string
Error error
Ignored bool
Reason string
InSync bool
TimeToDeploy time.Duration
}
// errorResult is a convenience function for creating an error result
func errorResult(namespacedName string, err error) objectResult {
return objectResult{NamespacedName: namespacedName, Error: err, Ignored: true}
}
// ignoreResult is a convenience function for creating an ignore objectResult
func ignoreResult(namespacedName string, reason string) objectResult {
return objectResult{NamespacedName: namespacedName, Ignored: true, Reason: reason}
}
// successResult is a convenience function for creating a success objectResult
func successResult(namespacedName string, timeToDeploy time.Duration, inSync bool) objectResult {
return objectResult{NamespacedName: namespacedName, TimeToDeploy: timeToDeploy, InSync: inSync}
}
func (r *ReconcileGitTrack) newGitTrackObjectInterface(name string, u *unstructured.Unstructured) (farosv1alpha1.GitTrackObjectInterface, error) {
var instance farosv1alpha1.GitTrackObjectInterface
_, namespaced, err := utils.GetAPIResource(r.restMapper, u.GetObjectKind().GroupVersionKind())
if err != nil {
return nil, fmt.Errorf("error getting API resource: %v", err)
}
if namespaced {
instance = &farosv1alpha1.GitTrackObject{
TypeMeta: farosv1alpha1.GitTrackObjectTypeMeta,
}
} else {
instance = &farosv1alpha1.ClusterGitTrackObject{
TypeMeta: farosv1alpha1.ClusterGitTrackObjectTypeMeta,
}
}
instance.SetName(name)
instance.SetNamespace(u.GetNamespace())
data, err := u.MarshalJSON()
if err != nil {
return nil, fmt.Errorf("error marshalling JSON: %v", err)
}
instance.SetSpec(farosv1alpha1.GitTrackObjectSpec{
Name: u.GetName(),
Kind: u.GetKind(),
Data: data,
})
return instance, nil
}
// objectName constructs a name from an Unstructured object
func objectName(u *unstructured.Unstructured) string {
return strings.ToLower(fmt.Sprintf("%s-%s", u.GetKind(), strings.Replace(u.GetName(), ":", "-", -1)))
}
// handleObject either creates or updates a GitTrackObject
func (r *ReconcileGitTrack) handleObject(u *unstructured.Unstructured, owner farosv1alpha1.GitTrackInterface) objectResult {
name := objectName(u)
gto, | {
panic(fmt.Errorf("unable to parse ignored resources: %v", err))
} | conditional_block | |
gittrack_controller.go | Track,
namespace: farosflags.Namespace,
clusterGitTrackMode: farosflags.ClusterGitTrack,
}
opts := &reconcileGitTrackOpts{
gitTrackMode: farosflags.GitTrack,
clusterGitTrackMode: farosflags.ClusterGitTrack,
}
return rec, opts
}
// reconcileGitTrackOpts is the mode that we're running the reconciler
// in. Being able to change these options during runtime is helpful during tests.
type reconcileGitTrackOpts struct {
clusterGitTrackMode farosflags.ClusterGitTrackMode
gitTrackMode farosflags.GitTrackMode
}
// add adds a new Controller to mgr with r as the reconcile.Reconciler
func add(mgr manager.Manager, r reconcile.Reconciler, opts *reconcileGitTrackOpts) error {
// Create a new controller
c, err := controller.New("gittrack-controller", mgr, controller.Options{Reconciler: r})
if err != nil {
return err
}
if opts.gitTrackMode == farosflags.GTMEnabled {
// Watch for changes to GitTrack
err = c.Watch(&source.Kind{Type: &farosv1alpha1.GitTrack{}}, &handler.EnqueueRequestForObject{})
if err != nil {
return err
}
err = c.Watch(&source.Kind{Type: &farosv1alpha1.GitTrackObject{}}, &handler.EnqueueRequestForOwner{
IsController: true,
OwnerType: &farosv1alpha1.GitTrack{},
})
if err != nil {
return err
}
}
if opts.clusterGitTrackMode != farosflags.CGTMDisabled {
// Watch for changes to ClusterGitTrack
err = c.Watch(&source.Kind{Type: &farosv1alpha1.ClusterGitTrack{}}, &handler.EnqueueRequestForObject{})
if err != nil {
return err
}
err = c.Watch(&source.Kind{Type: &farosv1alpha1.ClusterGitTrackObject{}}, &handler.EnqueueRequestForOwner{
IsController: true,
OwnerType: &farosv1alpha1.ClusterGitTrack{},
})
if err != nil {
return err
}
if opts.clusterGitTrackMode == farosflags.CGTMIncludeNamespaced {
err = c.Watch(&source.Kind{Type: &farosv1alpha1.GitTrackObject{}}, &handler.EnqueueRequestForOwner{
IsController: true,
OwnerType: &farosv1alpha1.ClusterGitTrack{},
})
if err != nil {
return err
}
}
}
return nil
}
var _ reconcile.Reconciler = &ReconcileGitTrack{}
// ReconcileGitTrack reconciles a GitTrack object
type ReconcileGitTrack struct {
client.Client
scheme *runtime.Scheme
store *gitstore.RepoStore
restMapper meta.RESTMapper
recorder record.EventRecorder
ignoredGVRs map[schema.GroupVersionResource]interface{}
lastUpdateTimes map[string]time.Time
mutex *sync.RWMutex
applier farosclient.Client
log logr.Logger
gitTrackMode farosflags.GitTrackMode
namespace string
clusterGitTrackMode farosflags.ClusterGitTrackMode
}
func (r *ReconcileGitTrack) withValues(keysAndValues ...interface{}) *ReconcileGitTrack {
reconciler := *r
reconciler.log = r.log.WithValues(keysAndValues...)
return &reconciler
}
// fetchInstance attempts to fetch the GitTrack resource by the name in the given Request
func (r *ReconcileGitTrack) fetchInstance(req reconcile.Request) (farosv1alpha1.GitTrackInterface, error) |
// listObjectsByName lists and filters GitTrackObjects by the `faros.pusher.com/owned-by` label,
// and returns a map of names to GitTrackObject mappings
func (r *ReconcileGitTrack) listObjectsByName(owner farosv1alpha1.GitTrackInterface) (map[string]farosv1alpha1.GitTrackObjectInterface, error) {
result := make(map[string]farosv1alpha1.GitTrackObjectInterface)
gtos := &farosv1alpha1.GitTrackObjectList{}
err := r.List(context.TODO(), gtos)
if err != nil {
return nil, err
}
for _, gto := range gtos.Items {
if metav1.IsControlledBy(>o, owner) {
result[gto.GetNamespacedName()] = gto.DeepCopy()
}
}
cgtos := &farosv1alpha1.ClusterGitTrackObjectList{}
err = r.List(context.TODO(), cgtos)
if err != nil {
return nil, err
}
for _, cgto := range cgtos.Items {
if metav1.IsControlledBy(&cgto, owner) {
result[cgto.GetNamespacedName()] = cgto.DeepCopy()
}
}
return result, nil
}
// objectResult represents the result of creating or updating a GitTrackObject
type objectResult struct {
NamespacedName string
Error error
Ignored bool
Reason string
InSync bool
TimeToDeploy time.Duration
}
// errorResult is a convenience function for creating an error result
func errorResult(namespacedName string, err error) objectResult {
return objectResult{NamespacedName: namespacedName, Error: err, Ignored: true}
}
// ignoreResult is a convenience function for creating an ignore objectResult
func ignoreResult(namespacedName string, reason string) objectResult {
return objectResult{NamespacedName: namespacedName, Ignored: true, Reason: reason}
}
// successResult is a convenience function for creating a success objectResult
func successResult(namespacedName string, timeToDeploy time.Duration, inSync bool) objectResult {
return objectResult{NamespacedName: namespacedName, TimeToDeploy: timeToDeploy, InSync: inSync}
}
func (r *ReconcileGitTrack) newGitTrackObjectInterface(name string, u *unstructured.Unstructured) (farosv1alpha1.GitTrackObjectInterface, error) {
var instance farosv1alpha1.GitTrackObjectInterface
_, namespaced, err := utils.GetAPIResource(r.restMapper, u.GetObjectKind().GroupVersionKind())
if err != nil {
return nil, fmt.Errorf("error getting API resource: %v", err)
}
if namespaced {
instance = &farosv1alpha1.GitTrackObject{
TypeMeta: farosv1alpha1.GitTrackObjectTypeMeta,
}
} else {
instance = &farosv1alpha1.ClusterGitTrackObject{
TypeMeta: farosv1alpha1.ClusterGitTrackObjectTypeMeta,
}
}
instance.SetName(name)
instance.SetNamespace(u.GetNamespace())
data, err := u.MarshalJSON()
if err != nil {
return nil, fmt.Errorf("error marshalling JSON: %v", err)
}
instance.SetSpec(farosv1alpha1.GitTrackObjectSpec{
Name: u.GetName(),
Kind: u.GetKind(),
Data: data,
})
return instance, nil
}
// objectName constructs a name from an Unstructured object
func objectName(u *unstructured.Unstructured) string {
return strings.ToLower(fmt.Sprintf("%s-%s", u.GetKind(), strings.Replace(u.GetName(), ":", "-", -1)))
}
// handleObject either creates or updates a GitTrackObject
func (r *ReconcileGitTrack) handleObject(u *unstructured.Unstructured, owner farosv1alpha1.GitTrackInterface) objectResult {
name := objectName(u)
gto, err := r.newGitTrackObjectInterface(name, u)
if err != nil {
namespacedName := strings.TrimLeft(fmt.Sprintf("%s/%s", u.GetNamespace(), name), "/")
return errorResult(namespacedName, err)
}
ignored, reason, err := r.ignoreObject(u, owner)
if err != nil {
return errorResult(gto.GetNamespacedName(), err)
}
if ignored {
return ignoreResult(gto.GetNamespacedName(), reason)
}
r.mutex.RLock()
timeToDeploy := time.Now().Sub(r.lastUpdateTimes[owner.GetSpec().Repository])
r.mutex.RUnlock()
if err = controllerutil.SetControllerReference(owner, gto, r.scheme); err != nil {
return errorResult(gto.GetNamespacedName(), err)
}
found := gto.DeepCopyInterface()
err = r.Get(context.TODO(), types.NamespacedName{Name: gto.GetName(), Namespace: g | {
var instance farosv1alpha1.GitTrackInterface
if req.Namespace != "" {
instance = &farosv1alpha1.GitTrack{}
} else {
instance = &farosv1alpha1.ClusterGitTrack{}
}
err := r.Get(context.TODO(), req.NamespacedName, instance)
if err != nil {
if errors.IsNotFound(err) {
// Object not found, return. Created objects are automatically garbage collected.
// For additional cleanup logic use finalizers.
return nil, nil
}
// Error reading the object - requeue the request.
return nil, err
}
return instance, nil
} | identifier_body |
gittrack_controller.go | itTrack,
namespace: farosflags.Namespace,
clusterGitTrackMode: farosflags.ClusterGitTrack,
}
opts := &reconcileGitTrackOpts{
gitTrackMode: farosflags.GitTrack,
clusterGitTrackMode: farosflags.ClusterGitTrack,
}
return rec, opts
}
// reconcileGitTrackOpts is the mode that we're running the reconciler
// in. Being able to change these options during runtime is helpful during tests.
type reconcileGitTrackOpts struct {
clusterGitTrackMode farosflags.ClusterGitTrackMode
gitTrackMode farosflags.GitTrackMode
}
// add adds a new Controller to mgr with r as the reconcile.Reconciler
func | (mgr manager.Manager, r reconcile.Reconciler, opts *reconcileGitTrackOpts) error {
// Create a new controller
c, err := controller.New("gittrack-controller", mgr, controller.Options{Reconciler: r})
if err != nil {
return err
}
if opts.gitTrackMode == farosflags.GTMEnabled {
// Watch for changes to GitTrack
err = c.Watch(&source.Kind{Type: &farosv1alpha1.GitTrack{}}, &handler.EnqueueRequestForObject{})
if err != nil {
return err
}
err = c.Watch(&source.Kind{Type: &farosv1alpha1.GitTrackObject{}}, &handler.EnqueueRequestForOwner{
IsController: true,
OwnerType: &farosv1alpha1.GitTrack{},
})
if err != nil {
return err
}
}
if opts.clusterGitTrackMode != farosflags.CGTMDisabled {
// Watch for changes to ClusterGitTrack
err = c.Watch(&source.Kind{Type: &farosv1alpha1.ClusterGitTrack{}}, &handler.EnqueueRequestForObject{})
if err != nil {
return err
}
err = c.Watch(&source.Kind{Type: &farosv1alpha1.ClusterGitTrackObject{}}, &handler.EnqueueRequestForOwner{
IsController: true,
OwnerType: &farosv1alpha1.ClusterGitTrack{},
})
if err != nil {
return err
}
if opts.clusterGitTrackMode == farosflags.CGTMIncludeNamespaced {
err = c.Watch(&source.Kind{Type: &farosv1alpha1.GitTrackObject{}}, &handler.EnqueueRequestForOwner{
IsController: true,
OwnerType: &farosv1alpha1.ClusterGitTrack{},
})
if err != nil {
return err
}
}
}
return nil
}
var _ reconcile.Reconciler = &ReconcileGitTrack{}
// ReconcileGitTrack reconciles a GitTrack object
type ReconcileGitTrack struct {
client.Client
scheme *runtime.Scheme
store *gitstore.RepoStore
restMapper meta.RESTMapper
recorder record.EventRecorder
ignoredGVRs map[schema.GroupVersionResource]interface{}
lastUpdateTimes map[string]time.Time
mutex *sync.RWMutex
applier farosclient.Client
log logr.Logger
gitTrackMode farosflags.GitTrackMode
namespace string
clusterGitTrackMode farosflags.ClusterGitTrackMode
}
func (r *ReconcileGitTrack) withValues(keysAndValues ...interface{}) *ReconcileGitTrack {
reconciler := *r
reconciler.log = r.log.WithValues(keysAndValues...)
return &reconciler
}
// fetchInstance attempts to fetch the GitTrack resource by the name in the given Request
func (r *ReconcileGitTrack) fetchInstance(req reconcile.Request) (farosv1alpha1.GitTrackInterface, error) {
var instance farosv1alpha1.GitTrackInterface
if req.Namespace != "" {
instance = &farosv1alpha1.GitTrack{}
} else {
instance = &farosv1alpha1.ClusterGitTrack{}
}
err := r.Get(context.TODO(), req.NamespacedName, instance)
if err != nil {
if errors.IsNotFound(err) {
// Object not found, return. Created objects are automatically garbage collected.
// For additional cleanup logic use finalizers.
return nil, nil
}
// Error reading the object - requeue the request.
return nil, err
}
return instance, nil
}
// listObjectsByName lists and filters GitTrackObjects by the `faros.pusher.com/owned-by` label,
// and returns a map of names to GitTrackObject mappings
func (r *ReconcileGitTrack) listObjectsByName(owner farosv1alpha1.GitTrackInterface) (map[string]farosv1alpha1.GitTrackObjectInterface, error) {
result := make(map[string]farosv1alpha1.GitTrackObjectInterface)
gtos := &farosv1alpha1.GitTrackObjectList{}
err := r.List(context.TODO(), gtos)
if err != nil {
return nil, err
}
for _, gto := range gtos.Items {
if metav1.IsControlledBy(>o, owner) {
result[gto.GetNamespacedName()] = gto.DeepCopy()
}
}
cgtos := &farosv1alpha1.ClusterGitTrackObjectList{}
err = r.List(context.TODO(), cgtos)
if err != nil {
return nil, err
}
for _, cgto := range cgtos.Items {
if metav1.IsControlledBy(&cgto, owner) {
result[cgto.GetNamespacedName()] = cgto.DeepCopy()
}
}
return result, nil
}
// objectResult represents the result of creating or updating a GitTrackObject
type objectResult struct {
NamespacedName string
Error error
Ignored bool
Reason string
InSync bool
TimeToDeploy time.Duration
}
// errorResult is a convenience function for creating an error result
func errorResult(namespacedName string, err error) objectResult {
return objectResult{NamespacedName: namespacedName, Error: err, Ignored: true}
}
// ignoreResult is a convenience function for creating an ignore objectResult
func ignoreResult(namespacedName string, reason string) objectResult {
return objectResult{NamespacedName: namespacedName, Ignored: true, Reason: reason}
}
// successResult is a convenience function for creating a success objectResult
func successResult(namespacedName string, timeToDeploy time.Duration, inSync bool) objectResult {
return objectResult{NamespacedName: namespacedName, TimeToDeploy: timeToDeploy, InSync: inSync}
}
func (r *ReconcileGitTrack) newGitTrackObjectInterface(name string, u *unstructured.Unstructured) (farosv1alpha1.GitTrackObjectInterface, error) {
var instance farosv1alpha1.GitTrackObjectInterface
_, namespaced, err := utils.GetAPIResource(r.restMapper, u.GetObjectKind().GroupVersionKind())
if err != nil {
return nil, fmt.Errorf("error getting API resource: %v", err)
}
if namespaced {
instance = &farosv1alpha1.GitTrackObject{
TypeMeta: farosv1alpha1.GitTrackObjectTypeMeta,
}
} else {
instance = &farosv1alpha1.ClusterGitTrackObject{
TypeMeta: farosv1alpha1.ClusterGitTrackObjectTypeMeta,
}
}
instance.SetName(name)
instance.SetNamespace(u.GetNamespace())
data, err := u.MarshalJSON()
if err != nil {
return nil, fmt.Errorf("error marshalling JSON: %v", err)
}
instance.SetSpec(farosv1alpha1.GitTrackObjectSpec{
Name: u.GetName(),
Kind: u.GetKind(),
Data: data,
})
return instance, nil
}
// objectName constructs a name from an Unstructured object
func objectName(u *unstructured.Unstructured) string {
return strings.ToLower(fmt.Sprintf("%s-%s", u.GetKind(), strings.Replace(u.GetName(), ":", "-", -1)))
}
// handleObject either creates or updates a GitTrackObject
func (r *ReconcileGitTrack) handleObject(u *unstructured.Unstructured, owner farosv1alpha1.GitTrackInterface) objectResult {
name := objectName(u)
gto, err := r.newGitTrackObjectInterface(name, u)
if err != nil {
namespacedName := strings.TrimLeft(fmt.Sprintf("%s/%s", u.GetNamespace(), name), "/")
return errorResult(namespacedName, err)
}
ignored, reason, err := r.ignoreObject(u, owner)
if err != nil {
return errorResult(gto.GetNamespacedName(), err)
}
if ignored {
return ignoreResult(gto.GetNamespacedName(), reason)
}
r.mutex.RLock()
timeToDeploy := time.Now().Sub(r.lastUpdateTimes[owner.GetSpec().Repository])
r.mutex.RUnlock()
if err = controllerutil.SetControllerReference(owner, gto, r.scheme); err != nil {
return errorResult(gto.GetNamespacedName(), err)
}
found := gto.DeepCopyInterface()
err = r.Get(context.TODO(), types.NamespacedName{Name: gto.GetName(), Namespace: g | add | identifier_name |
parse.go |
// Parse returns the parse tree for the XML from the given Reader.
func Parse(r io.Reader) (*Node, error) {
return ParseWithOptions(r, ParserOptions{})
}
// ParseWithOptions is like parse, but with custom options
func ParseWithOptions(r io.Reader, options ParserOptions) (*Node, error) {
p := createParser(r)
options.apply(p)
for {
_, err := p.parse()
if err == io.EOF {
return p.doc, nil
}
if err != nil {
return nil, err
}
}
}
type parser struct {
decoder *xml.Decoder
doc *Node
level int
prev *Node
streamElementXPath *xpath.Expr // Under streaming mode, this specifies the xpath to the target element node(s).
streamElementFilter *xpath.Expr // If specified, it provides further filtering on the target element.
streamNode *Node // Need to remember the last target node So we can clean it up upon next Read() call.
streamNodePrev *Node // Need to remember target node's prev so upon target node removal, we can restore correct prev.
reader *cachedReader // Need to maintain a reference to the reader, so we can determine whether a node contains CDATA.
}
func createParser(r io.Reader) *parser {
reader := newCachedReader(bufio.NewReader(r))
p := &parser{
decoder: xml.NewDecoder(reader),
doc: &Node{Type: DocumentNode},
level: 0,
reader: reader,
}
if p.decoder.CharsetReader == nil {
p.decoder.CharsetReader = charset.NewReaderLabel
}
p.prev = p.doc
return p
}
func (p *parser) parse() (*Node, error) {
var streamElementNodeCounter int
space2prefix := map[string]string{"http://www.w3.org/XML/1998/namespace": "xml"}
for {
p.reader.StartCaching()
tok, err := p.decoder.Token()
p.reader.StopCaching()
if err != nil {
return nil, err
}
switch tok := tok.(type) {
case xml.StartElement:
if p.level == 0 {
// mising XML declaration
attributes := make([]Attr, 1)
attributes[0].Name = xml.Name{Local: "version"}
attributes[0].Value = "1.0"
node := &Node{
Type: DeclarationNode,
Data: "xml",
Attr: attributes,
level: 1,
}
AddChild(p.prev, node)
p.level = 1
p.prev = node
}
for _, att := range tok.Attr {
if att.Name.Local == "xmlns" {
space2prefix[att.Value] = "" // reset empty if exist the default namespace
// defaultNamespaceURL = att.Value
} else if att.Name.Space == "xmlns" {
// maybe there are have duplicate NamespaceURL?
space2prefix[att.Value] = att.Name.Local
}
}
if space := tok.Name.Space; space != "" {
if _, found := space2prefix[space]; !found && p.decoder.Strict {
return nil, fmt.Errorf("xmlquery: invalid XML document, namespace %s is missing", space)
}
}
attributes := make([]Attr, len(tok.Attr))
for i, att := range tok.Attr {
name := att.Name
if prefix, ok := space2prefix[name.Space]; ok {
name.Space = prefix
}
attributes[i] = Attr{
Name: name,
Value: att.Value,
NamespaceURI: att.Name.Space,
}
}
node := &Node{
Type: ElementNode,
Data: tok.Name.Local,
NamespaceURI: tok.Name.Space,
Attr: attributes,
level: p.level,
}
if p.level == p.prev.level {
AddSibling(p.prev, node)
} else if p.level > p.prev.level {
AddChild(p.prev, node)
} else if p.level < p.prev.level {
for i := p.prev.level - p.level; i > 1; i-- {
p.prev = p.prev.Parent
}
AddSibling(p.prev.Parent, node)
}
if node.NamespaceURI != "" {
if v, ok := space2prefix[node.NamespaceURI]; ok {
cached := string(p.reader.Cache())
if strings.HasPrefix(cached, fmt.Sprintf("%s:%s", v, node.Data)) || strings.HasPrefix(cached, fmt.Sprintf("<%s:%s", v, node.Data)) {
node.Prefix = v
}
}
}
// If we're in the streaming mode, we need to remember the node if it is the target node
// so that when we finish processing the node's EndElement, we know how/what to return to
// caller. Also we need to remove the target node from the tree upon next Read() call so
// memory doesn't grow unbounded.
if p.streamElementXPath != nil {
if p.streamNode == nil {
if QuerySelector(p.doc, p.streamElementXPath) != nil {
p.streamNode = node
p.streamNodePrev = p.prev
streamElementNodeCounter = 1
}
} else {
streamElementNodeCounter++
}
}
p.prev = node
p.level++
case xml.EndElement:
p.level--
// If we're in streaming mode, and we already have a potential streaming
// target node identified (p.streamNode != nil) then we need to check if
// this is the real one we want to return to caller.
if p.streamNode != nil {
streamElementNodeCounter--
if streamElementNodeCounter == 0 {
// Now we know this element node is the at least passing the initial
// p.streamElementXPath check and is a potential target node candidate.
// We need to have 1 more check with p.streamElementFilter (if given) to
// ensure it is really the element node we want.
// The reason we need a two-step check process is because the following
// situation:
// <AAA><BBB>b1</BBB></AAA>
// And say the p.streamElementXPath = "/AAA/BBB[. != 'b1']". Now during
// xml.StartElement time, the <BBB> node is still empty, so it will pass
// the p.streamElementXPath check. However, eventually we know this <BBB>
// shouldn't be returned to the caller. Having a second more fine-grained
// filter check ensures that. So in this case, the caller should really
// setup the stream parser with:
// streamElementXPath = "/AAA/BBB["
// streamElementFilter = "/AAA/BBB[. != 'b1']"
if p.streamElementFilter == nil || QuerySelector(p.doc, p.streamElementFilter) != nil {
return p.streamNode, nil
}
// otherwise, this isn't our target node, clean things up.
// note we also remove the underlying *Node from the node tree, to prevent
// future stream node candidate selection error.
RemoveFromTree(p.streamNode)
p.prev = p.streamNodePrev
p.streamNode = nil
p.streamNodePrev = nil
}
}
case xml.CharData:
// First, normalize the cache...
cached := strings.ToUpper(string(p.reader.Cache()))
nodeType := TextNode
if strings.HasPrefix(cached, "<![CDATA[") || strings.HasPrefix(cached, "![CDATA[") {
nodeType = CharDataNode
}
node := &Node{Type: nodeType, Data: string(tok), level: p.level}
if p.level == p.prev.level {
AddSibling(p.prev, node)
} else if p.level > p.prev.level {
AddChild(p.prev, node)
} else if p.level < p.prev.level {
for i := p.prev.level - p.level; i > 1; i-- {
p.prev = p.prev.Parent
}
AddSibling(p.prev.Parent, node)
}
case xml.Comment:
node := &Node{Type: CommentNode, Data: string(tok), level: p.level}
if p.level == p.prev.level {
AddSibling(p.prev, node)
} else if p.level > p.prev.level {
AddChild(p.prev, node)
} else if p.level < p.prev.level {
for i := p.prev.level - p.level; i > 1; i-- {
p.prev = p.prev.Parent
}
| {
resp, err := http.Get(url)
if err != nil {
return nil, err
}
defer resp.Body.Close()
// Make sure the Content-Type has a valid XML MIME type
if xmlMIMERegex.MatchString(resp.Header.Get("Content-Type")) {
return Parse(resp.Body)
}
return nil, fmt.Errorf("invalid XML document(%s)", resp.Header.Get("Content-Type"))
} | identifier_body | |
parse.go | (r io.Reader, options ParserOptions) (*Node, error) {
p := createParser(r)
options.apply(p)
for {
_, err := p.parse()
if err == io.EOF {
return p.doc, nil
}
if err != nil {
return nil, err
}
}
}
type parser struct {
decoder *xml.Decoder
doc *Node
level int
prev *Node
streamElementXPath *xpath.Expr // Under streaming mode, this specifies the xpath to the target element node(s).
streamElementFilter *xpath.Expr // If specified, it provides further filtering on the target element.
streamNode *Node // Need to remember the last target node So we can clean it up upon next Read() call.
streamNodePrev *Node // Need to remember target node's prev so upon target node removal, we can restore correct prev.
reader *cachedReader // Need to maintain a reference to the reader, so we can determine whether a node contains CDATA.
}
func createParser(r io.Reader) *parser {
reader := newCachedReader(bufio.NewReader(r))
p := &parser{
decoder: xml.NewDecoder(reader),
doc: &Node{Type: DocumentNode},
level: 0,
reader: reader,
}
if p.decoder.CharsetReader == nil {
p.decoder.CharsetReader = charset.NewReaderLabel
}
p.prev = p.doc
return p
}
func (p *parser) parse() (*Node, error) {
var streamElementNodeCounter int
space2prefix := map[string]string{"http://www.w3.org/XML/1998/namespace": "xml"}
for {
p.reader.StartCaching()
tok, err := p.decoder.Token()
p.reader.StopCaching()
if err != nil {
return nil, err
}
switch tok := tok.(type) {
case xml.StartElement:
if p.level == 0 {
// mising XML declaration
attributes := make([]Attr, 1)
attributes[0].Name = xml.Name{Local: "version"}
attributes[0].Value = "1.0"
node := &Node{
Type: DeclarationNode,
Data: "xml",
Attr: attributes,
level: 1,
}
AddChild(p.prev, node)
p.level = 1
p.prev = node
}
for _, att := range tok.Attr {
if att.Name.Local == "xmlns" {
space2prefix[att.Value] = "" // reset empty if exist the default namespace
// defaultNamespaceURL = att.Value
} else if att.Name.Space == "xmlns" {
// maybe there are have duplicate NamespaceURL?
space2prefix[att.Value] = att.Name.Local
}
}
if space := tok.Name.Space; space != "" {
if _, found := space2prefix[space]; !found && p.decoder.Strict {
return nil, fmt.Errorf("xmlquery: invalid XML document, namespace %s is missing", space)
}
}
attributes := make([]Attr, len(tok.Attr))
for i, att := range tok.Attr {
name := att.Name
if prefix, ok := space2prefix[name.Space]; ok {
name.Space = prefix
}
attributes[i] = Attr{
Name: name,
Value: att.Value,
NamespaceURI: att.Name.Space,
}
}
node := &Node{
Type: ElementNode,
Data: tok.Name.Local,
NamespaceURI: tok.Name.Space,
Attr: attributes,
level: p.level,
}
if p.level == p.prev.level {
AddSibling(p.prev, node)
} else if p.level > p.prev.level {
AddChild(p.prev, node)
} else if p.level < p.prev.level {
for i := p.prev.level - p.level; i > 1; i-- {
p.prev = p.prev.Parent
}
AddSibling(p.prev.Parent, node)
}
if node.NamespaceURI != "" {
if v, ok := space2prefix[node.NamespaceURI]; ok {
cached := string(p.reader.Cache())
if strings.HasPrefix(cached, fmt.Sprintf("%s:%s", v, node.Data)) || strings.HasPrefix(cached, fmt.Sprintf("<%s:%s", v, node.Data)) {
node.Prefix = v
}
}
}
// If we're in the streaming mode, we need to remember the node if it is the target node
// so that when we finish processing the node's EndElement, we know how/what to return to
// caller. Also we need to remove the target node from the tree upon next Read() call so
// memory doesn't grow unbounded.
if p.streamElementXPath != nil {
if p.streamNode == nil {
if QuerySelector(p.doc, p.streamElementXPath) != nil {
p.streamNode = node
p.streamNodePrev = p.prev
streamElementNodeCounter = 1
}
} else {
streamElementNodeCounter++
}
}
p.prev = node
p.level++
case xml.EndElement:
p.level--
// If we're in streaming mode, and we already have a potential streaming
// target node identified (p.streamNode != nil) then we need to check if
// this is the real one we want to return to caller.
if p.streamNode != nil {
streamElementNodeCounter--
if streamElementNodeCounter == 0 {
// Now we know this element node is the at least passing the initial
// p.streamElementXPath check and is a potential target node candidate.
// We need to have 1 more check with p.streamElementFilter (if given) to
// ensure it is really the element node we want.
// The reason we need a two-step check process is because the following
// situation:
// <AAA><BBB>b1</BBB></AAA>
// And say the p.streamElementXPath = "/AAA/BBB[. != 'b1']". Now during
// xml.StartElement time, the <BBB> node is still empty, so it will pass
// the p.streamElementXPath check. However, eventually we know this <BBB>
// shouldn't be returned to the caller. Having a second more fine-grained
// filter check ensures that. So in this case, the caller should really
// setup the stream parser with:
// streamElementXPath = "/AAA/BBB["
// streamElementFilter = "/AAA/BBB[. != 'b1']"
if p.streamElementFilter == nil || QuerySelector(p.doc, p.streamElementFilter) != nil {
return p.streamNode, nil
}
// otherwise, this isn't our target node, clean things up.
// note we also remove the underlying *Node from the node tree, to prevent
// future stream node candidate selection error.
RemoveFromTree(p.streamNode)
p.prev = p.streamNodePrev
p.streamNode = nil
p.streamNodePrev = nil
}
}
case xml.CharData:
// First, normalize the cache...
cached := strings.ToUpper(string(p.reader.Cache()))
nodeType := TextNode
if strings.HasPrefix(cached, "<![CDATA[") || strings.HasPrefix(cached, "![CDATA[") {
nodeType = CharDataNode
}
node := &Node{Type: nodeType, Data: string(tok), level: p.level}
if p.level == p.prev.level {
AddSibling(p.prev, node)
} else if p.level > p.prev.level {
AddChild(p.prev, node)
} else if p.level < p.prev.level {
for i := p.prev.level - p.level; i > 1; i-- {
p.prev = p.prev.Parent
}
AddSibling(p.prev.Parent, node)
}
case xml.Comment:
node := &Node{Type: CommentNode, Data: string(tok), level: p.level}
if p.level == p.prev.level {
AddSibling(p.prev, node)
} else if p.level > p.prev.level {
AddChild(p.prev, node)
} else if p.level < p.prev.level {
for i := p.prev.level - p.level; i > 1; i-- {
p.prev = p.prev.Parent
}
AddSibling(p.prev.Parent, node)
}
case xml.ProcInst: // Processing Instruction
if p.prev.Type != DeclarationNode {
p.level++
}
node := &Node{Type: DeclarationNode, Data: tok.Target, level: p.level}
pairs := strings.Split(string(tok.Inst), " ")
for _, pair := range pairs {
pair = strings.TrimSpace(pair)
if i := strings.Index(pair, "="); i > 0 {
AddAttr(node, pair[:i], strings.Trim(pair[i+1:], `"`))
}
}
if p.level == p | ParseWithOptions | identifier_name | |
parse.go | node So we can clean it up upon next Read() call.
streamNodePrev *Node // Need to remember target node's prev so upon target node removal, we can restore correct prev.
reader *cachedReader // Need to maintain a reference to the reader, so we can determine whether a node contains CDATA.
}
func createParser(r io.Reader) *parser {
reader := newCachedReader(bufio.NewReader(r))
p := &parser{
decoder: xml.NewDecoder(reader),
doc: &Node{Type: DocumentNode},
level: 0,
reader: reader,
}
if p.decoder.CharsetReader == nil {
p.decoder.CharsetReader = charset.NewReaderLabel
}
p.prev = p.doc
return p
}
func (p *parser) parse() (*Node, error) {
var streamElementNodeCounter int
space2prefix := map[string]string{"http://www.w3.org/XML/1998/namespace": "xml"}
for {
p.reader.StartCaching()
tok, err := p.decoder.Token()
p.reader.StopCaching()
if err != nil {
return nil, err
}
switch tok := tok.(type) {
case xml.StartElement:
if p.level == 0 {
// mising XML declaration
attributes := make([]Attr, 1)
attributes[0].Name = xml.Name{Local: "version"}
attributes[0].Value = "1.0"
node := &Node{
Type: DeclarationNode,
Data: "xml",
Attr: attributes,
level: 1,
}
AddChild(p.prev, node)
p.level = 1
p.prev = node
}
for _, att := range tok.Attr {
if att.Name.Local == "xmlns" {
space2prefix[att.Value] = "" // reset empty if exist the default namespace
// defaultNamespaceURL = att.Value
} else if att.Name.Space == "xmlns" {
// maybe there are have duplicate NamespaceURL?
space2prefix[att.Value] = att.Name.Local
}
}
if space := tok.Name.Space; space != "" {
if _, found := space2prefix[space]; !found && p.decoder.Strict {
return nil, fmt.Errorf("xmlquery: invalid XML document, namespace %s is missing", space)
}
}
attributes := make([]Attr, len(tok.Attr))
for i, att := range tok.Attr {
name := att.Name
if prefix, ok := space2prefix[name.Space]; ok {
name.Space = prefix
}
attributes[i] = Attr{
Name: name,
Value: att.Value,
NamespaceURI: att.Name.Space,
}
}
node := &Node{
Type: ElementNode,
Data: tok.Name.Local,
NamespaceURI: tok.Name.Space,
Attr: attributes,
level: p.level,
}
if p.level == p.prev.level {
AddSibling(p.prev, node)
} else if p.level > p.prev.level {
AddChild(p.prev, node)
} else if p.level < p.prev.level {
for i := p.prev.level - p.level; i > 1; i-- {
p.prev = p.prev.Parent
}
AddSibling(p.prev.Parent, node)
}
if node.NamespaceURI != "" {
if v, ok := space2prefix[node.NamespaceURI]; ok {
cached := string(p.reader.Cache())
if strings.HasPrefix(cached, fmt.Sprintf("%s:%s", v, node.Data)) || strings.HasPrefix(cached, fmt.Sprintf("<%s:%s", v, node.Data)) {
node.Prefix = v
}
}
}
// If we're in the streaming mode, we need to remember the node if it is the target node
// so that when we finish processing the node's EndElement, we know how/what to return to
// caller. Also we need to remove the target node from the tree upon next Read() call so
// memory doesn't grow unbounded.
if p.streamElementXPath != nil {
if p.streamNode == nil {
if QuerySelector(p.doc, p.streamElementXPath) != nil {
p.streamNode = node
p.streamNodePrev = p.prev
streamElementNodeCounter = 1
}
} else {
streamElementNodeCounter++
}
}
p.prev = node
p.level++
case xml.EndElement:
p.level--
// If we're in streaming mode, and we already have a potential streaming
// target node identified (p.streamNode != nil) then we need to check if
// this is the real one we want to return to caller.
if p.streamNode != nil {
streamElementNodeCounter--
if streamElementNodeCounter == 0 {
// Now we know this element node is the at least passing the initial
// p.streamElementXPath check and is a potential target node candidate.
// We need to have 1 more check with p.streamElementFilter (if given) to
// ensure it is really the element node we want.
// The reason we need a two-step check process is because the following
// situation:
// <AAA><BBB>b1</BBB></AAA>
// And say the p.streamElementXPath = "/AAA/BBB[. != 'b1']". Now during
// xml.StartElement time, the <BBB> node is still empty, so it will pass
// the p.streamElementXPath check. However, eventually we know this <BBB>
// shouldn't be returned to the caller. Having a second more fine-grained
// filter check ensures that. So in this case, the caller should really
// setup the stream parser with:
// streamElementXPath = "/AAA/BBB["
// streamElementFilter = "/AAA/BBB[. != 'b1']"
if p.streamElementFilter == nil || QuerySelector(p.doc, p.streamElementFilter) != nil {
return p.streamNode, nil
}
// otherwise, this isn't our target node, clean things up.
// note we also remove the underlying *Node from the node tree, to prevent
// future stream node candidate selection error.
RemoveFromTree(p.streamNode)
p.prev = p.streamNodePrev
p.streamNode = nil
p.streamNodePrev = nil
}
}
case xml.CharData:
// First, normalize the cache...
cached := strings.ToUpper(string(p.reader.Cache()))
nodeType := TextNode
if strings.HasPrefix(cached, "<![CDATA[") || strings.HasPrefix(cached, "![CDATA[") {
nodeType = CharDataNode
}
node := &Node{Type: nodeType, Data: string(tok), level: p.level}
if p.level == p.prev.level {
AddSibling(p.prev, node)
} else if p.level > p.prev.level {
AddChild(p.prev, node)
} else if p.level < p.prev.level {
for i := p.prev.level - p.level; i > 1; i-- {
p.prev = p.prev.Parent
}
AddSibling(p.prev.Parent, node)
}
case xml.Comment:
node := &Node{Type: CommentNode, Data: string(tok), level: p.level}
if p.level == p.prev.level {
AddSibling(p.prev, node)
} else if p.level > p.prev.level {
AddChild(p.prev, node)
} else if p.level < p.prev.level {
for i := p.prev.level - p.level; i > 1; i-- {
p.prev = p.prev.Parent
}
AddSibling(p.prev.Parent, node)
}
case xml.ProcInst: // Processing Instruction
if p.prev.Type != DeclarationNode {
p.level++
}
node := &Node{Type: DeclarationNode, Data: tok.Target, level: p.level}
pairs := strings.Split(string(tok.Inst), " ")
for _, pair := range pairs {
pair = strings.TrimSpace(pair)
if i := strings.Index(pair, "="); i > 0 {
AddAttr(node, pair[:i], strings.Trim(pair[i+1:], `"`))
}
}
if p.level == p.prev.level {
AddSibling(p.prev, node)
} else if p.level > p.prev.level | else if p.level < p.prev.level {
for i := p.prev.level - p.level; i > 1; i-- {
p.prev = p.prev.Parent
}
AddSibling(p.prev.Parent, node)
}
p.prev = node
case xml.Directive:
}
}
}
// StreamParser enables loading and parsing an XML document in a streaming
// fashion.
type StreamParser struct {
p *parser
}
// CreateStreamParser creates a StreamParser. Argument streamElementXPath is
// required.
// Argument streamElementFilter | {
AddChild(p.prev, node)
} | conditional_block |
parse.go | target node So we can clean it up upon next Read() call.
streamNodePrev *Node // Need to remember target node's prev so upon target node removal, we can restore correct prev.
reader *cachedReader // Need to maintain a reference to the reader, so we can determine whether a node contains CDATA.
}
func createParser(r io.Reader) *parser {
reader := newCachedReader(bufio.NewReader(r)) | decoder: xml.NewDecoder(reader),
doc: &Node{Type: DocumentNode},
level: 0,
reader: reader,
}
if p.decoder.CharsetReader == nil {
p.decoder.CharsetReader = charset.NewReaderLabel
}
p.prev = p.doc
return p
}
func (p *parser) parse() (*Node, error) {
var streamElementNodeCounter int
space2prefix := map[string]string{"http://www.w3.org/XML/1998/namespace": "xml"}
for {
p.reader.StartCaching()
tok, err := p.decoder.Token()
p.reader.StopCaching()
if err != nil {
return nil, err
}
switch tok := tok.(type) {
case xml.StartElement:
if p.level == 0 {
// mising XML declaration
attributes := make([]Attr, 1)
attributes[0].Name = xml.Name{Local: "version"}
attributes[0].Value = "1.0"
node := &Node{
Type: DeclarationNode,
Data: "xml",
Attr: attributes,
level: 1,
}
AddChild(p.prev, node)
p.level = 1
p.prev = node
}
for _, att := range tok.Attr {
if att.Name.Local == "xmlns" {
space2prefix[att.Value] = "" // reset empty if exist the default namespace
// defaultNamespaceURL = att.Value
} else if att.Name.Space == "xmlns" {
// maybe there are have duplicate NamespaceURL?
space2prefix[att.Value] = att.Name.Local
}
}
if space := tok.Name.Space; space != "" {
if _, found := space2prefix[space]; !found && p.decoder.Strict {
return nil, fmt.Errorf("xmlquery: invalid XML document, namespace %s is missing", space)
}
}
attributes := make([]Attr, len(tok.Attr))
for i, att := range tok.Attr {
name := att.Name
if prefix, ok := space2prefix[name.Space]; ok {
name.Space = prefix
}
attributes[i] = Attr{
Name: name,
Value: att.Value,
NamespaceURI: att.Name.Space,
}
}
node := &Node{
Type: ElementNode,
Data: tok.Name.Local,
NamespaceURI: tok.Name.Space,
Attr: attributes,
level: p.level,
}
if p.level == p.prev.level {
AddSibling(p.prev, node)
} else if p.level > p.prev.level {
AddChild(p.prev, node)
} else if p.level < p.prev.level {
for i := p.prev.level - p.level; i > 1; i-- {
p.prev = p.prev.Parent
}
AddSibling(p.prev.Parent, node)
}
if node.NamespaceURI != "" {
if v, ok := space2prefix[node.NamespaceURI]; ok {
cached := string(p.reader.Cache())
if strings.HasPrefix(cached, fmt.Sprintf("%s:%s", v, node.Data)) || strings.HasPrefix(cached, fmt.Sprintf("<%s:%s", v, node.Data)) {
node.Prefix = v
}
}
}
// If we're in the streaming mode, we need to remember the node if it is the target node
// so that when we finish processing the node's EndElement, we know how/what to return to
// caller. Also we need to remove the target node from the tree upon next Read() call so
// memory doesn't grow unbounded.
if p.streamElementXPath != nil {
if p.streamNode == nil {
if QuerySelector(p.doc, p.streamElementXPath) != nil {
p.streamNode = node
p.streamNodePrev = p.prev
streamElementNodeCounter = 1
}
} else {
streamElementNodeCounter++
}
}
p.prev = node
p.level++
case xml.EndElement:
p.level--
// If we're in streaming mode, and we already have a potential streaming
// target node identified (p.streamNode != nil) then we need to check if
// this is the real one we want to return to caller.
if p.streamNode != nil {
streamElementNodeCounter--
if streamElementNodeCounter == 0 {
// Now we know this element node is the at least passing the initial
// p.streamElementXPath check and is a potential target node candidate.
// We need to have 1 more check with p.streamElementFilter (if given) to
// ensure it is really the element node we want.
// The reason we need a two-step check process is because the following
// situation:
// <AAA><BBB>b1</BBB></AAA>
// And say the p.streamElementXPath = "/AAA/BBB[. != 'b1']". Now during
// xml.StartElement time, the <BBB> node is still empty, so it will pass
// the p.streamElementXPath check. However, eventually we know this <BBB>
// shouldn't be returned to the caller. Having a second more fine-grained
// filter check ensures that. So in this case, the caller should really
// setup the stream parser with:
// streamElementXPath = "/AAA/BBB["
// streamElementFilter = "/AAA/BBB[. != 'b1']"
if p.streamElementFilter == nil || QuerySelector(p.doc, p.streamElementFilter) != nil {
return p.streamNode, nil
}
// otherwise, this isn't our target node, clean things up.
// note we also remove the underlying *Node from the node tree, to prevent
// future stream node candidate selection error.
RemoveFromTree(p.streamNode)
p.prev = p.streamNodePrev
p.streamNode = nil
p.streamNodePrev = nil
}
}
case xml.CharData:
// First, normalize the cache...
cached := strings.ToUpper(string(p.reader.Cache()))
nodeType := TextNode
if strings.HasPrefix(cached, "<![CDATA[") || strings.HasPrefix(cached, "![CDATA[") {
nodeType = CharDataNode
}
node := &Node{Type: nodeType, Data: string(tok), level: p.level}
if p.level == p.prev.level {
AddSibling(p.prev, node)
} else if p.level > p.prev.level {
AddChild(p.prev, node)
} else if p.level < p.prev.level {
for i := p.prev.level - p.level; i > 1; i-- {
p.prev = p.prev.Parent
}
AddSibling(p.prev.Parent, node)
}
case xml.Comment:
node := &Node{Type: CommentNode, Data: string(tok), level: p.level}
if p.level == p.prev.level {
AddSibling(p.prev, node)
} else if p.level > p.prev.level {
AddChild(p.prev, node)
} else if p.level < p.prev.level {
for i := p.prev.level - p.level; i > 1; i-- {
p.prev = p.prev.Parent
}
AddSibling(p.prev.Parent, node)
}
case xml.ProcInst: // Processing Instruction
if p.prev.Type != DeclarationNode {
p.level++
}
node := &Node{Type: DeclarationNode, Data: tok.Target, level: p.level}
pairs := strings.Split(string(tok.Inst), " ")
for _, pair := range pairs {
pair = strings.TrimSpace(pair)
if i := strings.Index(pair, "="); i > 0 {
AddAttr(node, pair[:i], strings.Trim(pair[i+1:], `"`))
}
}
if p.level == p.prev.level {
AddSibling(p.prev, node)
} else if p.level > p.prev.level {
AddChild(p.prev, node)
} else if p.level < p.prev.level {
for i := p.prev.level - p.level; i > 1; i-- {
p.prev = p.prev.Parent
}
AddSibling(p.prev.Parent, node)
}
p.prev = node
case xml.Directive:
}
}
}
// StreamParser enables loading and parsing an XML document in a streaming
// fashion.
type StreamParser struct {
p *parser
}
// CreateStreamParser creates a StreamParser. Argument streamElementXPath is
// required.
// Argument streamElementFilter | p := &parser{ | random_line_split |
RoomNotifs.ts | IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import { PushProcessor } from "matrix-js-sdk/src/pushprocessor";
import {
NotificationCountType,
ConditionKind,
PushRuleActionName,
PushRuleKind,
TweakName,
} from "matrix-js-sdk/src/matrix";
import type { IPushRule, Room, MatrixClient } from "matrix-js-sdk/src/matrix";
import { NotificationColor } from "./stores/notifications/NotificationColor";
import { getUnsentMessages } from "./components/structures/RoomStatusBar";
import { doesRoomHaveUnreadMessages, doesRoomOrThreadHaveUnreadMessages } from "./Unread";
import { EffectiveMembership, getEffectiveMembership } from "./utils/membership";
import SettingsStore from "./settings/SettingsStore";
export enum RoomNotifState {
AllMessagesLoud = "all_messages_loud",
AllMessages = "all_messages",
MentionsOnly = "mentions_only",
Mute = "mute",
}
export function getRoomNotifsState(client: MatrixClient, roomId: string): RoomNotifState | null | // XXX: We have to assume the default is to notify for all messages
// (in particular this will be 'wrong' for one to one rooms because
// they will notify loudly for all messages)
if (!roomRule?.enabled) return RoomNotifState.AllMessages;
// a mute at the room level will still allow mentions
// to notify
if (isMuteRule(roomRule)) return RoomNotifState.MentionsOnly;
const actionsObject = PushProcessor.actionListToActionsObject(roomRule.actions);
if (actionsObject.tweaks.sound) return RoomNotifState.AllMessagesLoud;
return null;
}
export function setRoomNotifsState(client: MatrixClient, roomId: string, newState: RoomNotifState): Promise<void> {
if (newState === RoomNotifState.Mute) {
return setRoomNotifsStateMuted(client, roomId);
} else {
return setRoomNotifsStateUnmuted(client, roomId, newState);
}
}
export function getUnreadNotificationCount(room: Room, type: NotificationCountType, threadId?: string): number {
let notificationCount = !!threadId
? room.getThreadUnreadNotificationCount(threadId, type)
: room.getUnreadNotificationCount(type);
// Check notification counts in the old room just in case there's some lost
// there. We only go one level down to avoid performance issues, and theory
// is that 1st generation rooms will have already been read by the 3rd generation.
const msc3946ProcessDynamicPredecessor = SettingsStore.getValue("feature_dynamic_room_predecessors");
const predecessor = room.findPredecessor(msc3946ProcessDynamicPredecessor);
// Exclude threadId, as the same thread can't continue over a room upgrade
if (!threadId && predecessor?.roomId) {
const oldRoomId = predecessor.roomId;
const oldRoom = room.client.getRoom(oldRoomId);
if (oldRoom) {
// We only ever care if there's highlights in the old room. No point in
// notifying the user for unread messages because they would have extreme
// difficulty changing their notification preferences away from "All Messages"
// and "Noisy".
notificationCount += oldRoom.getUnreadNotificationCount(NotificationCountType.Highlight);
}
}
return notificationCount;
}
function setRoomNotifsStateMuted(cli: MatrixClient, roomId: string): Promise<any> {
const promises: Promise<unknown>[] = [];
// delete the room rule
const roomRule = cli.getRoomPushRule("global", roomId);
if (roomRule) {
promises.push(cli.deletePushRule("global", PushRuleKind.RoomSpecific, roomRule.rule_id));
}
// add/replace an override rule to squelch everything in this room
// NB. We use the room ID as the name of this rule too, although this
// is an override rule, not a room rule: it still pertains to this room
// though, so using the room ID as the rule ID is logical and prevents
// duplicate copies of the rule.
promises.push(
cli.addPushRule("global", PushRuleKind.Override, roomId, {
conditions: [
{
kind: ConditionKind.EventMatch,
key: "room_id",
pattern: roomId,
},
],
actions: [PushRuleActionName.DontNotify],
}),
);
return Promise.all(promises);
}
function setRoomNotifsStateUnmuted(cli: MatrixClient, roomId: string, newState: RoomNotifState): Promise<any> {
const promises: Promise<unknown>[] = [];
const overrideMuteRule = findOverrideMuteRule(cli, roomId);
if (overrideMuteRule) {
promises.push(cli.deletePushRule("global", PushRuleKind.Override, overrideMuteRule.rule_id));
}
if (newState === RoomNotifState.AllMessages) {
const roomRule = cli.getRoomPushRule("global", roomId);
if (roomRule) {
promises.push(cli.deletePushRule("global", PushRuleKind.RoomSpecific, roomRule.rule_id));
}
} else if (newState === RoomNotifState.MentionsOnly) {
promises.push(
cli.addPushRule("global", PushRuleKind.RoomSpecific, roomId, {
actions: [PushRuleActionName.DontNotify],
}),
);
} else if (newState === RoomNotifState.AllMessagesLoud) {
promises.push(
cli.addPushRule("global", PushRuleKind.RoomSpecific, roomId, {
actions: [
PushRuleActionName.Notify,
{
set_tweak: TweakName.Sound,
value: "default",
},
],
}),
);
}
return Promise.all(promises);
}
function findOverrideMuteRule(cli: MatrixClient | undefined, roomId: string): IPushRule | null {
if (!cli?.pushRules?.global?.override) {
return null;
}
for (const rule of cli.pushRules.global.override) {
if (rule.enabled && isRuleRoomMuteRuleForRoomId(roomId, rule)) {
return rule;
}
}
return null;
}
/**
* Checks if a given rule is a room mute rule as implemented by EW
* - matches every event in one room (one condition that is an event match on roomId)
* - silences notifications (one action that is `DontNotify`)
* @param rule - push rule
* @returns {boolean} - true when rule mutes a room
*/
export function isRuleMaybeRoomMuteRule(rule: IPushRule): boolean {
return (
// matches every event in one room
rule.conditions?.length === 1 &&
rule.conditions[0].kind === ConditionKind.EventMatch &&
rule.conditions[0].key === "room_id" &&
// silences notifications
isMuteRule(rule)
);
}
/**
* Checks if a given rule is a room mute rule as implemented by EW
* @param roomId - id of room to match
* @param rule - push rule
* @returns {boolean} true when rule mutes the given room
*/
function isRuleRoomMuteRuleForRoomId(roomId: string, rule: IPushRule): boolean {
if (!isRuleMaybeRoomMuteRule(rule)) {
return false;
}
// isRuleMaybeRoomMuteRule checks this condition exists
const cond = rule.conditions![0]!;
return cond.pattern === roomId;
}
function isMuteRule(rule: IPushRule): boolean {
// DontNotify is equivalent to the empty actions array
return (
rule.actions.length === 0 || (rule.actions.length === 1 && rule.actions[0] === PushRuleActionName.DontNotify)
);
}
export function determineUnreadState(
room?: Room,
threadId?: string,
): { color: NotificationColor; symbol: string | null; count: number } {
if (!room) {
return { symbol: null, count: 0, color: NotificationColor.None };
}
if (getUnsentMessages(room, threadId).length > 0) {
return { symbol: "!", count: 1, color: NotificationColor.Unsent };
}
if (getEffectiveMembership(room.getMyMembership()) === EffectiveMembership.Invite) {
return { symbol: "!", count: 1, color: NotificationColor.Red };
}
if (getRoomNotifsState(room.client, room.roomId) === RoomNotifState.Mute) {
| {
if (client.isGuest()) return RoomNotifState.AllMessages;
// look through the override rules for a rule affecting this room:
// if one exists, it will take precedence.
const muteRule = findOverrideMuteRule(client, roomId);
if (muteRule) {
return RoomNotifState.Mute;
}
// for everything else, look at the room rule.
let roomRule: IPushRule | undefined;
try {
roomRule = client.getRoomPushRule("global", roomId);
} catch (err) {
// Possible that the client doesn't have pushRules yet. If so, it
// hasn't started either, so indicate that this room is not notifying.
return null;
}
| identifier_body |
RoomNotifs.ts | IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import { PushProcessor } from "matrix-js-sdk/src/pushprocessor";
import {
NotificationCountType,
ConditionKind,
PushRuleActionName,
PushRuleKind,
TweakName,
} from "matrix-js-sdk/src/matrix";
import type { IPushRule, Room, MatrixClient } from "matrix-js-sdk/src/matrix";
import { NotificationColor } from "./stores/notifications/NotificationColor";
import { getUnsentMessages } from "./components/structures/RoomStatusBar";
import { doesRoomHaveUnreadMessages, doesRoomOrThreadHaveUnreadMessages } from "./Unread";
import { EffectiveMembership, getEffectiveMembership } from "./utils/membership";
import SettingsStore from "./settings/SettingsStore";
export enum RoomNotifState {
AllMessagesLoud = "all_messages_loud",
AllMessages = "all_messages",
MentionsOnly = "mentions_only",
Mute = "mute",
}
export function getRoomNotifsState(client: MatrixClient, roomId: string): RoomNotifState | null {
if (client.isGuest()) return RoomNotifState.AllMessages;
// look through the override rules for a rule affecting this room:
// if one exists, it will take precedence.
const muteRule = findOverrideMuteRule(client, roomId);
if (muteRule) {
return RoomNotifState.Mute;
}
// for everything else, look at the room rule.
let roomRule: IPushRule | undefined;
try {
roomRule = client.getRoomPushRule("global", roomId);
} catch (err) {
// Possible that the client doesn't have pushRules yet. If so, it
// hasn't started either, so indicate that this room is not notifying.
return null;
}
// XXX: We have to assume the default is to notify for all messages
// (in particular this will be 'wrong' for one to one rooms because
// they will notify loudly for all messages)
if (!roomRule?.enabled) return RoomNotifState.AllMessages;
// a mute at the room level will still allow mentions
// to notify
if (isMuteRule(roomRule)) return RoomNotifState.MentionsOnly;
const actionsObject = PushProcessor.actionListToActionsObject(roomRule.actions);
if (actionsObject.tweaks.sound) return RoomNotifState.AllMessagesLoud;
return null;
}
export function setRoomNotifsState(client: MatrixClient, roomId: string, newState: RoomNotifState): Promise<void> {
if (newState === RoomNotifState.Mute) {
return setRoomNotifsStateMuted(client, roomId);
} else {
return setRoomNotifsStateUnmuted(client, roomId, newState);
}
}
export function getUnreadNotificationCount(room: Room, type: NotificationCountType, threadId?: string): number {
let notificationCount = !!threadId
? room.getThreadUnreadNotificationCount(threadId, type)
: room.getUnreadNotificationCount(type);
// Check notification counts in the old room just in case there's some lost
// there. We only go one level down to avoid performance issues, and theory
// is that 1st generation rooms will have already been read by the 3rd generation.
const msc3946ProcessDynamicPredecessor = SettingsStore.getValue("feature_dynamic_room_predecessors");
const predecessor = room.findPredecessor(msc3946ProcessDynamicPredecessor);
// Exclude threadId, as the same thread can't continue over a room upgrade
if (!threadId && predecessor?.roomId) {
const oldRoomId = predecessor.roomId;
const oldRoom = room.client.getRoom(oldRoomId);
if (oldRoom) {
// We only ever care if there's highlights in the old room. No point in
// notifying the user for unread messages because they would have extreme
// difficulty changing their notification preferences away from "All Messages"
// and "Noisy".
notificationCount += oldRoom.getUnreadNotificationCount(NotificationCountType.Highlight);
}
}
return notificationCount;
}
function setRoomNotifsStateMuted(cli: MatrixClient, roomId: string): Promise<any> {
const promises: Promise<unknown>[] = [];
// delete the room rule
const roomRule = cli.getRoomPushRule("global", roomId);
if (roomRule) {
promises.push(cli.deletePushRule("global", PushRuleKind.RoomSpecific, roomRule.rule_id));
}
// add/replace an override rule to squelch everything in this room
// NB. We use the room ID as the name of this rule too, although this
// is an override rule, not a room rule: it still pertains to this room
// though, so using the room ID as the rule ID is logical and prevents
// duplicate copies of the rule.
promises.push(
cli.addPushRule("global", PushRuleKind.Override, roomId, {
conditions: [
{
kind: ConditionKind.EventMatch,
key: "room_id",
pattern: roomId,
},
],
actions: [PushRuleActionName.DontNotify],
}),
);
return Promise.all(promises);
}
function setRoomNotifsStateUnmuted(cli: MatrixClient, roomId: string, newState: RoomNotifState): Promise<any> {
const promises: Promise<unknown>[] = [];
const overrideMuteRule = findOverrideMuteRule(cli, roomId);
if (overrideMuteRule) {
promises.push(cli.deletePushRule("global", PushRuleKind.Override, overrideMuteRule.rule_id));
}
if (newState === RoomNotifState.AllMessages) {
const roomRule = cli.getRoomPushRule("global", roomId);
if (roomRule) {
promises.push(cli.deletePushRule("global", PushRuleKind.RoomSpecific, roomRule.rule_id));
}
} else if (newState === RoomNotifState.MentionsOnly) {
promises.push(
cli.addPushRule("global", PushRuleKind.RoomSpecific, roomId, {
actions: [PushRuleActionName.DontNotify],
}),
);
} else if (newState === RoomNotifState.AllMessagesLoud) {
promises.push(
cli.addPushRule("global", PushRuleKind.RoomSpecific, roomId, {
actions: [
PushRuleActionName.Notify,
{
set_tweak: TweakName.Sound,
value: "default",
},
],
}),
);
}
return Promise.all(promises);
}
function findOverrideMuteRule(cli: MatrixClient | undefined, roomId: string): IPushRule | null {
if (!cli?.pushRules?.global?.override) {
return null;
}
for (const rule of cli.pushRules.global.override) {
if (rule.enabled && isRuleRoomMuteRuleForRoomId(roomId, rule)) {
return rule;
}
}
return null;
}
/**
* Checks if a given rule is a room mute rule as implemented by EW
* - matches every event in one room (one condition that is an event match on roomId)
* - silences notifications (one action that is `DontNotify`)
* @param rule - push rule
* @returns {boolean} - true when rule mutes a room
*/
export function isRuleMaybeRoomMuteRule(rule: IPushRule): boolean {
return (
// matches every event in one room
rule.conditions?.length === 1 &&
rule.conditions[0].kind === ConditionKind.EventMatch &&
rule.conditions[0].key === "room_id" &&
// silences notifications
isMuteRule(rule)
);
}
/**
* Checks if a given rule is a room mute rule as implemented by EW
* @param roomId - id of room to match
* @param rule - push rule
* @returns {boolean} true when rule mutes the given room
*/
function isRuleRoomMuteRuleForRoomId(roomId: string, rule: IPushRule): boolean {
if (!isRuleMaybeRoomMuteRule(rule)) {
return false;
}
// isRuleMaybeRoomMuteRule checks this condition exists
const cond = rule.conditions![0]!;
return cond.pattern === roomId;
}
function | (rule: IPushRule): boolean {
// DontNotify is equivalent to the empty actions array
return (
rule.actions.length === 0 || (rule.actions.length === 1 && rule.actions[0] === PushRuleActionName.DontNotify)
);
}
export function determineUnreadState(
room?: Room,
threadId?: string,
): { color: NotificationColor; symbol: string | null; count: number } {
if (!room) {
return { symbol: null, count: 0, color: NotificationColor.None };
}
if (getUnsentMessages(room, threadId).length > 0) {
return { symbol: "!", count: 1, color: NotificationColor.Unsent };
}
if (getEffectiveMembership(room.getMyMembership()) === EffectiveMembership.Invite) {
return { symbol: "!", count: 1, color: NotificationColor.Red };
}
if (getRoomNotifsState(room.client, room.roomId) === RoomNotifState.Mute) {
| isMuteRule | identifier_name |
RoomNotifs.ts | IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import { PushProcessor } from "matrix-js-sdk/src/pushprocessor";
import {
NotificationCountType,
ConditionKind,
PushRuleActionName,
PushRuleKind,
TweakName,
} from "matrix-js-sdk/src/matrix";
import type { IPushRule, Room, MatrixClient } from "matrix-js-sdk/src/matrix";
import { NotificationColor } from "./stores/notifications/NotificationColor";
import { getUnsentMessages } from "./components/structures/RoomStatusBar";
import { doesRoomHaveUnreadMessages, doesRoomOrThreadHaveUnreadMessages } from "./Unread";
import { EffectiveMembership, getEffectiveMembership } from "./utils/membership";
import SettingsStore from "./settings/SettingsStore";
export enum RoomNotifState {
AllMessagesLoud = "all_messages_loud",
AllMessages = "all_messages",
MentionsOnly = "mentions_only",
Mute = "mute",
}
export function getRoomNotifsState(client: MatrixClient, roomId: string): RoomNotifState | null {
if (client.isGuest()) return RoomNotifState.AllMessages;
// look through the override rules for a rule affecting this room:
// if one exists, it will take precedence.
const muteRule = findOverrideMuteRule(client, roomId);
if (muteRule) {
return RoomNotifState.Mute;
}
// for everything else, look at the room rule.
let roomRule: IPushRule | undefined;
try {
roomRule = client.getRoomPushRule("global", roomId);
} catch (err) {
// Possible that the client doesn't have pushRules yet. If so, it
// hasn't started either, so indicate that this room is not notifying.
return null;
}
// XXX: We have to assume the default is to notify for all messages
// (in particular this will be 'wrong' for one to one rooms because
// they will notify loudly for all messages)
if (!roomRule?.enabled) return RoomNotifState.AllMessages;
// a mute at the room level will still allow mentions
// to notify
if (isMuteRule(roomRule)) return RoomNotifState.MentionsOnly;
const actionsObject = PushProcessor.actionListToActionsObject(roomRule.actions);
if (actionsObject.tweaks.sound) return RoomNotifState.AllMessagesLoud;
return null;
}
export function setRoomNotifsState(client: MatrixClient, roomId: string, newState: RoomNotifState): Promise<void> {
if (newState === RoomNotifState.Mute) {
return setRoomNotifsStateMuted(client, roomId);
} else {
return setRoomNotifsStateUnmuted(client, roomId, newState);
}
}
export function getUnreadNotificationCount(room: Room, type: NotificationCountType, threadId?: string): number {
let notificationCount = !!threadId
? room.getThreadUnreadNotificationCount(threadId, type)
: room.getUnreadNotificationCount(type);
// Check notification counts in the old room just in case there's some lost
// there. We only go one level down to avoid performance issues, and theory
// is that 1st generation rooms will have already been read by the 3rd generation.
const msc3946ProcessDynamicPredecessor = SettingsStore.getValue("feature_dynamic_room_predecessors");
const predecessor = room.findPredecessor(msc3946ProcessDynamicPredecessor);
// Exclude threadId, as the same thread can't continue over a room upgrade
if (!threadId && predecessor?.roomId) {
const oldRoomId = predecessor.roomId;
const oldRoom = room.client.getRoom(oldRoomId);
if (oldRoom) {
// We only ever care if there's highlights in the old room. No point in
// notifying the user for unread messages because they would have extreme
// difficulty changing their notification preferences away from "All Messages"
// and "Noisy".
notificationCount += oldRoom.getUnreadNotificationCount(NotificationCountType.Highlight);
}
}
return notificationCount;
}
function setRoomNotifsStateMuted(cli: MatrixClient, roomId: string): Promise<any> {
const promises: Promise<unknown>[] = [];
// delete the room rule
const roomRule = cli.getRoomPushRule("global", roomId);
if (roomRule) {
promises.push(cli.deletePushRule("global", PushRuleKind.RoomSpecific, roomRule.rule_id));
}
// add/replace an override rule to squelch everything in this room
// NB. We use the room ID as the name of this rule too, although this
// is an override rule, not a room rule: it still pertains to this room
// though, so using the room ID as the rule ID is logical and prevents
// duplicate copies of the rule.
promises.push(
cli.addPushRule("global", PushRuleKind.Override, roomId, {
conditions: [
{
kind: ConditionKind.EventMatch,
key: "room_id",
pattern: roomId,
},
],
actions: [PushRuleActionName.DontNotify],
}),
);
return Promise.all(promises);
}
function setRoomNotifsStateUnmuted(cli: MatrixClient, roomId: string, newState: RoomNotifState): Promise<any> {
const promises: Promise<unknown>[] = [];
const overrideMuteRule = findOverrideMuteRule(cli, roomId);
if (overrideMuteRule) {
promises.push(cli.deletePushRule("global", PushRuleKind.Override, overrideMuteRule.rule_id));
}
if (newState === RoomNotifState.AllMessages) {
const roomRule = cli.getRoomPushRule("global", roomId);
if (roomRule) {
promises.push(cli.deletePushRule("global", PushRuleKind.RoomSpecific, roomRule.rule_id));
}
} else if (newState === RoomNotifState.MentionsOnly) {
promises.push(
cli.addPushRule("global", PushRuleKind.RoomSpecific, roomId, {
actions: [PushRuleActionName.DontNotify],
}),
);
} else if (newState === RoomNotifState.AllMessagesLoud) {
promises.push(
cli.addPushRule("global", PushRuleKind.RoomSpecific, roomId, {
actions: [
PushRuleActionName.Notify,
{
set_tweak: TweakName.Sound,
value: "default",
},
],
}),
);
}
return Promise.all(promises);
}
function findOverrideMuteRule(cli: MatrixClient | undefined, roomId: string): IPushRule | null {
if (!cli?.pushRules?.global?.override) {
return null;
}
for (const rule of cli.pushRules.global.override) {
if (rule.enabled && isRuleRoomMuteRuleForRoomId(roomId, rule)) {
return rule;
}
}
return null;
}
/**
* Checks if a given rule is a room mute rule as implemented by EW
* - matches every event in one room (one condition that is an event match on roomId)
* - silences notifications (one action that is `DontNotify`)
* @param rule - push rule
* @returns {boolean} - true when rule mutes a room
*/
export function isRuleMaybeRoomMuteRule(rule: IPushRule): boolean {
return (
// matches every event in one room
rule.conditions?.length === 1 &&
rule.conditions[0].kind === ConditionKind.EventMatch &&
rule.conditions[0].key === "room_id" &&
// silences notifications
isMuteRule(rule)
);
}
/**
* Checks if a given rule is a room mute rule as implemented by EW
* @param roomId - id of room to match
* @param rule - push rule
* @returns {boolean} true when rule mutes the given room
*/
function isRuleRoomMuteRuleForRoomId(roomId: string, rule: IPushRule): boolean {
if (!isRuleMaybeRoomMuteRule(rule)) {
return false;
}
// isRuleMaybeRoomMuteRule checks this condition exists
const cond = rule.conditions![0]!;
return cond.pattern === roomId;
} | rule.actions.length === 0 || (rule.actions.length === 1 && rule.actions[0] === PushRuleActionName.DontNotify)
);
}
export function determineUnreadState(
room?: Room,
threadId?: string,
): { color: NotificationColor; symbol: string | null; count: number } {
if (!room) {
return { symbol: null, count: 0, color: NotificationColor.None };
}
if (getUnsentMessages(room, threadId).length > 0) {
return { symbol: "!", count: 1, color: NotificationColor.Unsent };
}
if (getEffectiveMembership(room.getMyMembership()) === EffectiveMembership.Invite) {
return { symbol: "!", count: 1, color: NotificationColor.Red };
}
if (getRoomNotifsState(room.client, room.roomId) === RoomNotifState.Mute) {
|
function isMuteRule(rule: IPushRule): boolean {
// DontNotify is equivalent to the empty actions array
return ( | random_line_split |
parser.rs | ().map(|&b| char::from(b))
}
/// Read the next character from the input
fn read_char(&mut self) -> Option<char> {
self.state.split_first().map(|(&b, tail)| { |
#[must_use]
/// Read the next character from the input if it matches the target.
fn read_given_char(&mut self, target: char) -> Option<()> {
self.read_atomically(|p| {
p.read_char().and_then(|c| if c == target { Some(()) } else { None })
})
}
/// Helper for reading separators in an indexed loop. Reads the separator
/// character iff index > 0, then runs the parser. When used in a loop,
/// the separator character will only be read on index > 0 (see
/// read_ipv4_addr for an example)
fn read_separator<T, F>(&mut self, sep: char, index: usize, inner: F) -> Option<T>
where
F: FnOnce(&mut Parser<'_>) -> Option<T>,
{
self.read_atomically(move |p| {
if index > 0 {
p.read_given_char(sep)?;
}
inner(p)
})
}
// Read a number off the front of the input in the given radix, stopping
// at the first non-digit character or eof. Fails if the number has more
// digits than max_digits or if there is no number.
#[allow(clippy::if_same_then_else)]
fn read_number<T: ReadNumberHelper>(
&mut self,
radix: u32,
max_digits: Option<usize>,
allow_zero_prefix: bool,
) -> Option<T> {
self.read_atomically(move |p| {
let mut result = T::ZERO;
let mut digit_count = 0;
let has_leading_zero = p.peek_char() == Some('0');
while let Some(digit) = p.read_atomically(|p| p.read_char()?.to_digit(radix)) {
result = result.checked_mul(radix)?;
result = result.checked_add(digit)?;
digit_count += 1;
if let Some(max_digits) = max_digits {
if digit_count > max_digits {
return None;
}
}
}
if digit_count == 0 {
None
} else if !allow_zero_prefix && has_leading_zero && digit_count > 1 {
None
} else {
Some(result)
}
})
}
/// Read an IPv4 address.
fn read_ipv4_addr(&mut self) -> Option<Ipv4Addr> {
self.read_atomically(|p| {
let mut groups = [0; 4];
for (i, slot) in groups.iter_mut().enumerate() {
*slot = p.read_separator('.', i, |p| {
// Disallow octal number in IP string.
// https://tools.ietf.org/html/rfc6943#section-3.1.1
p.read_number(10, Some(3), false)
})?;
}
Some(groups.into())
})
}
/// Read an IPv6 Address.
fn read_ipv6_addr(&mut self) -> Option<Ipv6Addr> {
/// Read a chunk of an IPv6 address into `groups`. Returns the number
/// of groups read, along with a bool indicating if an embedded
/// trailing IPv4 address was read. Specifically, read a series of
/// colon-separated IPv6 groups (0x0000 - 0xFFFF), with an optional
/// trailing embedded IPv4 address.
fn read_groups(p: &mut Parser<'_>, groups: &mut [u16]) -> (usize, bool) {
let limit = groups.len();
for (i, slot) in groups.iter_mut().enumerate() {
// Try to read a trailing embedded IPv4 address. There must be
// at least two groups left.
if i < limit - 1 {
let ipv4 = p.read_separator(':', i, |p| p.read_ipv4_addr());
if let Some(v4_addr) = ipv4 {
let [one, two, three, four] = v4_addr.octets();
groups[i] = u16::from_be_bytes([one, two]);
groups[i + 1] = u16::from_be_bytes([three, four]);
return (i + 2, true);
}
}
let group = p.read_separator(':', i, |p| p.read_number(16, Some(4), true));
match group {
Some(g) => *slot = g,
None => return (i, false),
}
}
(groups.len(), false)
}
self.read_atomically(|p| {
// Read the front part of the address; either the whole thing, or up
// to the first ::
let mut head = [0; 8];
let (head_size, head_ipv4) = read_groups(p, &mut head);
if head_size == 8 {
return Some(head.into());
}
// IPv4 part is not allowed before `::`
if head_ipv4 {
return None;
}
// Read `::` if previous code parsed less than 8 groups.
// `::` indicates one or more groups of 16 bits of zeros.
p.read_given_char(':')?;
p.read_given_char(':')?;
// Read the back part of the address. The :: must contain at least one
// set of zeroes, so our max length is 7.
let mut tail = [0; 7];
let limit = 8 - (head_size + 1);
let (tail_size, _) = read_groups(p, &mut tail[..limit]);
// Concat the head and tail of the IP address
head[(8 - tail_size)..8].copy_from_slice(&tail[..tail_size]);
Some(head.into())
})
}
/// Read an IP Address, either IPv4 or IPv6.
fn read_ip_addr(&mut self) -> Option<IpAddr> {
self.read_ipv4_addr().map(IpAddr::V4).or_else(move || self.read_ipv6_addr().map(IpAddr::V6))
}
/// Read a `:` followed by a port in base 10.
fn read_port(&mut self) -> Option<u16> {
self.read_atomically(|p| {
p.read_given_char(':')?;
p.read_number(10, None, true)
})
}
/// Read a `%` followed by a scope ID in base 10.
fn read_scope_id(&mut self) -> Option<u32> {
self.read_atomically(|p| {
p.read_given_char('%')?;
p.read_number(10, None, true)
})
}
/// Read an IPv4 address with a port.
fn read_socket_addr_v4(&mut self) -> Option<SocketAddrV4> {
self.read_atomically(|p| {
let ip = p.read_ipv4_addr()?;
let port = p.read_port()?;
Some(SocketAddrV4::new(ip, port))
})
}
/// Read an IPv6 address with a port.
fn read_socket_addr_v6(&mut self) -> Option<SocketAddrV6> {
self.read_atomically(|p| {
p.read_given_char('[')?;
let ip = p.read_ipv6_addr()?;
let scope_id = p.read_scope_id().unwrap_or(0);
p.read_given_char(']')?;
let port = p.read_port()?;
Some(SocketAddrV6::new(ip, port, 0, scope_id))
})
}
/// Read an IP address with a port
fn read_socket_addr(&mut self) -> Option<SocketAddr> {
self.read_socket_addr_v4()
.map(SocketAddr::V4)
.or_else(|| self.read_socket_addr_v6().map(SocketAddr::V6))
}
}
impl IpAddr {
/// Parse an IP address from a slice of bytes.
///
/// ```
/// #![feature(addr_parse_ascii)]
///
/// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
///
/// let localhost_v4 = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1));
/// let localhost_v6 = IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1));
///
/// assert_eq!(IpAddr::parse_ascii(b"127.0.0.1"), Ok(localhost_v4));
/// assert_eq!(IpAddr::parse_ascii(b"::1"), Ok(localhost_v6));
/// ```
pub fn parse_ascii(b: &[u8]) -> Result<Self, AddrParseError> {
Parser::new(b).parse_with(|p| p.read_ip_addr(), AddrKind::Ip)
}
}
impl FromStr for IpAddr {
type Err = AddrParseError;
fn from_str(s: &str) -> Result<IpAddr, AddrParseError> {
Self::parse_ascii(s.as_bytes())
}
}
impl Ipv4Addr {
/// Parse an IPv4 | self.state = tail;
char::from(b)
})
} | random_line_split |
parser.rs | ZERO;
let mut digit_count = 0;
let has_leading_zero = p.peek_char() == Some('0');
while let Some(digit) = p.read_atomically(|p| p.read_char()?.to_digit(radix)) {
result = result.checked_mul(radix)?;
result = result.checked_add(digit)?;
digit_count += 1;
if let Some(max_digits) = max_digits {
if digit_count > max_digits {
return None;
}
}
}
if digit_count == 0 {
None
} else if !allow_zero_prefix && has_leading_zero && digit_count > 1 {
None
} else {
Some(result)
}
})
}
/// Read an IPv4 address.
fn read_ipv4_addr(&mut self) -> Option<Ipv4Addr> {
self.read_atomically(|p| {
let mut groups = [0; 4];
for (i, slot) in groups.iter_mut().enumerate() {
*slot = p.read_separator('.', i, |p| {
// Disallow octal number in IP string.
// https://tools.ietf.org/html/rfc6943#section-3.1.1
p.read_number(10, Some(3), false)
})?;
}
Some(groups.into())
})
}
/// Read an IPv6 Address.
fn read_ipv6_addr(&mut self) -> Option<Ipv6Addr> {
/// Read a chunk of an IPv6 address into `groups`. Returns the number
/// of groups read, along with a bool indicating if an embedded
/// trailing IPv4 address was read. Specifically, read a series of
/// colon-separated IPv6 groups (0x0000 - 0xFFFF), with an optional
/// trailing embedded IPv4 address.
fn read_groups(p: &mut Parser<'_>, groups: &mut [u16]) -> (usize, bool) {
let limit = groups.len();
for (i, slot) in groups.iter_mut().enumerate() {
// Try to read a trailing embedded IPv4 address. There must be
// at least two groups left.
if i < limit - 1 {
let ipv4 = p.read_separator(':', i, |p| p.read_ipv4_addr());
if let Some(v4_addr) = ipv4 {
let [one, two, three, four] = v4_addr.octets();
groups[i] = u16::from_be_bytes([one, two]);
groups[i + 1] = u16::from_be_bytes([three, four]);
return (i + 2, true);
}
}
let group = p.read_separator(':', i, |p| p.read_number(16, Some(4), true));
match group {
Some(g) => *slot = g,
None => return (i, false),
}
}
(groups.len(), false)
}
self.read_atomically(|p| {
// Read the front part of the address; either the whole thing, or up
// to the first ::
let mut head = [0; 8];
let (head_size, head_ipv4) = read_groups(p, &mut head);
if head_size == 8 {
return Some(head.into());
}
// IPv4 part is not allowed before `::`
if head_ipv4 {
return None;
}
// Read `::` if previous code parsed less than 8 groups.
// `::` indicates one or more groups of 16 bits of zeros.
p.read_given_char(':')?;
p.read_given_char(':')?;
// Read the back part of the address. The :: must contain at least one
// set of zeroes, so our max length is 7.
let mut tail = [0; 7];
let limit = 8 - (head_size + 1);
let (tail_size, _) = read_groups(p, &mut tail[..limit]);
// Concat the head and tail of the IP address
head[(8 - tail_size)..8].copy_from_slice(&tail[..tail_size]);
Some(head.into())
})
}
/// Read an IP Address, either IPv4 or IPv6.
fn read_ip_addr(&mut self) -> Option<IpAddr> {
self.read_ipv4_addr().map(IpAddr::V4).or_else(move || self.read_ipv6_addr().map(IpAddr::V6))
}
/// Read a `:` followed by a port in base 10.
fn read_port(&mut self) -> Option<u16> {
self.read_atomically(|p| {
p.read_given_char(':')?;
p.read_number(10, None, true)
})
}
/// Read a `%` followed by a scope ID in base 10.
fn read_scope_id(&mut self) -> Option<u32> {
self.read_atomically(|p| {
p.read_given_char('%')?;
p.read_number(10, None, true)
})
}
/// Read an IPv4 address with a port.
fn read_socket_addr_v4(&mut self) -> Option<SocketAddrV4> {
self.read_atomically(|p| {
let ip = p.read_ipv4_addr()?;
let port = p.read_port()?;
Some(SocketAddrV4::new(ip, port))
})
}
/// Read an IPv6 address with a port.
fn read_socket_addr_v6(&mut self) -> Option<SocketAddrV6> {
self.read_atomically(|p| {
p.read_given_char('[')?;
let ip = p.read_ipv6_addr()?;
let scope_id = p.read_scope_id().unwrap_or(0);
p.read_given_char(']')?;
let port = p.read_port()?;
Some(SocketAddrV6::new(ip, port, 0, scope_id))
})
}
/// Read an IP address with a port
fn read_socket_addr(&mut self) -> Option<SocketAddr> {
self.read_socket_addr_v4()
.map(SocketAddr::V4)
.or_else(|| self.read_socket_addr_v6().map(SocketAddr::V6))
}
}
impl IpAddr {
/// Parse an IP address from a slice of bytes.
///
/// ```
/// #![feature(addr_parse_ascii)]
///
/// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
///
/// let localhost_v4 = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1));
/// let localhost_v6 = IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1));
///
/// assert_eq!(IpAddr::parse_ascii(b"127.0.0.1"), Ok(localhost_v4));
/// assert_eq!(IpAddr::parse_ascii(b"::1"), Ok(localhost_v6));
/// ```
pub fn parse_ascii(b: &[u8]) -> Result<Self, AddrParseError> {
Parser::new(b).parse_with(|p| p.read_ip_addr(), AddrKind::Ip)
}
}
impl FromStr for IpAddr {
type Err = AddrParseError;
fn from_str(s: &str) -> Result<IpAddr, AddrParseError> {
Self::parse_ascii(s.as_bytes())
}
}
impl Ipv4Addr {
/// Parse an IPv4 address from a slice of bytes.
///
/// ```
/// #![feature(addr_parse_ascii)]
///
/// use std::net::Ipv4Addr;
///
/// let localhost = Ipv4Addr::new(127, 0, 0, 1);
///
/// assert_eq!(Ipv4Addr::parse_ascii(b"127.0.0.1"), Ok(localhost));
/// ```
pub fn parse_ascii(b: &[u8]) -> Result<Self, AddrParseError> {
// don't try to parse if too long
if b.len() > 15 {
Err(AddrParseError(AddrKind::Ipv4))
} else {
Parser::new(b).parse_with(|p| p.read_ipv4_addr(), AddrKind::Ipv4)
}
}
}
impl FromStr for Ipv4Addr {
type Err = AddrParseError;
fn from_str(s: &str) -> Result<Ipv4Addr, AddrParseError> {
Self::parse_ascii(s.as_bytes())
}
}
impl Ipv6Addr {
/// Parse an IPv6 address from a slice of bytes.
///
/// ```
/// #![feature(addr_parse_ascii)]
///
/// use std::net::Ipv6Addr;
///
/// let localhost = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1);
///
/// assert_eq!(Ipv6Addr::parse_ascii(b"::1"), Ok(localhost));
/// ```
pub fn parse_ascii(b: &[u8]) -> Result<Self, AddrParseError> {
Parser::new(b).parse_with(|p| p.read_ipv6_addr(), AddrKind::Ipv6)
}
}
impl FromStr for Ipv6Addr {
type Err = AddrParseError;
fn | from_str | identifier_name | |
parser.rs | ().map(|&b| char::from(b))
}
/// Read the next character from the input
fn read_char(&mut self) -> Option<char> {
self.state.split_first().map(|(&b, tail)| {
self.state = tail;
char::from(b)
})
}
#[must_use]
/// Read the next character from the input if it matches the target.
fn read_given_char(&mut self, target: char) -> Option<()> {
self.read_atomically(|p| {
p.read_char().and_then(|c| if c == target { Some(()) } else { None })
})
}
/// Helper for reading separators in an indexed loop. Reads the separator
/// character iff index > 0, then runs the parser. When used in a loop,
/// the separator character will only be read on index > 0 (see
/// read_ipv4_addr for an example)
fn read_separator<T, F>(&mut self, sep: char, index: usize, inner: F) -> Option<T>
where
F: FnOnce(&mut Parser<'_>) -> Option<T>,
|
// Read a number off the front of the input in the given radix, stopping
// at the first non-digit character or eof. Fails if the number has more
// digits than max_digits or if there is no number.
#[allow(clippy::if_same_then_else)]
fn read_number<T: ReadNumberHelper>(
&mut self,
radix: u32,
max_digits: Option<usize>,
allow_zero_prefix: bool,
) -> Option<T> {
self.read_atomically(move |p| {
let mut result = T::ZERO;
let mut digit_count = 0;
let has_leading_zero = p.peek_char() == Some('0');
while let Some(digit) = p.read_atomically(|p| p.read_char()?.to_digit(radix)) {
result = result.checked_mul(radix)?;
result = result.checked_add(digit)?;
digit_count += 1;
if let Some(max_digits) = max_digits {
if digit_count > max_digits {
return None;
}
}
}
if digit_count == 0 {
None
} else if !allow_zero_prefix && has_leading_zero && digit_count > 1 {
None
} else {
Some(result)
}
})
}
/// Read an IPv4 address.
fn read_ipv4_addr(&mut self) -> Option<Ipv4Addr> {
self.read_atomically(|p| {
let mut groups = [0; 4];
for (i, slot) in groups.iter_mut().enumerate() {
*slot = p.read_separator('.', i, |p| {
// Disallow octal number in IP string.
// https://tools.ietf.org/html/rfc6943#section-3.1.1
p.read_number(10, Some(3), false)
})?;
}
Some(groups.into())
})
}
/// Read an IPv6 Address.
fn read_ipv6_addr(&mut self) -> Option<Ipv6Addr> {
/// Read a chunk of an IPv6 address into `groups`. Returns the number
/// of groups read, along with a bool indicating if an embedded
/// trailing IPv4 address was read. Specifically, read a series of
/// colon-separated IPv6 groups (0x0000 - 0xFFFF), with an optional
/// trailing embedded IPv4 address.
fn read_groups(p: &mut Parser<'_>, groups: &mut [u16]) -> (usize, bool) {
let limit = groups.len();
for (i, slot) in groups.iter_mut().enumerate() {
// Try to read a trailing embedded IPv4 address. There must be
// at least two groups left.
if i < limit - 1 {
let ipv4 = p.read_separator(':', i, |p| p.read_ipv4_addr());
if let Some(v4_addr) = ipv4 {
let [one, two, three, four] = v4_addr.octets();
groups[i] = u16::from_be_bytes([one, two]);
groups[i + 1] = u16::from_be_bytes([three, four]);
return (i + 2, true);
}
}
let group = p.read_separator(':', i, |p| p.read_number(16, Some(4), true));
match group {
Some(g) => *slot = g,
None => return (i, false),
}
}
(groups.len(), false)
}
self.read_atomically(|p| {
// Read the front part of the address; either the whole thing, or up
// to the first ::
let mut head = [0; 8];
let (head_size, head_ipv4) = read_groups(p, &mut head);
if head_size == 8 {
return Some(head.into());
}
// IPv4 part is not allowed before `::`
if head_ipv4 {
return None;
}
// Read `::` if previous code parsed less than 8 groups.
// `::` indicates one or more groups of 16 bits of zeros.
p.read_given_char(':')?;
p.read_given_char(':')?;
// Read the back part of the address. The :: must contain at least one
// set of zeroes, so our max length is 7.
let mut tail = [0; 7];
let limit = 8 - (head_size + 1);
let (tail_size, _) = read_groups(p, &mut tail[..limit]);
// Concat the head and tail of the IP address
head[(8 - tail_size)..8].copy_from_slice(&tail[..tail_size]);
Some(head.into())
})
}
/// Read an IP Address, either IPv4 or IPv6.
fn read_ip_addr(&mut self) -> Option<IpAddr> {
self.read_ipv4_addr().map(IpAddr::V4).or_else(move || self.read_ipv6_addr().map(IpAddr::V6))
}
/// Read a `:` followed by a port in base 10.
fn read_port(&mut self) -> Option<u16> {
self.read_atomically(|p| {
p.read_given_char(':')?;
p.read_number(10, None, true)
})
}
/// Read a `%` followed by a scope ID in base 10.
fn read_scope_id(&mut self) -> Option<u32> {
self.read_atomically(|p| {
p.read_given_char('%')?;
p.read_number(10, None, true)
})
}
/// Read an IPv4 address with a port.
fn read_socket_addr_v4(&mut self) -> Option<SocketAddrV4> {
self.read_atomically(|p| {
let ip = p.read_ipv4_addr()?;
let port = p.read_port()?;
Some(SocketAddrV4::new(ip, port))
})
}
/// Read an IPv6 address with a port.
fn read_socket_addr_v6(&mut self) -> Option<SocketAddrV6> {
self.read_atomically(|p| {
p.read_given_char('[')?;
let ip = p.read_ipv6_addr()?;
let scope_id = p.read_scope_id().unwrap_or(0);
p.read_given_char(']')?;
let port = p.read_port()?;
Some(SocketAddrV6::new(ip, port, 0, scope_id))
})
}
/// Read an IP address with a port
fn read_socket_addr(&mut self) -> Option<SocketAddr> {
self.read_socket_addr_v4()
.map(SocketAddr::V4)
.or_else(|| self.read_socket_addr_v6().map(SocketAddr::V6))
}
}
impl IpAddr {
/// Parse an IP address from a slice of bytes.
///
/// ```
/// #![feature(addr_parse_ascii)]
///
/// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
///
/// let localhost_v4 = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1));
/// let localhost_v6 = IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1));
///
/// assert_eq!(IpAddr::parse_ascii(b"127.0.0.1"), Ok(localhost_v4));
/// assert_eq!(IpAddr::parse_ascii(b"::1"), Ok(localhost_v6));
/// ```
pub fn parse_ascii(b: &[u8]) -> Result<Self, AddrParseError> {
Parser::new(b).parse_with(|p| p.read_ip_addr(), AddrKind::Ip)
}
}
impl FromStr for IpAddr {
type Err = AddrParseError;
fn from_str(s: &str) -> Result<IpAddr, AddrParseError> {
Self::parse_ascii(s.as_bytes())
}
}
impl Ipv4Addr {
/// Parse an IPv4 | {
self.read_atomically(move |p| {
if index > 0 {
p.read_given_char(sep)?;
}
inner(p)
})
} | identifier_body |
cubic64.rs | ].sort_by(cmp_f64);
let mut valid_count = 0;
let mut index = 0;
while index < extrema {
let min = extreme_ts[index];
index += 1;
let max = extreme_ts[index];
if min == max {
continue;
}
let new_t = self.binary_search(min, max, axis_intercept, x_axis);
if new_t >= 0.0 {
if valid_count >= 3 {
return 0;
}
valid_roots[valid_count] = new_t;
valid_count += 1;
}
}
valid_count
}
fn find_inflections(&self, t_values: &mut [f64]) -> usize {
let ax = self.points[1].x - self.points[0].x;
let ay = self.points[1].y - self.points[0].y;
let bx = self.points[2].x - 2.0 * self.points[1].x + self.points[0].x;
let by = self.points[2].y - 2.0 * self.points[1].y + self.points[0].y;
let cx = self.points[3].x + 3.0 * (self.points[1].x - self.points[2].x) - self.points[0].x;
let cy = self.points[3].y + 3.0 * (self.points[1].y - self.points[2].y) - self.points[0].y;
quad64::roots_valid_t(
bx * cy - by * cx,
ax * cy - ay * cx,
ax * by - ay * bx,
t_values,
)
}
// give up when changing t no longer moves point
// also, copy point rather than recompute it when it does change
fn binary_search(&self, min: f64, max: f64, axis_intercept: f64, x_axis: SearchAxis) -> f64 {
let mut t = (min + max) / 2.0;
let mut step = (t - min) / 2.0;
let mut cubic_at_t = self.point_at_t(t);
let mut calc_pos = cubic_at_t.axis_coord(x_axis);
let mut calc_dist = calc_pos - axis_intercept;
loop {
let prior_t = min.max(t - step);
let less_pt = self.point_at_t(prior_t);
if less_pt.x.approximately_equal_half(cubic_at_t.x)
&& less_pt.y.approximately_equal_half(cubic_at_t.y)
{
return -1.0; // binary search found no point at this axis intercept
}
let less_dist = less_pt.axis_coord(x_axis) - axis_intercept;
let last_step = step;
step /= 2.0;
let ok = if calc_dist > 0.0 {
calc_dist > less_dist
} else {
calc_dist < less_dist
};
if ok {
t = prior_t;
} else | continue;
}
t = next_t;
}
let test_at_t = self.point_at_t(t);
cubic_at_t = test_at_t;
calc_pos = cubic_at_t.axis_coord(x_axis);
calc_dist = calc_pos - axis_intercept;
if calc_pos.approximately_equal(axis_intercept) {
break;
}
}
t
}
pub fn chop_at(&self, t: f64) -> Cubic64Pair {
let mut dst = [Point64::zero(); 7];
if t == 0.5 {
dst[0] = self.points[0];
dst[1].x = (self.points[0].x + self.points[1].x) / 2.0;
dst[1].y = (self.points[0].y + self.points[1].y) / 2.0;
dst[2].x = (self.points[0].x + 2.0 * self.points[1].x + self.points[2].x) / 4.0;
dst[2].y = (self.points[0].y + 2.0 * self.points[1].y + self.points[2].y) / 4.0;
dst[3].x =
(self.points[0].x + 3.0 * (self.points[1].x + self.points[2].x) + self.points[3].x)
/ 8.0;
dst[3].y =
(self.points[0].y + 3.0 * (self.points[1].y + self.points[2].y) + self.points[3].y)
/ 8.0;
dst[4].x = (self.points[1].x + 2.0 * self.points[2].x + self.points[3].x) / 4.0;
dst[4].y = (self.points[1].y + 2.0 * self.points[2].y + self.points[3].y) / 4.0;
dst[5].x = (self.points[2].x + self.points[3].x) / 2.0;
dst[5].y = (self.points[2].y + self.points[3].y) / 2.0;
dst[6] = self.points[3];
Cubic64Pair { points: dst }
} else {
interp_cubic_coords_x(&self.points, t, &mut dst);
interp_cubic_coords_y(&self.points, t, &mut dst);
Cubic64Pair { points: dst }
}
}
}
pub fn coefficients(src: &[f64]) -> (f64, f64, f64, f64) {
let mut a = src[6]; // d
let mut b = src[4] * 3.0; // 3*c
let mut c = src[2] * 3.0; // 3*b
let d = src[0]; // a
a -= d - c + b; // A = -a + 3*b - 3*c + d
b += 3.0 * d - 2.0 * c; // B = 3*a - 6*b + 3*c
c -= 3.0 * d; // C = -3*a + 3*b
(a, b, c, d)
}
// from SkGeometry.cpp (and Numeric Solutions, 5.6)
pub fn roots_valid_t(a: f64, b: f64, c: f64, d: f64, t: &mut [f64; 3]) -> usize {
let mut s = [0.0; 3];
let real_roots = roots_real(a, b, c, d, &mut s);
let mut found_roots = quad64::push_valid_ts(&s, real_roots, t);
'outer: for index in 0..real_roots {
let t_value = s[index];
if !t_value.approximately_one_or_less() && t_value.between(1.0, 1.00005) {
for idx2 in 0..found_roots {
if t[idx2].approximately_equal(1.0) {
continue 'outer;
}
}
debug_assert!(found_roots < 3);
t[found_roots] = 1.0;
found_roots += 1;
} else if !t_value.approximately_zero_or_more() && t_value.between(-0.00005, 0.0) {
for idx2 in 0..found_roots {
if t[idx2].approximately_equal(0.0) {
continue 'outer;
}
}
debug_assert!(found_roots < 3);
t[found_roots] = 0.0;
found_roots += 1;
}
}
found_roots
}
fn roots_real(a: f64, b: f64, c: f64, d: f64, s: &mut [f64; 3]) -> usize {
if a.approximately_zero()
&& a.approximately_zero_when_compared_to(b)
&& a.approximately_zero_when_compared_to(c)
&& a.approximately_zero_when_compared_to(d)
{
// we're just a quadratic
return quad64::roots_real(b, c, d | {
let next_t = t + last_step;
if next_t > max {
return -1.0;
}
let more_pt = self.point_at_t(next_t);
if more_pt.x.approximately_equal_half(cubic_at_t.x)
&& more_pt.y.approximately_equal_half(cubic_at_t.y)
{
return -1.0; // binary search found no point at this axis intercept
}
let more_dist = more_pt.axis_coord(x_axis) - axis_intercept;
let ok = if calc_dist > 0.0 {
calc_dist <= more_dist
} else {
calc_dist >= more_dist
};
if ok { | conditional_block |
cubic64.rs | _t;
valid_count += 1;
}
}
valid_count
}
fn find_inflections(&self, t_values: &mut [f64]) -> usize {
let ax = self.points[1].x - self.points[0].x;
let ay = self.points[1].y - self.points[0].y;
let bx = self.points[2].x - 2.0 * self.points[1].x + self.points[0].x;
let by = self.points[2].y - 2.0 * self.points[1].y + self.points[0].y;
let cx = self.points[3].x + 3.0 * (self.points[1].x - self.points[2].x) - self.points[0].x;
let cy = self.points[3].y + 3.0 * (self.points[1].y - self.points[2].y) - self.points[0].y;
quad64::roots_valid_t(
bx * cy - by * cx,
ax * cy - ay * cx,
ax * by - ay * bx,
t_values,
)
}
// give up when changing t no longer moves point
// also, copy point rather than recompute it when it does change
fn binary_search(&self, min: f64, max: f64, axis_intercept: f64, x_axis: SearchAxis) -> f64 {
let mut t = (min + max) / 2.0;
let mut step = (t - min) / 2.0;
let mut cubic_at_t = self.point_at_t(t);
let mut calc_pos = cubic_at_t.axis_coord(x_axis);
let mut calc_dist = calc_pos - axis_intercept;
loop {
let prior_t = min.max(t - step);
let less_pt = self.point_at_t(prior_t);
if less_pt.x.approximately_equal_half(cubic_at_t.x)
&& less_pt.y.approximately_equal_half(cubic_at_t.y)
{
return -1.0; // binary search found no point at this axis intercept
}
let less_dist = less_pt.axis_coord(x_axis) - axis_intercept;
let last_step = step;
step /= 2.0;
let ok = if calc_dist > 0.0 {
calc_dist > less_dist
} else {
calc_dist < less_dist
};
if ok {
t = prior_t;
} else {
let next_t = t + last_step;
if next_t > max {
return -1.0;
}
let more_pt = self.point_at_t(next_t);
if more_pt.x.approximately_equal_half(cubic_at_t.x)
&& more_pt.y.approximately_equal_half(cubic_at_t.y)
{
return -1.0; // binary search found no point at this axis intercept
}
let more_dist = more_pt.axis_coord(x_axis) - axis_intercept;
let ok = if calc_dist > 0.0 {
calc_dist <= more_dist
} else {
calc_dist >= more_dist
};
if ok {
continue;
}
t = next_t;
}
let test_at_t = self.point_at_t(t);
cubic_at_t = test_at_t;
calc_pos = cubic_at_t.axis_coord(x_axis);
calc_dist = calc_pos - axis_intercept;
if calc_pos.approximately_equal(axis_intercept) {
break;
}
}
t
}
pub fn chop_at(&self, t: f64) -> Cubic64Pair {
let mut dst = [Point64::zero(); 7];
if t == 0.5 {
dst[0] = self.points[0];
dst[1].x = (self.points[0].x + self.points[1].x) / 2.0;
dst[1].y = (self.points[0].y + self.points[1].y) / 2.0;
dst[2].x = (self.points[0].x + 2.0 * self.points[1].x + self.points[2].x) / 4.0;
dst[2].y = (self.points[0].y + 2.0 * self.points[1].y + self.points[2].y) / 4.0;
dst[3].x =
(self.points[0].x + 3.0 * (self.points[1].x + self.points[2].x) + self.points[3].x)
/ 8.0;
dst[3].y =
(self.points[0].y + 3.0 * (self.points[1].y + self.points[2].y) + self.points[3].y)
/ 8.0;
dst[4].x = (self.points[1].x + 2.0 * self.points[2].x + self.points[3].x) / 4.0;
dst[4].y = (self.points[1].y + 2.0 * self.points[2].y + self.points[3].y) / 4.0;
dst[5].x = (self.points[2].x + self.points[3].x) / 2.0;
dst[5].y = (self.points[2].y + self.points[3].y) / 2.0;
dst[6] = self.points[3];
Cubic64Pair { points: dst }
} else {
interp_cubic_coords_x(&self.points, t, &mut dst);
interp_cubic_coords_y(&self.points, t, &mut dst);
Cubic64Pair { points: dst }
}
}
}
pub fn coefficients(src: &[f64]) -> (f64, f64, f64, f64) {
let mut a = src[6]; // d
let mut b = src[4] * 3.0; // 3*c
let mut c = src[2] * 3.0; // 3*b
let d = src[0]; // a
a -= d - c + b; // A = -a + 3*b - 3*c + d
b += 3.0 * d - 2.0 * c; // B = 3*a - 6*b + 3*c
c -= 3.0 * d; // C = -3*a + 3*b
(a, b, c, d)
}
// from SkGeometry.cpp (and Numeric Solutions, 5.6)
pub fn roots_valid_t(a: f64, b: f64, c: f64, d: f64, t: &mut [f64; 3]) -> usize {
let mut s = [0.0; 3];
let real_roots = roots_real(a, b, c, d, &mut s);
let mut found_roots = quad64::push_valid_ts(&s, real_roots, t);
'outer: for index in 0..real_roots {
let t_value = s[index];
if !t_value.approximately_one_or_less() && t_value.between(1.0, 1.00005) {
for idx2 in 0..found_roots {
if t[idx2].approximately_equal(1.0) {
continue 'outer;
}
}
debug_assert!(found_roots < 3);
t[found_roots] = 1.0;
found_roots += 1;
} else if !t_value.approximately_zero_or_more() && t_value.between(-0.00005, 0.0) {
for idx2 in 0..found_roots {
if t[idx2].approximately_equal(0.0) {
continue 'outer;
}
}
debug_assert!(found_roots < 3);
t[found_roots] = 0.0;
found_roots += 1;
}
}
found_roots
}
fn roots_real(a: f64, b: f64, c: f64, d: f64, s: &mut [f64; 3]) -> usize {
if a.approximately_zero()
&& a.approximately_zero_when_compared_to(b)
&& a.approximately_zero_when_compared_to(c)
&& a.approximately_zero_when_compared_to(d)
{
// we're just a quadratic
return quad64::roots_real(b, c, d, s);
}
if d.approximately_zero_when_compared_to(a)
&& d.approximately_zero_when_compared_to(b)
&& d.approximately_zero_when_compared_to(c)
{
// 0 is one root
let mut num = quad64::roots_real(a, b, c, s);
for i in 0..num {
if s[i].approximately_zero() {
return num;
}
}
s[num] = 0.0;
num += 1; |
return num; | random_line_split | |
cubic64.rs | ].sort_by(cmp_f64);
let mut valid_count = 0;
let mut index = 0;
while index < extrema {
let min = extreme_ts[index];
index += 1;
let max = extreme_ts[index];
if min == max {
continue;
}
let new_t = self.binary_search(min, max, axis_intercept, x_axis);
if new_t >= 0.0 {
if valid_count >= 3 {
return 0;
}
valid_roots[valid_count] = new_t;
valid_count += 1;
}
}
valid_count
}
fn find_inflections(&self, t_values: &mut [f64]) -> usize {
let ax = self.points[1].x - self.points[0].x;
let ay = self.points[1].y - self.points[0].y;
let bx = self.points[2].x - 2.0 * self.points[1].x + self.points[0].x;
let by = self.points[2].y - 2.0 * self.points[1].y + self.points[0].y;
let cx = self.points[3].x + 3.0 * (self.points[1].x - self.points[2].x) - self.points[0].x;
let cy = self.points[3].y + 3.0 * (self.points[1].y - self.points[2].y) - self.points[0].y;
quad64::roots_valid_t(
bx * cy - by * cx,
ax * cy - ay * cx,
ax * by - ay * bx,
t_values,
)
}
// give up when changing t no longer moves point
// also, copy point rather than recompute it when it does change
fn binary_search(&self, min: f64, max: f64, axis_intercept: f64, x_axis: SearchAxis) -> f64 {
let mut t = (min + max) / 2.0;
let mut step = (t - min) / 2.0;
let mut cubic_at_t = self.point_at_t(t);
let mut calc_pos = cubic_at_t.axis_coord(x_axis);
let mut calc_dist = calc_pos - axis_intercept;
loop {
let prior_t = min.max(t - step);
let less_pt = self.point_at_t(prior_t);
if less_pt.x.approximately_equal_half(cubic_at_t.x)
&& less_pt.y.approximately_equal_half(cubic_at_t.y)
{
return -1.0; // binary search found no point at this axis intercept
}
let less_dist = less_pt.axis_coord(x_axis) - axis_intercept;
let last_step = step;
step /= 2.0;
let ok = if calc_dist > 0.0 {
calc_dist > less_dist
} else {
calc_dist < less_dist
};
if ok {
t = prior_t;
} else {
let next_t = t + last_step;
if next_t > max {
return -1.0;
}
let more_pt = self.point_at_t(next_t);
if more_pt.x.approximately_equal_half(cubic_at_t.x)
&& more_pt.y.approximately_equal_half(cubic_at_t.y)
{
return -1.0; // binary search found no point at this axis intercept
}
let more_dist = more_pt.axis_coord(x_axis) - axis_intercept;
let ok = if calc_dist > 0.0 {
calc_dist <= more_dist
} else {
calc_dist >= more_dist
};
if ok {
continue;
}
t = next_t;
}
let test_at_t = self.point_at_t(t);
cubic_at_t = test_at_t;
calc_pos = cubic_at_t.axis_coord(x_axis);
calc_dist = calc_pos - axis_intercept;
if calc_pos.approximately_equal(axis_intercept) {
break;
}
}
t
}
pub fn chop_at(&self, t: f64) -> Cubic64Pair {
let mut dst = [Point64::zero(); 7];
if t == 0.5 {
dst[0] = self.points[0];
dst[1].x = (self.points[0].x + self.points[1].x) / 2.0;
dst[1].y = (self.points[0].y + self.points[1].y) / 2.0;
dst[2].x = (self.points[0].x + 2.0 * self.points[1].x + self.points[2].x) / 4.0;
dst[2].y = (self.points[0].y + 2.0 * self.points[1].y + self.points[2].y) / 4.0;
dst[3].x =
(self.points[0].x + 3.0 * (self.points[1].x + self.points[2].x) + self.points[3].x)
/ 8.0;
dst[3].y =
(self.points[0].y + 3.0 * (self.points[1].y + self.points[2].y) + self.points[3].y)
/ 8.0;
dst[4].x = (self.points[1].x + 2.0 * self.points[2].x + self.points[3].x) / 4.0;
dst[4].y = (self.points[1].y + 2.0 * self.points[2].y + self.points[3].y) / 4.0;
dst[5].x = (self.points[2].x + self.points[3].x) / 2.0;
dst[5].y = (self.points[2].y + self.points[3].y) / 2.0;
dst[6] = self.points[3];
Cubic64Pair { points: dst }
} else {
interp_cubic_coords_x(&self.points, t, &mut dst);
interp_cubic_coords_y(&self.points, t, &mut dst);
Cubic64Pair { points: dst }
}
}
}
pub fn | (src: &[f64]) -> (f64, f64, f64, f64) {
let mut a = src[6]; // d
let mut b = src[4] * 3.0; // 3*c
let mut c = src[2] * 3.0; // 3*b
let d = src[0]; // a
a -= d - c + b; // A = -a + 3*b - 3*c + d
b += 3.0 * d - 2.0 * c; // B = 3*a - 6*b + 3*c
c -= 3.0 * d; // C = -3*a + 3*b
(a, b, c, d)
}
// from SkGeometry.cpp (and Numeric Solutions, 5.6)
pub fn roots_valid_t(a: f64, b: f64, c: f64, d: f64, t: &mut [f64; 3]) -> usize {
let mut s = [0.0; 3];
let real_roots = roots_real(a, b, c, d, &mut s);
let mut found_roots = quad64::push_valid_ts(&s, real_roots, t);
'outer: for index in 0..real_roots {
let t_value = s[index];
if !t_value.approximately_one_or_less() && t_value.between(1.0, 1.00005) {
for idx2 in 0..found_roots {
if t[idx2].approximately_equal(1.0) {
continue 'outer;
}
}
debug_assert!(found_roots < 3);
t[found_roots] = 1.0;
found_roots += 1;
} else if !t_value.approximately_zero_or_more() && t_value.between(-0.00005, 0.0) {
for idx2 in 0..found_roots {
if t[idx2].approximately_equal(0.0) {
continue 'outer;
}
}
debug_assert!(found_roots < 3);
t[found_roots] = 0.0;
found_roots += 1;
}
}
found_roots
}
fn roots_real(a: f64, b: f64, c: f64, d: f64, s: &mut [f64; 3]) -> usize {
if a.approximately_zero()
&& a.approximately_zero_when_compared_to(b)
&& a.approximately_zero_when_compared_to(c)
&& a.approximately_zero_when_compared_to(d)
{
// we're just a quadratic
return quad64::roots_real(b, c, | coefficients | identifier_name |
cubic64.rs | ].sort_by(cmp_f64);
let mut valid_count = 0;
let mut index = 0;
while index < extrema {
let min = extreme_ts[index];
index += 1;
let max = extreme_ts[index];
if min == max {
continue;
}
let new_t = self.binary_search(min, max, axis_intercept, x_axis);
if new_t >= 0.0 {
if valid_count >= 3 {
return 0;
}
valid_roots[valid_count] = new_t;
valid_count += 1;
}
}
valid_count
}
fn find_inflections(&self, t_values: &mut [f64]) -> usize {
let ax = self.points[1].x - self.points[0].x;
let ay = self.points[1].y - self.points[0].y;
let bx = self.points[2].x - 2.0 * self.points[1].x + self.points[0].x;
let by = self.points[2].y - 2.0 * self.points[1].y + self.points[0].y;
let cx = self.points[3].x + 3.0 * (self.points[1].x - self.points[2].x) - self.points[0].x;
let cy = self.points[3].y + 3.0 * (self.points[1].y - self.points[2].y) - self.points[0].y;
quad64::roots_valid_t(
bx * cy - by * cx,
ax * cy - ay * cx,
ax * by - ay * bx,
t_values,
)
}
// give up when changing t no longer moves point
// also, copy point rather than recompute it when it does change
fn binary_search(&self, min: f64, max: f64, axis_intercept: f64, x_axis: SearchAxis) -> f64 {
let mut t = (min + max) / 2.0;
let mut step = (t - min) / 2.0;
let mut cubic_at_t = self.point_at_t(t);
let mut calc_pos = cubic_at_t.axis_coord(x_axis);
let mut calc_dist = calc_pos - axis_intercept;
loop {
let prior_t = min.max(t - step);
let less_pt = self.point_at_t(prior_t);
if less_pt.x.approximately_equal_half(cubic_at_t.x)
&& less_pt.y.approximately_equal_half(cubic_at_t.y)
{
return -1.0; // binary search found no point at this axis intercept
}
let less_dist = less_pt.axis_coord(x_axis) - axis_intercept;
let last_step = step;
step /= 2.0;
let ok = if calc_dist > 0.0 {
calc_dist > less_dist
} else {
calc_dist < less_dist
};
if ok {
t = prior_t;
} else {
let next_t = t + last_step;
if next_t > max {
return -1.0;
}
let more_pt = self.point_at_t(next_t);
if more_pt.x.approximately_equal_half(cubic_at_t.x)
&& more_pt.y.approximately_equal_half(cubic_at_t.y)
{
return -1.0; // binary search found no point at this axis intercept
}
let more_dist = more_pt.axis_coord(x_axis) - axis_intercept;
let ok = if calc_dist > 0.0 {
calc_dist <= more_dist
} else {
calc_dist >= more_dist
};
if ok {
continue;
}
t = next_t;
}
let test_at_t = self.point_at_t(t);
cubic_at_t = test_at_t;
calc_pos = cubic_at_t.axis_coord(x_axis);
calc_dist = calc_pos - axis_intercept;
if calc_pos.approximately_equal(axis_intercept) {
break;
}
}
t
}
pub fn chop_at(&self, t: f64) -> Cubic64Pair {
let mut dst = [Point64::zero(); 7];
if t == 0.5 {
dst[0] = self.points[0];
dst[1].x = (self.points[0].x + self.points[1].x) / 2.0;
dst[1].y = (self.points[0].y + self.points[1].y) / 2.0;
dst[2].x = (self.points[0].x + 2.0 * self.points[1].x + self.points[2].x) / 4.0;
dst[2].y = (self.points[0].y + 2.0 * self.points[1].y + self.points[2].y) / 4.0;
dst[3].x =
(self.points[0].x + 3.0 * (self.points[1].x + self.points[2].x) + self.points[3].x)
/ 8.0;
dst[3].y =
(self.points[0].y + 3.0 * (self.points[1].y + self.points[2].y) + self.points[3].y)
/ 8.0;
dst[4].x = (self.points[1].x + 2.0 * self.points[2].x + self.points[3].x) / 4.0;
dst[4].y = (self.points[1].y + 2.0 * self.points[2].y + self.points[3].y) / 4.0;
dst[5].x = (self.points[2].x + self.points[3].x) / 2.0;
dst[5].y = (self.points[2].y + self.points[3].y) / 2.0;
dst[6] = self.points[3];
Cubic64Pair { points: dst }
} else {
interp_cubic_coords_x(&self.points, t, &mut dst);
interp_cubic_coords_y(&self.points, t, &mut dst);
Cubic64Pair { points: dst }
}
}
}
pub fn coefficients(src: &[f64]) -> (f64, f64, f64, f64) {
let mut a = src[6]; // d
let mut b = src[4] * 3.0; // 3*c
let mut c = src[2] * 3.0; // 3*b
let d = src[0]; // a
a -= d - c + b; // A = -a + 3*b - 3*c + d
b += 3.0 * d - 2.0 * c; // B = 3*a - 6*b + 3*c
c -= 3.0 * d; // C = -3*a + 3*b
(a, b, c, d)
}
// from SkGeometry.cpp (and Numeric Solutions, 5.6)
pub fn roots_valid_t(a: f64, b: f64, c: f64, d: f64, t: &mut [f64; 3]) -> usize | }
}
debug_assert!(found_roots < 3);
t[found_roots] = 0.0;
found_roots += 1;
}
}
found_roots
}
fn roots_real(a: f64, b: f64, c: f64, d: f64, s: &mut [f64; 3]) -> usize {
if a.approximately_zero()
&& a.approximately_zero_when_compared_to(b)
&& a.approximately_zero_when_compared_to(c)
&& a.approximately_zero_when_compared_to(d)
{
// we're just a quadratic
return quad64::roots_real(b, c, d | {
let mut s = [0.0; 3];
let real_roots = roots_real(a, b, c, d, &mut s);
let mut found_roots = quad64::push_valid_ts(&s, real_roots, t);
'outer: for index in 0..real_roots {
let t_value = s[index];
if !t_value.approximately_one_or_less() && t_value.between(1.0, 1.00005) {
for idx2 in 0..found_roots {
if t[idx2].approximately_equal(1.0) {
continue 'outer;
}
}
debug_assert!(found_roots < 3);
t[found_roots] = 1.0;
found_roots += 1;
} else if !t_value.approximately_zero_or_more() && t_value.between(-0.00005, 0.0) {
for idx2 in 0..found_roots {
if t[idx2].approximately_equal(0.0) {
continue 'outer; | identifier_body |
main.py | ="Light blue", fg=mycolor)
l3.place(x=210, y=80)
l3=tk.Label(window1,text="Amount",bg="Light blue", fg=mycolor)
l3.place(x=310, y=80)
def store() :
global added_count
added_count=added_count+1
global e1,e2
usern=e1.get()
phno=e2.get()
x=entry1.get()
y=entry2.get()
z=entry3.get()
y=int(y)
z=int(z)
w=z*y
l4=tk.Label(window1,text=(str(w)+"Rs."),bg="Light blue", fg=mycolor)
l4.place(x=310,y=ind)
l5=tk.Label(window1,text="Added.",bg="Light blue", fg=mycolor)
l5.place(x=410,y=ind)
curr_user.append(usern)
curr_phone.append(phno)
curr_date.append(date)
curr_time.append(time)
curr_name.append(x)
curr_price.append(y)
curr_quantity.append(z)
curr_amount.append(w)
curr_cno.append(cno)
def newent() :
global newent_count
newent_count=newent_count+1
if(newent_count!=added_count+1 and newent_count!=0):
store()
global ind
ind=ind+20
global entry1,entry2,entry3
entry1=tk.Entry(window1)
entry1.place(x=10,y=ind)
entry = AutocompleteEntry(entry1)
test_list=list(set(pd.read_csv("./database.csv")['Name']))
if(np.nan in test_list):
test_list.remove(np.nan)
entry.set_completion_list(test_list)
entry.pack()
entry.focus_set()
entry2=tk.Entry(window1)
entry2.place(x=110,y=ind)
entry3=tk.Entry(window1)
entry3.place(x=210,y=ind)
button1=tk.Button(window1,text="Add",command=store,fg="White", bg=mycolor)
button1.place(x=400,y=430)
button1=tk.Button(window1,text="New item",command=newent, fg="White", bg=mycolor)
button1.place(x=400,y=400)
'''Below function requires changes for different users'''
def send_text() :
text="Thank you for shopping with us! Here's your bill: "
for i in range(len(curr_name)):
text+=str(curr_name[i])+" - Rs."+str(curr_amount[i])+"\n"
total_amount=0
for k in curr_amount :
total_amount=total_amount+k
text+="Total: "+str(total_amount)
from twilio.rest import Client
'''Create Twilio Account to get account_sid and auth_token'''
account_sid = 'Account_sid'
auth_token = 'Acc_Token'
client = Client(account_sid, auth_token)
'''from_ = 'whatsapp:+the number assigned by twilio','''
message = client.messages.create(
from_='whatsapp:+000000000',
body=text,
to='whatsapp:+91'+curr_phone[0]
)
print(message.sid)
def subm() :
global ind
overall_user.extend(curr_user)
overall_phone.extend(curr_phone)
overall_date.extend(curr_date)
overall_time.extend(curr_time)
overall_name.extend(curr_name)
overall_price.extend(curr_price)
overall_quantity.extend(curr_quantity)
overall_amount.extend(curr_amount)
overall_cno.extend(curr_cno)
df=pd.DataFrame({"UserName":overall_user,"Phone":overall_phone,"Date":overall_date,"Time":overall_time,"Name":overall_name,"Price":overall_price,"Quantity":overall_quantity,"Amount":overall_amount,"Customer No" : overall_cno })
df.to_csv("./database.csv",index=False)
ans=0
for k in curr_amount :
ans=ans+k
op=tk.Label(window1,text="Submission successful. Thank you for shopping! Click below button to print bill",bg="Light blue", fg=mycolor)
op.place(x=50,y=ind+50)
op1=tk.Label(window1,text=("Total amount : "+ str(ans) + "Rs."),bg="Light blue", fg=mycolor)
op1.place(x=50,y=ind+80)
button1=tk.Button(window1,text="Print Bill",command=print_bill, fg="White", bg=mycolor)
button1.place(x=0,y=400)
send_text()
button3=tk.Button(window1,text="Submit",command=subm, fg="White", bg=mycolor)
button3.place(x=400,y=460)
lg=[]
def recm() :
df_new=pd.read_csv("./database.csv")
for i in range(cno+1) :
lg=[]
for z in df_new.index :
if df_new.iloc[z][8]==i :
lg.append(df_new.iloc[z][4])
arrec.append(lg)
booldata=te.fit(arrec).transform(arrec)
dff_new=pd.DataFrame(booldata,columns=te.columns_)
freq_items=apriori(dff_new,min_support=0.05,use_colnames=True)
freq_items['Length']=freq_items['itemsets'].apply(lambda x: len(x))
recc=freq_items[(freq_items['Length']>=2) & (freq_items['support']>=0.02)]
op=(recc.iloc[:,1].to_string(index=False)).split('\n')
window_rec=tk.Tk()
window_rec.title("Recommendations")
window_rec.configure(background=mycolor)
window_rec.geometry('300x300')
for zz in op :
l1=tk.Label(window_rec,text=zz,fg="White", bg=mycolor)
l1.pack()
button4=tk.Button(window1,text="Recommend",command=recm,fg="White", bg=mycolor)
button4.place(x=400,y=490)
f=0
def det() :
w11=tk.Tk()
w11.title("Find Details")
w11.configure(background=mycolor)
w11.geometry('600x600')
l12=tk.Label(w11,text="Username",fg="White", bg=mycolor)
l12.place(x=100,y=50)
e12=tk.Entry(w11)
e12.place(x=160,y=50)
l22=tk.Label(w11,text="Phone",fg="White", bg=mycolor)
l22.place(x=100,y=80)
e22=tk.Entry(w11)
e22.place(x=160,y=80)
def det2() :
df_d=pd.read_csv("./database.csv")
global det_ind
zzz=e12.get()
yyy=e22.get()
laa1=tk.Label(w11,text="Date",fg="White", bg=mycolor)
laa2=tk.Label(w11,text="Time",fg="White", bg=mycolor)
laa3=tk.Label(w11,text="Product",fg="White", bg=mycolor)
laa4=tk.Label(w11,text="Price",fg="White", bg=mycolor)
laa5=tk.Label(w11,text="Quantity",fg="White", bg=mycolor)
laa6=tk.Label(w11,text="Amount",fg="White", bg=mycolor)
laa1.place(x=30,y=160)
laa2.place(x=100,y=160)
laa3.place(x=170,y=160)
laa4.place(x=240,y=160)
laa5.place(x=310,y=160)
laa6.place(x=380,y=160)
global f
for j in df_d.index :
if (df_d.iloc[j][0]==zzz) & (df_d.iloc[j][1]==int(yyy)) :
| f=1
la1=tk.Label(w11,text=df_d.iloc[j][2],fg="White", bg=mycolor)
la2=tk.Label(w11,text=df_d.iloc[j][3],fg="White", bg=mycolor)
la3=tk.Label(w11,text=df_d.iloc[j][4],fg="White", bg=mycolor)
la4=tk.Label(w11,text=df_d.iloc[j][5],fg="White", bg=mycolor)
la5=tk.Label(w11,text=df_d.iloc[j][6],fg="White", bg=mycolor)
la6=tk.Label(w11,text=df_d.iloc[j][7],fg="White", bg=mycolor)
la1.place(x=30,y=det_ind)
la2.place(x=100,y=det_ind)
la3.place(x=170,y=det_ind)
la4.place(x=240,y=det_ind)
la5.place(x=310,y=det_ind)
la6.place(x=380,y=det_ind)
det_ind=det_ind+30 | conditional_block | |
main.py | # now finally perform the auto completion
if self._hits:
self.delete(0,tk.END)
self.insert(0,self._hits[self._hit_index])
self.select_range(self.position,tk.END)
entry1.delete(0,tk.END)
entry1.insert(0,self.get())
def handle_keyrelease(self, event):
"""event handler for the keyrelease event on this widget"""
if event.keysym == "BackSpace":
self.delete(self.index(tk.INSERT), tk.END)
self.position = self.index(tk.END)
if event.keysym == "Left":
if self.position < self.index(tk.END): # delete the selection
self.delete(self.position, tk.END)
else:
self.position = self.position-1 # delete one character
self.delete(self.position, tk.END)
if event.keysym == "Right":
self.position = self.index(tk.END) # go to end (no selection)
if event.keysym == "Down":
self.autocomplete(1) # cycle to next hit
if event.keysym == "Up":
self.autocomplete(-1) # cycle to previous hit
if len(event.keysym) == 1 or event.keysym in tkinter_umlauts:
self.autocomplete()
overall_user=dff.iloc[:,0]
overall_user=np.array(overall_user)
overall_user=list(overall_user)
overall_phone=dff.iloc[:,1]
overall_phone=np.array(overall_phone)
overall_phone=list(overall_phone)
overall_date=dff.iloc[:,2]
overall_date=np.array(overall_date)
overall_date=list(overall_date)
overall_time=dff.iloc[:,3]
overall_time=np.array(overall_time)
overall_time=list(overall_time)
overall_name=dff.iloc[:,4]
overall_name=np.array(overall_name)
overall_name=list(overall_name)
overall_price=dff.iloc[:,5]
overall_price=np.array(overall_price)
overall_price=list(overall_price)
overall_quantity=dff.iloc[:,6]
overall_quantity=np.array(overall_quantity)
overall_quantity=list(overall_quantity)
overall_amount=dff.iloc[:,7]
overall_amount=np.array(overall_amount)
overall_amount=list(overall_amount)
overall_cno=dff.iloc[:,8]
overall_cno=np.array(overall_cno)
overall_cno=list(overall_cno)
cno=dff["Customer No"][len(overall_cno)-1] + 1
curr_user=[]
curr_phone=[]
curr_date=[]
curr_time=[]
curr_name=[]
curr_price=[]
curr_quantity=[]
curr_amount=[]
curr_cno=[]
def print_bill():
if os.path.isfile('print.txt'):
os.remove('print.txt')
with open('print.txt','a') as file:
file.write('\t\tThank you for shopping\t\t\n')
file.write('\t\t-----------------------\t\t\n')
file.write(f'{curr_date[0]}\t\t\t{curr_time[0]}\n')
file.write(f'Customer Name: {curr_user[0]}\n')
file.write(f'Customer Phone: {curr_phone[0]}\n')
file.write('Product\t\t\tQuantity\t\tPrice\t\t\tAmount\n')
for i in range(len(curr_name)):
with open('print.txt','a') as file:
file.write(f'{curr_name[i]}\t\t\t{curr_quantity[i]}\t\t\t{curr_price[i]}\t\t\t{curr_amount[i]}\n')
with open('print.txt','a') as file:
file.write(f'Payable Amount:\tRs.{sum(curr_amount)}\n')
os.startfile("print.txt", "print") #print bill using printer
window1=tk.Tk()
window1.configure(background="Light blue")
window1.title("Supermarket Recommendation System")
window1.geometry('600x600')
now = datetime.datetime.now()
date=now.strftime("%Y-%m-%d")
time=now.strftime("%H:%M:%S")
timee=tk.Label(window1,text=time, bg="Light blue", fg=mycolor)
timee.place(x=200,y=15)
datee=tk.Label(window1,text=date,bg="Light blue", fg=mycolor)
datee.place(x=300,y=15)
e11=tk.Label(window1,text="Name : ",bg="Light blue", fg=mycolor)
e11.place(x=50,y=45)
e22=tk.Label(window1,text="Phone Number : ",bg="Light blue", fg=mycolor)
e22.place(x=270,y=45)
e1=tk.Entry(window1)
e1.place(x=100,y=45)
e2=tk.Entry(window1)
e2.place(x=380,y=45)
l1=tk.Label(window1,text="Item name",bg="Light blue", fg=mycolor)
l1.place(x=10, y=80)
l2=tk.Label(window1,text="Price",bg="Light blue", fg=mycolor)
l2.place(x=110, y=80)
l3=tk.Label(window1,text="Quantity",bg="Light blue", fg=mycolor)
l3.place(x=210, y=80)
l3=tk.Label(window1,text="Amount",bg="Light blue", fg=mycolor)
l3.place(x=310, y=80)
def store() :
global added_count
added_count=added_count+1
global e1,e2
usern=e1.get()
phno=e2.get()
x=entry1.get()
y=entry2.get()
z=entry3.get()
y=int(y)
z=int(z)
w=z*y
l4=tk.Label(window1,text=(str(w)+"Rs."),bg="Light blue", fg=mycolor)
l4.place(x=310,y=ind)
l5=tk.Label(window1,text="Added.",bg="Light blue", fg=mycolor)
l5.place(x=410,y=ind)
curr_user.append(usern)
curr_phone.append(phno)
curr_date.append(date)
curr_time.append(time)
curr_name.append(x)
curr_price.append(y)
curr_quantity.append(z)
curr_amount.append(w)
curr_cno.append(cno)
def newent() :
global newent_count
newent_count=newent_count+1
if(newent_count!=added_count+1 and newent_count!=0):
store()
global ind
ind=ind+20
global entry1,entry2,entry3
entry1=tk.Entry(window1)
entry1.place(x=10,y=ind)
entry = AutocompleteEntry(entry1)
test_list=list(set(pd.read_csv("./database.csv")['Name']))
if(np.nan in test_list):
test_list.remove(np.nan)
entry.set_completion_list(test_list)
entry.pack()
entry.focus_set()
entry2=tk.Entry(window1)
entry2.place(x=110,y=ind)
entry3=tk.Entry(window1)
entry3.place(x=210,y=ind)
button1=tk.Button(window1,text="Add",command=store,fg="White", bg=mycolor)
button1.place(x=400,y=430)
button1=tk.Button(window1,text="New item",command=newent, fg="White", bg=mycolor)
button1.place(x=400,y=400)
'''Below function requires changes for different users'''
def send_text() :
text="Thank you for shopping with us! Here's your bill: "
for i in range(len(curr_name)):
text+=str(curr_name[i])+" - Rs."+str(curr_amount[i])+"\n"
total_amount=0
for k in curr_amount :
total_amount=total_amount+k
text+="Total: "+str(total_amount)
from twilio.rest import Client
'''Create Twilio Account to get account_sid and auth_token'''
account_sid = 'Account_sid'
auth_token = 'Acc_Token'
client = Client(account_sid, auth_token)
'''from_ = 'whatsapp:+the number assigned by twilio','''
message = client.messages.create(
from_='whatsapp:+000000000',
body=text,
to='whatsapp:+91'+curr_phone[0]
)
print(message.sid)
def | () :
global ind
overall_user.extend(curr_user)
overall_phone.extend(curr_phone)
overall_date.extend(curr_date)
overall_time.extend(curr_time)
overall_name.extend(curr_name)
overall_price.extend(curr_price)
overall_quantity.extend(curr_quantity)
overall_amount.extend(curr_amount)
overall_cno.extend(curr_cno)
df=pd.DataFrame({"UserName":overall_user,"Phone":overall_phone,"Date":overall_date,"Time":overall_time,"Name":overall_name,"Price":overall_price,"Quantity":overall_quantity,"Amount":overall_amount,"Customer No" : overall_cno })
df.to_csv("./database.csv",index=False)
ans=0
for k in curr_amount :
ans=ans+k
op=tk.Label(window1,text="Submission successful. Thank you for shopping! Click below button to print bill",bg="Light blue", fg=mycolor)
op.place(x=50,y=ind+50)
op1=tk.Label(window1,text=("Total amount : "+ str(ans) + "Rs."),bg="Light blue", fg=mycolor)
op | subm | identifier_name |
main.py |
def autocomplete(self, delta=0):
"""autocomplete the Entry, delta may be 0/1/-1 to cycle through possible hits"""
if delta: # need to delete selection otherwise we would fix the current position
self.delete(self.position, tk.END)
else: # set position to end so selection starts where textentry ended
self.position = len(self.get())
# collect hits
_hits = []
for element in self._completion_list:
if element.lower().startswith(self.get().lower()): # Match case-insensitively
_hits.append(element)
# if we have a new hit list, keep this in mind
if _hits != self._hits:
self._hit_index = 0
self._hits=_hits
# only allow cycling if we are in a known hit list
if _hits == self._hits and self._hits:
self._hit_index = (self._hit_index + delta) % len(self._hits)
# now finally perform the auto completion
if self._hits:
self.delete(0,tk.END)
self.insert(0,self._hits[self._hit_index])
self.select_range(self.position,tk.END)
entry1.delete(0,tk.END)
entry1.insert(0,self.get())
def handle_keyrelease(self, event):
"""event handler for the keyrelease event on this widget"""
if event.keysym == "BackSpace":
self.delete(self.index(tk.INSERT), tk.END)
self.position = self.index(tk.END)
if event.keysym == "Left":
if self.position < self.index(tk.END): # delete the selection
self.delete(self.position, tk.END)
else:
self.position = self.position-1 # delete one character
self.delete(self.position, tk.END)
if event.keysym == "Right":
self.position = self.index(tk.END) # go to end (no selection)
if event.keysym == "Down":
self.autocomplete(1) # cycle to next hit
if event.keysym == "Up":
self.autocomplete(-1) # cycle to previous hit
if len(event.keysym) == 1 or event.keysym in tkinter_umlauts:
self.autocomplete()
overall_user=dff.iloc[:,0]
overall_user=np.array(overall_user)
overall_user=list(overall_user)
overall_phone=dff.iloc[:,1]
overall_phone=np.array(overall_phone)
overall_phone=list(overall_phone)
overall_date=dff.iloc[:,2]
overall_date=np.array(overall_date)
overall_date=list(overall_date)
overall_time=dff.iloc[:,3]
overall_time=np.array(overall_time)
overall_time=list(overall_time)
overall_name=dff.iloc[:,4]
overall_name=np.array(overall_name)
overall_name=list(overall_name)
overall_price=dff.iloc[:,5]
overall_price=np.array(overall_price)
overall_price=list(overall_price)
overall_quantity=dff.iloc[:,6]
overall_quantity=np.array(overall_quantity)
overall_quantity=list(overall_quantity)
overall_amount=dff.iloc[:,7]
overall_amount=np.array(overall_amount)
overall_amount=list(overall_amount)
overall_cno=dff.iloc[:,8]
overall_cno=np.array(overall_cno)
overall_cno=list(overall_cno)
cno=dff["Customer No"][len(overall_cno)-1] + 1
curr_user=[]
curr_phone=[]
curr_date=[]
curr_time=[]
curr_name=[]
curr_price=[]
curr_quantity=[]
curr_amount=[]
curr_cno=[]
def print_bill():
if os.path.isfile('print.txt'):
os.remove('print.txt')
with open('print.txt','a') as file:
file.write('\t\tThank you for shopping\t\t\n')
file.write('\t\t-----------------------\t\t\n')
file.write(f'{curr_date[0]}\t\t\t{curr_time[0]}\n')
file.write(f'Customer Name: {curr_user[0]}\n')
file.write(f'Customer Phone: {curr_phone[0]}\n')
file.write('Product\t\t\tQuantity\t\tPrice\t\t\tAmount\n')
for i in range(len(curr_name)):
with open('print.txt','a') as file:
file.write(f'{curr_name[i]}\t\t\t{curr_quantity[i]}\t\t\t{curr_price[i]}\t\t\t{curr_amount[i]}\n')
with open('print.txt','a') as file:
file.write(f'Payable Amount:\tRs.{sum(curr_amount)}\n')
os.startfile("print.txt", "print") #print bill using printer
window1=tk.Tk()
window1.configure(background="Light blue")
window1.title("Supermarket Recommendation System")
window1.geometry('600x600')
now = datetime.datetime.now()
date=now.strftime("%Y-%m-%d")
time=now.strftime("%H:%M:%S")
timee=tk.Label(window1,text=time, bg="Light blue", fg=mycolor)
timee.place(x=200,y=15)
datee=tk.Label(window1,text=date,bg="Light blue", fg=mycolor)
datee.place(x=300,y=15)
e11=tk.Label(window1,text="Name : ",bg="Light blue", fg=mycolor)
e11.place(x=50,y=45)
e22=tk.Label(window1,text="Phone Number : ",bg="Light blue", fg=mycolor)
e22.place(x=270,y=45)
e1=tk.Entry(window1)
e1.place(x=100,y=45)
e2=tk.Entry(window1)
e2.place(x=380,y=45)
l1=tk.Label(window1,text="Item name",bg="Light blue", fg=mycolor)
l1.place(x=10, y=80)
l2=tk.Label(window1,text="Price",bg="Light blue", fg=mycolor)
l2.place(x=110, y=80)
l3=tk.Label(window1,text="Quantity",bg="Light blue", fg=mycolor)
l3.place(x=210, y=80)
l3=tk.Label(window1,text="Amount",bg="Light blue", fg=mycolor)
l3.place(x=310, y=80)
def store() :
global added_count
added_count=added_count+1
global e1,e2
usern=e1.get()
phno=e2.get()
x=entry1.get()
y=entry2.get()
z=entry3.get()
y=int(y)
z=int(z)
w=z*y
l4=tk.Label(window1,text=(str(w)+"Rs."),bg="Light blue", fg=mycolor)
l4.place(x=310,y=ind)
l5=tk.Label(window1,text="Added.",bg="Light blue", fg=mycolor)
l5.place(x=410,y=ind)
curr_user.append(usern)
curr_phone.append(phno)
curr_date.append(date)
curr_time.append(time)
curr_name.append(x)
curr_price.append(y)
curr_quantity.append(z)
curr_amount.append(w)
curr_cno.append(cno)
def newent() :
global newent_count
newent_count=newent_count+1
if(newent_count!=added_count+1 and newent_count!=0):
store()
global ind
ind=ind+20
global entry1,entry2,entry3
entry1=tk.Entry(window1)
entry1.place(x=10,y=ind)
entry = AutocompleteEntry(entry1)
test_list=list(set(pd.read_csv("./database.csv")['Name']))
if(np.nan in test_list):
test_list.remove(np.nan)
entry.set_completion_list(test_list)
entry.pack()
entry.focus_set()
entry2=tk.Entry(window1)
entry2.place(x=110,y=ind)
entry3=tk.Entry(window1)
entry3.place(x=210,y=ind)
button1=tk.Button(window1,text="Add",command=store,fg="White", bg=mycolor)
button1.place(x=400,y=430)
button1=tk.Button(window1,text="New item",command=newent, fg="White", bg=mycolor)
button1.place(x=400,y=400)
'''Below function requires changes for different users'''
def send_text() :
text="Thank you for shopping with us! Here's your bill: "
for i in range(len(curr_name)):
text+=str(curr_name[i])+" - Rs."+str(curr_amount[i])+"\n"
total_amount=0
for k in curr_amount :
total_amount=total_amount+k
text+="Total: "+str(total_amount)
from twilio.rest import Client
'''Create Twilio Account to get account_sid and auth_token'''
account_sid = 'Account_sid'
auth_token = 'Acc_Token'
client = Client(account_sid, auth_token)
'''from_ = 'whatsapp:+the number assigned by twilio','''
message = client.messages.create(
from_='whatsapp:+0 | self._completion_list = sorted(completion_list, key=str.lower) # Work with a sorted list
self._hits = []
self._hit_index = 0
self.position = 0
self.bind('<KeyRelease>', self.handle_keyrelease) | identifier_body | |
main.py | from functools import partial
import requests
import pandas as pd
import numpy as np
import sys
import os
import tkinter.ttk
from mlxtend.preprocessing import TransactionEncoder
from mlxtend.frequent_patterns import apriori
te=TransactionEncoder()
dff=pd.read_csv("./database.csv")
ind=110
det_ind=200
arrec=[]
mycolor = '#%02x%02x%02x' % (50, 50, 50)
added_count=0
newent_count=0
twilio_account_id="API Key"
tkinter_umlauts=['odiaeresis', 'adiaeresis', 'udiaeresis', 'Odiaeresis', 'Adiaeresis', 'Udiaeresis', 'ssharp']
class AutocompleteEntry(tk.Entry):
"""
Subclass of tk.Entry that features autocompletion.
To enable autocompletion use set_completion_list(list) to define
a list of possible strings to hit.
To cycle through hits use down and up arrow keys.
"""
def set_completion_list(self, completion_list):
self._completion_list = sorted(completion_list, key=str.lower) # Work with a sorted list
self._hits = []
self._hit_index = 0
self.position = 0
self.bind('<KeyRelease>', self.handle_keyrelease)
def autocomplete(self, delta=0):
"""autocomplete the Entry, delta may be 0/1/-1 to cycle through possible hits"""
if delta: # need to delete selection otherwise we would fix the current position
self.delete(self.position, tk.END)
else: # set position to end so selection starts where textentry ended
self.position = len(self.get())
# collect hits
_hits = []
for element in self._completion_list:
if element.lower().startswith(self.get().lower()): # Match case-insensitively
_hits.append(element)
# if we have a new hit list, keep this in mind
if _hits != self._hits:
self._hit_index = 0
self._hits=_hits
# only allow cycling if we are in a known hit list
if _hits == self._hits and self._hits:
self._hit_index = (self._hit_index + delta) % len(self._hits)
# now finally perform the auto completion
if self._hits:
self.delete(0,tk.END)
self.insert(0,self._hits[self._hit_index])
self.select_range(self.position,tk.END)
entry1.delete(0,tk.END)
entry1.insert(0,self.get())
def handle_keyrelease(self, event):
"""event handler for the keyrelease event on this widget"""
if event.keysym == "BackSpace":
self.delete(self.index(tk.INSERT), tk.END)
self.position = self.index(tk.END)
if event.keysym == "Left":
if self.position < self.index(tk.END): # delete the selection
self.delete(self.position, tk.END)
else:
self.position = self.position-1 # delete one character
self.delete(self.position, tk.END)
if event.keysym == "Right":
self.position = self.index(tk.END) # go to end (no selection)
if event.keysym == "Down":
self.autocomplete(1) # cycle to next hit
if event.keysym == "Up":
self.autocomplete(-1) # cycle to previous hit
if len(event.keysym) == 1 or event.keysym in tkinter_umlauts:
self.autocomplete()
overall_user=dff.iloc[:,0]
overall_user=np.array(overall_user)
overall_user=list(overall_user)
overall_phone=dff.iloc[:,1]
overall_phone=np.array(overall_phone)
overall_phone=list(overall_phone)
overall_date=dff.iloc[:,2]
overall_date=np.array(overall_date)
overall_date=list(overall_date)
overall_time=dff.iloc[:,3]
overall_time=np.array(overall_time)
overall_time=list(overall_time)
overall_name=dff.iloc[:,4]
overall_name=np.array(overall_name)
overall_name=list(overall_name)
overall_price=dff.iloc[:,5]
overall_price=np.array(overall_price)
overall_price=list(overall_price)
overall_quantity=dff.iloc[:,6]
overall_quantity=np.array(overall_quantity)
overall_quantity=list(overall_quantity)
overall_amount=dff.iloc[:,7]
overall_amount=np.array(overall_amount)
overall_amount=list(overall_amount)
overall_cno=dff.iloc[:,8]
overall_cno=np.array(overall_cno)
overall_cno=list(overall_cno)
cno=dff["Customer No"][len(overall_cno)-1] + 1
curr_user=[]
curr_phone=[]
curr_date=[]
curr_time=[]
curr_name=[]
curr_price=[]
curr_quantity=[]
curr_amount=[]
curr_cno=[]
def print_bill():
if os.path.isfile('print.txt'):
os.remove('print.txt')
with open('print.txt','a') as file:
file.write('\t\tThank you for shopping\t\t\n')
file.write('\t\t-----------------------\t\t\n')
file.write(f'{curr_date[0]}\t\t\t{curr_time[0]}\n')
file.write(f'Customer Name: {curr_user[0]}\n')
file.write(f'Customer Phone: {curr_phone[0]}\n')
file.write('Product\t\t\tQuantity\t\tPrice\t\t\tAmount\n')
for i in range(len(curr_name)):
with open('print.txt','a') as file:
file.write(f'{curr_name[i]}\t\t\t{curr_quantity[i]}\t\t\t{curr_price[i]}\t\t\t{curr_amount[i]}\n')
with open('print.txt','a') as file:
file.write(f'Payable Amount:\tRs.{sum(curr_amount)}\n')
os.startfile("print.txt", "print") #print bill using printer
window1=tk.Tk()
window1.configure(background="Light blue")
window1.title("Supermarket Recommendation System")
window1.geometry('600x600')
now = datetime.datetime.now()
date=now.strftime("%Y-%m-%d")
time=now.strftime("%H:%M:%S")
timee=tk.Label(window1,text=time, bg="Light blue", fg=mycolor)
timee.place(x=200,y=15)
datee=tk.Label(window1,text=date,bg="Light blue", fg=mycolor)
datee.place(x=300,y=15)
e11=tk.Label(window1,text="Name : ",bg="Light blue", fg=mycolor)
e11.place(x=50,y=45)
e22=tk.Label(window1,text="Phone Number : ",bg="Light blue", fg=mycolor)
e22.place(x=270,y=45)
e1=tk.Entry(window1)
e1.place(x=100,y=45)
e2=tk.Entry(window1)
e2.place(x=380,y=45)
l1=tk.Label(window1,text="Item name",bg="Light blue", fg=mycolor)
l1.place(x=10, y=80)
l2=tk.Label(window1,text="Price",bg="Light blue", fg=mycolor)
l2.place(x=110, y=80)
l3=tk.Label(window1,text="Quantity",bg="Light blue", fg=mycolor)
l3.place(x=210, y=80)
l3=tk.Label(window1,text="Amount",bg="Light blue", fg=mycolor)
l3.place(x=310, y=80)
def store() :
global added_count
added_count=added_count+1
global e1,e2
usern=e1.get()
phno=e2.get()
x=entry1.get()
y=entry2.get()
z=entry3.get()
y=int(y)
z=int(z)
w=z*y
l4=tk.Label(window1,text=(str(w)+"Rs."),bg="Light blue", fg=mycolor)
l4.place(x=310,y=ind)
l5=tk.Label(window1,text="Added.",bg="Light blue", fg=mycolor)
l5.place(x=410,y=ind)
curr_user.append(usern)
curr_phone.append(phno)
curr_date.append(date)
curr_time.append(time)
curr_name.append(x)
curr_price.append(y)
curr_quantity.append(z)
curr_amount.append(w)
curr_cno.append(cno)
def newent() :
global newent_count
newent_count=newent_count+1
if(newent_count!=added_count+1 and newent_count!=0):
store()
global ind
ind=ind+20
global entry1,entry2,entry3
entry1=tk.Entry(window1)
entry1.place(x=10,y=ind)
entry = AutocompleteEntry(entry1)
test_list=list(set(pd.read_csv("./database.csv")['Name']))
if(np.nan in test_list):
test_list.remove(np.nan)
entry.set_completion_list(test_list)
entry.pack()
entry.focus_set()
entry2=tk.Entry(window1)
entry2.place(x=110,y=ind)
entry3=tk.Entry(window1)
entry3.place(x=210,y=ind)
button1=tk.Button(window1,text | import datetime | random_line_split | |
MapServiceDownload.py | import smtplib
import arcpy
import json
import urllib
import urllib2
import uuid
import math
# Enable data to be overwritten
arcpy.env.overwriteOutput = True
# Set global variables
enableLogging = "false" # Use logger.info("Example..."), logger.warning("Example..."), logger.error("Example...")
logFile = "" # os.path.join(os.path.dirname(__file__), "Example.log")
sendErrorEmail = "false"
emailTo = ""
emailUser = ""
emailPassword = ""
emailSubject = ""
emailMessage = ""
enableProxy = "false"
requestProtocol = "http" # http or https
proxyURL = ""
output = None
# Start of main function
def mainFunction(mapServiceLayer,outputFeatureClass,updateMode): # Get parameters from ArcGIS Desktop tool by seperating by comma e.g. (var1 is 1st parameter,var2 is 2nd parameter,var3 is 3rd parameter)
try:
# --------------------------------------- Start of code --------------------------------------- #
# Querying thet map service to get the count of records
arcpy.AddMessage("Querying the map service...")
mapServiceQuery1 = mapServiceLayer + "/query?where=1%3D1&returnIdsOnly=true&f=pjson"
urlResponse = urllib.urlopen(mapServiceQuery1);
# Get json for the response - Object IDs
mapServiceQuery1JSONData = json.loads(urlResponse.read())
objectIDs = mapServiceQuery1JSONData["objectIds"]
objectIDs.sort()
arcpy.AddMessage("Number of records in the layer - " + str(len(objectIDs)) + "...")
# Set the number of records per request and the number of requests that need to be made
maxRecords = 1000
# If under maxRecords, just need to make one request
if (len(objectIDs) < maxRecords):
requestsToMake = 1
else:
# Calculate the number of requests - Always round up
requestsToMake = math.ceil(float(len(objectIDs)) / float(maxRecords))
arcpy.AddMessage("Downloading data to " + arcpy.env.scratchFolder + "...")
# For every request
count = 0
while (int(requestsToMake) > count):
# Create the query
startObjectID = int(objectIDs[count*maxRecords])
# If at the final request or if there is only one request that needs to be made
if ((int(requestsToMake) == (count+1)) or (requestsToMake == 1)):
# Get the last object ID
endObjectID = int(objectIDs[len(objectIDs)-1])
serviceQuery = "OBJECTID>%3D" + str(startObjectID) + "+AND+OBJECTID<%3D" + str(endObjectID)
else:
# Start object ID plus 1000 records
endObjectID = int(objectIDs[(count*maxRecords)+maxRecords])
serviceQuery = "OBJECTID>%3D" + str(startObjectID) + "+AND+OBJECTID<" + str(endObjectID)
# Query the map service to data in json format
try: | # Download the data
fileChunk = 16 * 1024
downloadedFile = os.path.join(arcpy.env.scratchFolder, "Data-" + str(uuid.uuid1()) + ".json")
with open(downloadedFile, 'wb') as file:
downloadCount = 0
while True:
chunk = response.read(fileChunk)
# If data size is small
if ((downloadCount == 0) and (len(chunk) < 1000)):
# Log error and end download
arcpy.AddError("No data returned, check the URL...")
sys.exit()
if not chunk:
break
# Write chunk to output file
file.write(chunk)
downloadCount = downloadCount + 1
file.close()
# If it's the first request
if (count == 0):
# Create new dataset
arcpy.JSONToFeatures_conversion(downloadedFile, os.path.join(arcpy.env.scratchGDB, "Dataset"))
else:
# Create dataset and load into existing
arcpy.JSONToFeatures_conversion(downloadedFile, "in_memory\\DatasetTemp")
arcpy.Append_management("in_memory\\DatasetTemp", os.path.join(arcpy.env.scratchGDB, "Dataset"), "NO_TEST", "", "")
# If at the final request or if there is only one request that needs to be made
if ((int(requestsToMake) == (count+1)) or (requestsToMake == 1)):
arcpy.AddMessage("Downloaded and converted JSON for " + str(len(objectIDs)) + " of " + str(len(objectIDs)) + " features...")
else:
arcpy.AddMessage("Downloaded and converted JSON for " + str((count+1)*maxRecords) + " of " + str(len(objectIDs)) + " features...")
count = count + 1
# Convert JSON to feature class
arcpy.AddMessage("Copying over final dataset...")
# Overwrite dataset
if (updateMode.lower() == "new"):
# Get record count
recordCount = arcpy.GetCount_management(os.path.join(arcpy.env.scratchGDB, "Dataset"))
arcpy.AddMessage("Number of records for " + outputFeatureClass + " - " + str(recordCount))
# Logging
if (enableLogging == "true"):
# Log record count
logger.info("Number of records for " + outputFeatureClass + " - " + str(recordCount))
# Load in data
if (recordCount > 0):
arcpy.CopyFeatures_management(os.path.join(arcpy.env.scratchGDB, "Dataset"), outputFeatureClass, "", "0", "0", "0")
# Delete and append
else:
# Get record count
recordCount = arcpy.GetCount_management(os.path.join(arcpy.env.scratchGDB, "Dataset"))
arcpy.AddMessage("Number of records for " + outputFeatureClass + " - " + str(recordCount))
# Logging
if (enableLogging == "true"):
# Log record count
logger.info("Number of records for " + outputFeatureClass + " - " + str(recordCount))
# Load in data
if (recordCount > 0):
arcpy.DeleteFeatures_management(outputFeatureClass)
arcpy.Append_management(os.path.join(arcpy.env.scratchGDB, "Dataset"), outputFeatureClass, "NO_TEST", "", "")
# --------------------------------------- End of code --------------------------------------- #
# If called from gp tool return the arcpy parameter
if __name__ == '__main__':
# Return the output if there is any
if output:
arcpy.SetParameterAsText(1, output)
# Otherwise return the result
else:
# Return the output if there is any
if output:
return output
# Logging
if (enableLogging == "true"):
# Log end of process
logger.info("Process ended.")
# Remove file handler and close log file
logMessage.flush()
logMessage.close()
logger.handlers = []
# If arcpy error
except arcpy.ExecuteError:
# Build and show the error message
errorMessage = arcpy.GetMessages(2)
arcpy.AddError(errorMessage)
# Logging
if (enableLogging == "true"):
# Log error
logger.error(errorMessage)
# Log end of process
logger.info("Process ended.")
# Remove file handler and close log file
logMessage.flush()
logMessage.close()
logger.handlers = []
if (sendErrorEmail == "true"):
# Send email
sendEmail(errorMessage)
# If python error
except Exception as e:
errorMessage = ""
# Build and show the error message
for i in range(len(e.args)):
if (i == 0):
errorMessage = unicode(e.args[i]).encode('utf-8')
else:
errorMessage = errorMessage + " " + unicode(e.args[i]).encode('utf-8')
arcpy.AddError(errorMessage)
# Logging
if (enableLogging == "true"):
# Log error
logger.error(errorMessage)
# Log end of process
logger.info("Process ended.")
# Remove file handler and close log file
logMessage.flush()
logMessage.close()
logger.handlers = []
if (sendErrorEmail == "true"):
# Send email
sendEmail(errorMessage)
# End of main function
# Start of set logging function
def setLogging(logFile):
# Create a logger
logger = logging.getLogger(os.path.basename(__file__))
logger.setLevel(logging.DEBUG)
# Setup log message handler
logMessage = logging.FileHandler(logFile)
# Setup the log formatting
logFormat = logging.Formatter("%(asctime)s: %(levelname)s - %(message)s", "%d/%m/%Y - %H:%M:%S")
# Add formatter to log message handler
logMessage.setFormatter | mapServiceQuery2 = mapServiceLayer + "/query?where=" + serviceQuery + "&returnCountOnly=false&returnIdsOnly=false&returnGeometry=true&outFields=*&f=pjson"
response = urllib2.urlopen(mapServiceQuery2)
except urllib2.URLError, e:
arcpy.AddError("There was an error: %r" % e)
| random_line_split |
MapServiceDownload.py | data to be overwritten
arcpy.env.overwriteOutput = True
# Set global variables
enableLogging = "false" # Use logger.info("Example..."), logger.warning("Example..."), logger.error("Example...")
logFile = "" # os.path.join(os.path.dirname(__file__), "Example.log")
sendErrorEmail = "false"
emailTo = ""
emailUser = ""
emailPassword = ""
emailSubject = ""
emailMessage = ""
enableProxy = "false"
requestProtocol = "http" # http or https
proxyURL = ""
output = None
# Start of main function
def mainFunction(mapServiceLayer,outputFeatureClass,updateMode): # Get parameters from ArcGIS Desktop tool by seperating by comma e.g. (var1 is 1st parameter,var2 is 2nd parameter,var3 is 3rd parameter)
try:
# --------------------------------------- Start of code --------------------------------------- #
# Querying thet map service to get the count of records
arcpy.AddMessage("Querying the map service...")
mapServiceQuery1 = mapServiceLayer + "/query?where=1%3D1&returnIdsOnly=true&f=pjson"
urlResponse = urllib.urlopen(mapServiceQuery1);
# Get json for the response - Object IDs
mapServiceQuery1JSONData = json.loads(urlResponse.read())
objectIDs = mapServiceQuery1JSONData["objectIds"]
objectIDs.sort()
arcpy.AddMessage("Number of records in the layer - " + str(len(objectIDs)) + "...")
# Set the number of records per request and the number of requests that need to be made
maxRecords = 1000
# If under maxRecords, just need to make one request
if (len(objectIDs) < maxRecords):
requestsToMake = 1
else:
# Calculate the number of requests - Always round up
requestsToMake = math.ceil(float(len(objectIDs)) / float(maxRecords))
arcpy.AddMessage("Downloading data to " + arcpy.env.scratchFolder + "...")
# For every request
count = 0
while (int(requestsToMake) > count):
# Create the query
startObjectID = int(objectIDs[count*maxRecords])
# If at the final request or if there is only one request that needs to be made
if ((int(requestsToMake) == (count+1)) or (requestsToMake == 1)):
# Get the last object ID
endObjectID = int(objectIDs[len(objectIDs)-1])
serviceQuery = "OBJECTID>%3D" + str(startObjectID) + "+AND+OBJECTID<%3D" + str(endObjectID)
else:
# Start object ID plus 1000 records
endObjectID = int(objectIDs[(count*maxRecords)+maxRecords])
serviceQuery = "OBJECTID>%3D" + str(startObjectID) + "+AND+OBJECTID<" + str(endObjectID)
# Query the map service to data in json format
try:
mapServiceQuery2 = mapServiceLayer + "/query?where=" + serviceQuery + "&returnCountOnly=false&returnIdsOnly=false&returnGeometry=true&outFields=*&f=pjson"
response = urllib2.urlopen(mapServiceQuery2)
except urllib2.URLError, e:
arcpy.AddError("There was an error: %r" % e)
# Download the data
fileChunk = 16 * 1024
downloadedFile = os.path.join(arcpy.env.scratchFolder, "Data-" + str(uuid.uuid1()) + ".json")
with open(downloadedFile, 'wb') as file:
downloadCount = 0
while True:
chunk = response.read(fileChunk)
# If data size is small
if ((downloadCount == 0) and (len(chunk) < 1000)):
# Log error and end download
arcpy.AddError("No data returned, check the URL...")
sys.exit()
if not chunk:
break
# Write chunk to output file
file.write(chunk)
downloadCount = downloadCount + 1
file.close()
# If it's the first request
if (count == 0):
# Create new dataset
arcpy.JSONToFeatures_conversion(downloadedFile, os.path.join(arcpy.env.scratchGDB, "Dataset"))
else:
# Create dataset and load into existing
arcpy.JSONToFeatures_conversion(downloadedFile, "in_memory\\DatasetTemp")
arcpy.Append_management("in_memory\\DatasetTemp", os.path.join(arcpy.env.scratchGDB, "Dataset"), "NO_TEST", "", "")
# If at the final request or if there is only one request that needs to be made
if ((int(requestsToMake) == (count+1)) or (requestsToMake == 1)):
arcpy.AddMessage("Downloaded and converted JSON for " + str(len(objectIDs)) + " of " + str(len(objectIDs)) + " features...")
else:
arcpy.AddMessage("Downloaded and converted JSON for " + str((count+1)*maxRecords) + " of " + str(len(objectIDs)) + " features...")
count = count + 1
# Convert JSON to feature class
arcpy.AddMessage("Copying over final dataset...")
# Overwrite dataset
if (updateMode.lower() == "new"):
# Get record count
recordCount = arcpy.GetCount_management(os.path.join(arcpy.env.scratchGDB, "Dataset"))
arcpy.AddMessage("Number of records for " + outputFeatureClass + " - " + str(recordCount))
# Logging
if (enableLogging == "true"):
# Log record count
logger.info("Number of records for " + outputFeatureClass + " - " + str(recordCount))
# Load in data
if (recordCount > 0):
arcpy.CopyFeatures_management(os.path.join(arcpy.env.scratchGDB, "Dataset"), outputFeatureClass, "", "0", "0", "0")
# Delete and append
else:
# Get record count
recordCount = arcpy.GetCount_management(os.path.join(arcpy.env.scratchGDB, "Dataset"))
arcpy.AddMessage("Number of records for " + outputFeatureClass + " - " + str(recordCount))
# Logging
if (enableLogging == "true"):
# Log record count
logger.info("Number of records for " + outputFeatureClass + " - " + str(recordCount))
# Load in data
if (recordCount > 0):
arcpy.DeleteFeatures_management(outputFeatureClass)
arcpy.Append_management(os.path.join(arcpy.env.scratchGDB, "Dataset"), outputFeatureClass, "NO_TEST", "", "")
# --------------------------------------- End of code --------------------------------------- #
# If called from gp tool return the arcpy parameter
if __name__ == '__main__':
# Return the output if there is any
if output:
arcpy.SetParameterAsText(1, output)
# Otherwise return the result
else:
# Return the output if there is any
if output:
return output
# Logging
if (enableLogging == "true"):
# Log end of process
logger.info("Process ended.")
# Remove file handler and close log file
logMessage.flush()
logMessage.close()
logger.handlers = []
# If arcpy error
except arcpy.ExecuteError:
# Build and show the error message
errorMessage = arcpy.GetMessages(2)
arcpy.AddError(errorMessage)
# Logging
if (enableLogging == "true"):
# Log error
logger.error(errorMessage)
# Log end of process
logger.info("Process ended.")
# Remove file handler and close log file
logMessage.flush()
logMessage.close()
logger.handlers = []
if (sendErrorEmail == "true"):
# Send email
sendEmail(errorMessage)
# If python error
except Exception as e:
errorMessage = ""
# Build and show the error message
for i in range(len(e.args)):
if (i == 0):
errorMessage = unicode(e.args[i]).encode('utf-8')
else:
errorMessage = errorMessage + " " + unicode(e.args[i]).encode('utf-8')
arcpy.AddError(errorMessage)
# Logging
if (enableLogging == "true"):
# Log error
logger.error(errorMessage)
# Log end of process
logger.info("Process ended.")
# Remove file handler and close log file
logMessage.flush()
logMessage.close()
logger.handlers = []
if (sendErrorEmail == "true"):
# Send email
sendEmail(errorMessage)
# End of main function
# Start of set logging function
def setLogging(logFile):
# Create a logger
| logger = logging.getLogger(os.path.basename(__file__))
logger.setLevel(logging.DEBUG)
# Setup log message handler
logMessage = logging.FileHandler(logFile)
# Setup the log formatting
logFormat = logging.Formatter("%(asctime)s: %(levelname)s - %(message)s", "%d/%m/%Y - %H:%M:%S")
# Add formatter to log message handler
logMessage.setFormatter(logFormat)
# Add log message handler to logger
logger.addHandler(logMessage)
return logger, logMessage | identifier_body | |
MapServiceDownload.py | enableLogging = "false" # Use logger.info("Example..."), logger.warning("Example..."), logger.error("Example...")
logFile = "" # os.path.join(os.path.dirname(__file__), "Example.log")
sendErrorEmail = "false"
emailTo = ""
emailUser = ""
emailPassword = ""
emailSubject = ""
emailMessage = ""
enableProxy = "false"
requestProtocol = "http" # http or https
proxyURL = ""
output = None
# Start of main function
def mainFunction(mapServiceLayer,outputFeatureClass,updateMode): # Get parameters from ArcGIS Desktop tool by seperating by comma e.g. (var1 is 1st parameter,var2 is 2nd parameter,var3 is 3rd parameter)
try:
# --------------------------------------- Start of code --------------------------------------- #
# Querying thet map service to get the count of records
arcpy.AddMessage("Querying the map service...")
mapServiceQuery1 = mapServiceLayer + "/query?where=1%3D1&returnIdsOnly=true&f=pjson"
urlResponse = urllib.urlopen(mapServiceQuery1);
# Get json for the response - Object IDs
mapServiceQuery1JSONData = json.loads(urlResponse.read())
objectIDs = mapServiceQuery1JSONData["objectIds"]
objectIDs.sort()
arcpy.AddMessage("Number of records in the layer - " + str(len(objectIDs)) + "...")
# Set the number of records per request and the number of requests that need to be made
maxRecords = 1000
# If under maxRecords, just need to make one request
if (len(objectIDs) < maxRecords):
requestsToMake = 1
else:
# Calculate the number of requests - Always round up
requestsToMake = math.ceil(float(len(objectIDs)) / float(maxRecords))
arcpy.AddMessage("Downloading data to " + arcpy.env.scratchFolder + "...")
# For every request
count = 0
while (int(requestsToMake) > count):
# Create the query
startObjectID = int(objectIDs[count*maxRecords])
# If at the final request or if there is only one request that needs to be made
if ((int(requestsToMake) == (count+1)) or (requestsToMake == 1)):
# Get the last object ID
endObjectID = int(objectIDs[len(objectIDs)-1])
serviceQuery = "OBJECTID>%3D" + str(startObjectID) + "+AND+OBJECTID<%3D" + str(endObjectID)
else:
# Start object ID plus 1000 records
endObjectID = int(objectIDs[(count*maxRecords)+maxRecords])
serviceQuery = "OBJECTID>%3D" + str(startObjectID) + "+AND+OBJECTID<" + str(endObjectID)
# Query the map service to data in json format
try:
mapServiceQuery2 = mapServiceLayer + "/query?where=" + serviceQuery + "&returnCountOnly=false&returnIdsOnly=false&returnGeometry=true&outFields=*&f=pjson"
response = urllib2.urlopen(mapServiceQuery2)
except urllib2.URLError, e:
arcpy.AddError("There was an error: %r" % e)
# Download the data
fileChunk = 16 * 1024
downloadedFile = os.path.join(arcpy.env.scratchFolder, "Data-" + str(uuid.uuid1()) + ".json")
with open(downloadedFile, 'wb') as file:
downloadCount = 0
while True:
chunk = response.read(fileChunk)
# If data size is small
if ((downloadCount == 0) and (len(chunk) < 1000)):
# Log error and end download
arcpy.AddError("No data returned, check the URL...")
sys.exit()
if not chunk:
break
# Write chunk to output file
file.write(chunk)
downloadCount = downloadCount + 1
file.close()
# If it's the first request
if (count == 0):
# Create new dataset
arcpy.JSONToFeatures_conversion(downloadedFile, os.path.join(arcpy.env.scratchGDB, "Dataset"))
else:
# Create dataset and load into existing
arcpy.JSONToFeatures_conversion(downloadedFile, "in_memory\\DatasetTemp")
arcpy.Append_management("in_memory\\DatasetTemp", os.path.join(arcpy.env.scratchGDB, "Dataset"), "NO_TEST", "", "")
# If at the final request or if there is only one request that needs to be made
if ((int(requestsToMake) == (count+1)) or (requestsToMake == 1)):
arcpy.AddMessage("Downloaded and converted JSON for " + str(len(objectIDs)) + " of " + str(len(objectIDs)) + " features...")
else:
arcpy.AddMessage("Downloaded and converted JSON for " + str((count+1)*maxRecords) + " of " + str(len(objectIDs)) + " features...")
count = count + 1
# Convert JSON to feature class
arcpy.AddMessage("Copying over final dataset...")
# Overwrite dataset
if (updateMode.lower() == "new"):
# Get record count
recordCount = arcpy.GetCount_management(os.path.join(arcpy.env.scratchGDB, "Dataset"))
arcpy.AddMessage("Number of records for " + outputFeatureClass + " - " + str(recordCount))
# Logging
if (enableLogging == "true"):
# Log record count
logger.info("Number of records for " + outputFeatureClass + " - " + str(recordCount))
# Load in data
if (recordCount > 0):
arcpy.CopyFeatures_management(os.path.join(arcpy.env.scratchGDB, "Dataset"), outputFeatureClass, "", "0", "0", "0")
# Delete and append
else:
# Get record count
recordCount = arcpy.GetCount_management(os.path.join(arcpy.env.scratchGDB, "Dataset"))
arcpy.AddMessage("Number of records for " + outputFeatureClass + " - " + str(recordCount))
# Logging
if (enableLogging == "true"):
# Log record count
logger.info("Number of records for " + outputFeatureClass + " - " + str(recordCount))
# Load in data
if (recordCount > 0):
arcpy.DeleteFeatures_management(outputFeatureClass)
arcpy.Append_management(os.path.join(arcpy.env.scratchGDB, "Dataset"), outputFeatureClass, "NO_TEST", "", "")
# --------------------------------------- End of code --------------------------------------- #
# If called from gp tool return the arcpy parameter
if __name__ == '__main__':
# Return the output if there is any
if output:
arcpy.SetParameterAsText(1, output)
# Otherwise return the result
else:
# Return the output if there is any
if output:
return output
# Logging
if (enableLogging == "true"):
# Log end of process
logger.info("Process ended.")
# Remove file handler and close log file
logMessage.flush()
logMessage.close()
logger.handlers = []
# If arcpy error
except arcpy.ExecuteError:
# Build and show the error message
errorMessage = arcpy.GetMessages(2)
arcpy.AddError(errorMessage)
# Logging
if (enableLogging == "true"):
# Log error
logger.error(errorMessage)
# Log end of process
logger.info("Process ended.")
# Remove file handler and close log file
logMessage.flush()
logMessage.close()
logger.handlers = []
if (sendErrorEmail == "true"):
# Send email
sendEmail(errorMessage)
# If python error
except Exception as e:
errorMessage = ""
# Build and show the error message
for i in range(len(e.args)):
if (i == 0):
errorMessage = unicode(e.args[i]).encode('utf-8')
else:
errorMessage = errorMessage + " " + unicode(e.args[i]).encode('utf-8')
arcpy.AddError(errorMessage)
# Logging
if (enableLogging == "true"):
# Log error
logger.error(errorMessage)
# Log end of process
logger.info("Process ended.")
# Remove file handler and close log file
logMessage.flush()
logMessage.close()
logger.handlers = []
if (sendErrorEmail == "true"):
# Send email
sendEmail(errorMessage)
# End of main function
# Start of set logging function
def setLogging(logFile):
# Create a logger
logger = logging.getLogger(os.path.basename(__file__))
logger.setLevel(logging.DEBUG)
# Setup log message handler
logMessage = logging.FileHandler(logFile)
# Setup the log formatting
logFormat = logging.Formatter("%(asctime)s: %(levelname)s - %(message)s", "%d/%m/%Y - %H:%M:%S")
# Add formatter to log message handler
logMessage.setFormatter(logFormat)
# Add log message handler to logger
logger.addHandler(logMessage)
return logger, logMessage
# End of set logging function
# Start of send email function
def | sendEmail | identifier_name | |
MapServiceDownload.py | import smtplib
import arcpy
import json
import urllib
import urllib2
import uuid
import math
# Enable data to be overwritten
arcpy.env.overwriteOutput = True
# Set global variables
enableLogging = "false" # Use logger.info("Example..."), logger.warning("Example..."), logger.error("Example...")
logFile = "" # os.path.join(os.path.dirname(__file__), "Example.log")
sendErrorEmail = "false"
emailTo = ""
emailUser = ""
emailPassword = ""
emailSubject = ""
emailMessage = ""
enableProxy = "false"
requestProtocol = "http" # http or https
proxyURL = ""
output = None
# Start of main function
def mainFunction(mapServiceLayer,outputFeatureClass,updateMode): # Get parameters from ArcGIS Desktop tool by seperating by comma e.g. (var1 is 1st parameter,var2 is 2nd parameter,var3 is 3rd parameter)
try:
# --------------------------------------- Start of code --------------------------------------- #
# Querying thet map service to get the count of records
arcpy.AddMessage("Querying the map service...")
mapServiceQuery1 = mapServiceLayer + "/query?where=1%3D1&returnIdsOnly=true&f=pjson"
urlResponse = urllib.urlopen(mapServiceQuery1);
# Get json for the response - Object IDs
mapServiceQuery1JSONData = json.loads(urlResponse.read())
objectIDs = mapServiceQuery1JSONData["objectIds"]
objectIDs.sort()
arcpy.AddMessage("Number of records in the layer - " + str(len(objectIDs)) + "...")
# Set the number of records per request and the number of requests that need to be made
maxRecords = 1000
# If under maxRecords, just need to make one request
if (len(objectIDs) < maxRecords):
requestsToMake = 1
else:
# Calculate the number of requests - Always round up
requestsToMake = math.ceil(float(len(objectIDs)) / float(maxRecords))
arcpy.AddMessage("Downloading data to " + arcpy.env.scratchFolder + "...")
# For every request
count = 0
while (int(requestsToMake) > count):
# Create the query
startObjectID = int(objectIDs[count*maxRecords])
# If at the final request or if there is only one request that needs to be made
if ((int(requestsToMake) == (count+1)) or (requestsToMake == 1)):
# Get the last object ID
endObjectID = int(objectIDs[len(objectIDs)-1])
serviceQuery = "OBJECTID>%3D" + str(startObjectID) + "+AND+OBJECTID<%3D" + str(endObjectID)
else:
# Start object ID plus 1000 records
endObjectID = int(objectIDs[(count*maxRecords)+maxRecords])
serviceQuery = "OBJECTID>%3D" + str(startObjectID) + "+AND+OBJECTID<" + str(endObjectID)
# Query the map service to data in json format
try:
mapServiceQuery2 = mapServiceLayer + "/query?where=" + serviceQuery + "&returnCountOnly=false&returnIdsOnly=false&returnGeometry=true&outFields=*&f=pjson"
response = urllib2.urlopen(mapServiceQuery2)
except urllib2.URLError, e:
arcpy.AddError("There was an error: %r" % e)
# Download the data
fileChunk = 16 * 1024
downloadedFile = os.path.join(arcpy.env.scratchFolder, "Data-" + str(uuid.uuid1()) + ".json")
with open(downloadedFile, 'wb') as file:
downloadCount = 0
while True:
chunk = response.read(fileChunk)
# If data size is small
if ((downloadCount == 0) and (len(chunk) < 1000)):
# Log error and end download
arcpy.AddError("No data returned, check the URL...")
sys.exit()
if not chunk:
|
# Write chunk to output file
file.write(chunk)
downloadCount = downloadCount + 1
file.close()
# If it's the first request
if (count == 0):
# Create new dataset
arcpy.JSONToFeatures_conversion(downloadedFile, os.path.join(arcpy.env.scratchGDB, "Dataset"))
else:
# Create dataset and load into existing
arcpy.JSONToFeatures_conversion(downloadedFile, "in_memory\\DatasetTemp")
arcpy.Append_management("in_memory\\DatasetTemp", os.path.join(arcpy.env.scratchGDB, "Dataset"), "NO_TEST", "", "")
# If at the final request or if there is only one request that needs to be made
if ((int(requestsToMake) == (count+1)) or (requestsToMake == 1)):
arcpy.AddMessage("Downloaded and converted JSON for " + str(len(objectIDs)) + " of " + str(len(objectIDs)) + " features...")
else:
arcpy.AddMessage("Downloaded and converted JSON for " + str((count+1)*maxRecords) + " of " + str(len(objectIDs)) + " features...")
count = count + 1
# Convert JSON to feature class
arcpy.AddMessage("Copying over final dataset...")
# Overwrite dataset
if (updateMode.lower() == "new"):
# Get record count
recordCount = arcpy.GetCount_management(os.path.join(arcpy.env.scratchGDB, "Dataset"))
arcpy.AddMessage("Number of records for " + outputFeatureClass + " - " + str(recordCount))
# Logging
if (enableLogging == "true"):
# Log record count
logger.info("Number of records for " + outputFeatureClass + " - " + str(recordCount))
# Load in data
if (recordCount > 0):
arcpy.CopyFeatures_management(os.path.join(arcpy.env.scratchGDB, "Dataset"), outputFeatureClass, "", "0", "0", "0")
# Delete and append
else:
# Get record count
recordCount = arcpy.GetCount_management(os.path.join(arcpy.env.scratchGDB, "Dataset"))
arcpy.AddMessage("Number of records for " + outputFeatureClass + " - " + str(recordCount))
# Logging
if (enableLogging == "true"):
# Log record count
logger.info("Number of records for " + outputFeatureClass + " - " + str(recordCount))
# Load in data
if (recordCount > 0):
arcpy.DeleteFeatures_management(outputFeatureClass)
arcpy.Append_management(os.path.join(arcpy.env.scratchGDB, "Dataset"), outputFeatureClass, "NO_TEST", "", "")
# --------------------------------------- End of code --------------------------------------- #
# If called from gp tool return the arcpy parameter
if __name__ == '__main__':
# Return the output if there is any
if output:
arcpy.SetParameterAsText(1, output)
# Otherwise return the result
else:
# Return the output if there is any
if output:
return output
# Logging
if (enableLogging == "true"):
# Log end of process
logger.info("Process ended.")
# Remove file handler and close log file
logMessage.flush()
logMessage.close()
logger.handlers = []
# If arcpy error
except arcpy.ExecuteError:
# Build and show the error message
errorMessage = arcpy.GetMessages(2)
arcpy.AddError(errorMessage)
# Logging
if (enableLogging == "true"):
# Log error
logger.error(errorMessage)
# Log end of process
logger.info("Process ended.")
# Remove file handler and close log file
logMessage.flush()
logMessage.close()
logger.handlers = []
if (sendErrorEmail == "true"):
# Send email
sendEmail(errorMessage)
# If python error
except Exception as e:
errorMessage = ""
# Build and show the error message
for i in range(len(e.args)):
if (i == 0):
errorMessage = unicode(e.args[i]).encode('utf-8')
else:
errorMessage = errorMessage + " " + unicode(e.args[i]).encode('utf-8')
arcpy.AddError(errorMessage)
# Logging
if (enableLogging == "true"):
# Log error
logger.error(errorMessage)
# Log end of process
logger.info("Process ended.")
# Remove file handler and close log file
logMessage.flush()
logMessage.close()
logger.handlers = []
if (sendErrorEmail == "true"):
# Send email
sendEmail(errorMessage)
# End of main function
# Start of set logging function
def setLogging(logFile):
# Create a logger
logger = logging.getLogger(os.path.basename(__file__))
logger.setLevel(logging.DEBUG)
# Setup log message handler
logMessage = logging.FileHandler(logFile)
# Setup the log formatting
logFormat = logging.Formatter("%(asctime)s: %(levelname)s - %(message)s", "%d/%m/%Y - %H:%M:%S")
# Add formatter to log message handler
logMessage.setFormatter | break | conditional_block |
GatewayToAdventure.py | ] + "\n" for i in range(len(ls))]
print(f"\n{bcolors.CYAN}{''.join(outputList)}{bcolors.ENDC}")
sizeEssentialList = len(ls)
essentialsList = []
choiceInput = False
while choiceInput is False:
choices = input(f"{bcolors.CYAN}Input your selection as numbers 1, 2, 3, 4, or 5 separated by comma: {bcolors.ENDC}")
choiceInput = True
choices = choices.split(',')
for choice in choices:
if choice not in ('1', '2', '3', '4', '5', 'quit', 'QUIT', 'Quit'):
print(f"\n{bcolors.PINK}Please enter a valid Input{bcolors.ENDC}\n")
choiceInput = False
break
for choice in choices:
if choice.capitalize() == "Quit":
# User input "Quit" at this stage. So, just quit the game.
return choices
try:
# Convert input to integer for index in essentialList item
choices = [int(i) for i in choices]
except ValueError:
# If input is not a number, Quit gracefully!
print("Input is not a number. Quit")
return essentialsList
if max(choices) > sizeEssentialList:
print(f"Invalid input! Input is not in essentialList")
return essentialsList
for j in choices:
if self.spendCoin(amount):
essentialsList.append(ls[j-1])
else:
print(f"You don't have enough money to buy {j}. You only have {self.coins} coins left.")
break
self.assets = essentialsList
print(f"\n{bcolors.WHITE}Thank you for buying the essentials. Now you are officially ready to enter into the {self.location.worldType}.\nHere is your current asset bag with essential items and the available coins.{bcolors.ENDC}")
print(f"\n{bcolors.YELLOW}Asset Bag Contents: {self.assets}\nCoins: {self.coins}{bcolors.ENDC}")
return self.assets
def getAssets(self):
"""Returns list of assets of the user"""
return self.assets
def takeSurvivalQuiz(self, quiz):
"""Returns the result of survival quiz"""
if quiz == 'survivalQuiz1':
print(f"\n{bcolors.WHITE}Starting Survival quiz1 for {self.location.worldName} in 3 2 1....{bcolors.ENDC}")
s(2)
print(f"\n{bcolors.CYAN}To answer the following questions, input your choice by entering 'a', 'b', 'c', or 'd': {bcolors.ENDC}")
userSurvival = [(i, self.location.survivalQuiz1.questionAndAnswer.get(i)) for i in self.location.survivalQuiz1.questionAndAnswer.keys()]
elif quiz == 'survivalQuiz2':
print(f"\n{bcolors.WHITE}Starting Survival quiz2 for {self.location.worldName} in 3 2 1....{bcolors.ENDC}")
s(2)
#print(f"\n{bcolors.CYAN}To answer the following questions, input your choice by entering 'a', 'b', 'c', or 'd': {bcolors.ENDC}")
userSurvival = [(i, self.location.survivalQuiz2.questionAndAnswer.get(i)) for i in self.location.survivalQuiz2.questionAndAnswer.keys()]
else:
# Wrong argument, should not have reached here. Return anyway!
userSurvival = "Quit"
survivalResults = self.location.survivalQuizMethod(userSurvival)
return survivalResults
def navigateToCapitalcity(self, amount=20):
"""Increments number if gems by 1 if the user succesfully navigates to the capital city"""
navSuccess = False
if self.spendCoin(amount):
self.gems += 1
navSuccess = True
return navSuccess
class GameSystem:
"""This is the game engine where other classes are ingested. This class controls the whole game system and stores system information"""
def __init__(self):
self.coins = 100
self.name = "Gateway To Adventure"
self.minCorrectAnswers = 2
self.minGemsWin = 4
self.minCoinsWin = 25
self.minGemForCapitalCity = 2
self.buyGemAmount = 25
self.minEssentials = 3
self.maxAssetNum = 5
self.survivalWinCoins = 10
self.userName = "Empty"
def __str__(self):
return self.name
def getUserLocation(self, userName, worldList):
"""Returns user location after User selects a world"""
validInput = False
itemizedWorldList = [ "\n" + str(i+1) + ". " + worldList[i] for i in range(len(worldList))]
while validInput is False:
print(f"{bcolors.CYAN}Choose a world from the following: {bcolors.BOLD}{''.join(itemizedWorldList)}{bcolors.ENDC}")
userLocation = input(f"{bcolors.CYAN}Please input the exact name of the world: {bcolors.ENDC}")
userLocation = userLocation.capitalize()
if userLocation in worldList:
validInput = True
else:
if userLocation == "Quit":
validInput = True
else:
print(f"{bcolors.FAIL}Invalid world name! Please enter a correct World Name {bcolors.ENDC}")
validInput = False
return userLocation
def selectWorld(self, userLocation, userName, availableWorldList):
"""Returns world type and the result of qualifying quiz, after User selects the world"""
gameReturn = False
if userLocation in availableWorldList:
if userLocation == "Nixoterra":
world = Nixoterra()
elif userLocation == "Silvia":
world = Silvia()
elif userLocation == "Aquamundi":
world = Aquamundi()
elif userLocation == "Montelocus":
world = Montelocus()
else:
# Should not have reached here!
print(f"{bcolors.FAIL}Quitting as {userName} wants to quit the game.{bcolors.ENDC}")
return gameReturn
print(f"{bcolors.WHITE}Thank you for choosing the {world.worldType}.\n\n-------------\n{world.story}\n-------------\nNow, Let's see if you are ready for the selected world :-)\n\nStarting Qualifying quiz for {userLocation} in 3 2 1...{bcolors.ENDC}\n")
s(2)
print(f"{bcolors.CYAN}To answer the following questions, input your choice by entering 'a', 'b', 'c', or 'd'{bcolors.ENDC}")
userQualifying = [(i, world.qualifyingQuiz.questionAndAnswer.get(i)) for i in world.qualifyingQuiz.questionAndAnswer.keys()]
qualifyingResults = world.qualifyingQuizMethod(userQualifying)
if self.checkReturn(qualifyingResults, userName):
return gameReturn
else:
print(f"{bcolors.FAIL}You have already failed in the Qualifying quiz of {userLocation}. You can't reattempt thw Qualifyiong quiz.{bcolors.ENDC}")
return world, qualifyingResults
def checkReturn(self, returnVal, name):
"""Returns True if user enters 'quit' at any stage of the game"""
quitGame = False
if str(returnVal).capitalize() == "Quit":
print(f"{bcolors.FAIL}Quitting as {name} wants to quit the game.{bcolors.ENDC}")
# User entered Quit, exit and return accordingly
quitGame = True
return quitGame
def evaluateSurvialQuiz(self, player):
"""Evaluates and displays result of survival quiz and also displays the user's current asset"""
print(f"\n{bcolors.WHITE}Let's start earning some more coins by answering survival questions that will help you survive in the world{bcolors.ENDC}")
quizList = ['survivalQuiz1', 'survivalQuiz2']
perkReattempt = True
for quiz in quizList:
survivalResults = player.takeSurvivalQuiz(quiz)
if self.checkReturn(survivalResults, player.name):
return False
if not survivalResults:
| print(f"{bcolors.FAIL}Sorry, you failed.{bcolors.ENDC}")
if len(player.getAssets()) > self.minEssentials and perkReattempt:
print(f"{bcolors.WHITE}You are eligible for 1 reattempt.{bcolors.ENDC}")
perkReattempt = False
survivalResults = player.takeSurvivalQuiz(quiz)
if self.checkReturn(survivalResults, player.name):
return False
if not survivalResults:
print(f"{bcolors.FAIL}Sorry, you failed again.{bcolors.ENDC}")
else:
player.earnCoin(self.survivalWinCoins * survivalResults)
player.collectGem()
print(f"{bcolors.YELLOW}Congrats! {player.name}, you have cleared {quiz}{bcolors.ENDC}")
#print(f"current coins: {player.getCoins()}\ncurrent Gems:{player.getGems()}") | conditional_block | |
GatewayToAdventure.py |
class GameWorld:
"""Base class for creating different worlds in the game"""
def __init__(self, name='gameWorld', typeW='all'):
self.worldName = name
self.worldType = typeW
self.essentials = []
self.qualifyingQuiz = QualifyingQuiz()
self.survivalQuiz1 = SurvivalQuiz()
self.survivalQuiz2 = SurvivalQuiz()
def qualifyingQuizMethod(self, questionAndAnswer):
"""Returns number of correct answers for Qualifying quiz. Compares User response with the desired answer from the existing dictionary"""
questionsRight = 0
for question in questionAndAnswer:
answer = input(f"{bcolors.CYAN} {question[0]}\n: {bcolors.ENDC}")
if answer == question[1]:
questionsRight += 1
elif answer.capitalize() == "Quit":
# User wants to Quit the Game now
print(f"User wants to quit the game now")
questionsRight = "Quit"
break
return questionsRight
def survivalQuizMethod(self, questionAndAnswer):
"""Returns 1 if User correctly answers all the question of the Survival quiz, else 0. Compares User response with the desired answer from the existing dictionary"""
questionsRight = 1
for question in questionAndAnswer:
answer = input(f"\n{bcolors.CYAN}{question[0]}\n:{bcolors.ENDC}")
if answer == question[1]:
questionsRight *= 1
elif answer != question[1]:
questionsRight *= 0
elif answer.capitalize() == "Quit":
# User wants to Quit the Game now
print(f"User wants to quit the game now")
questionsRight = "Quit"
break
return questionsRight
def getEssentialList(self):
"""Returns list of essentials for a particular world"""
return self.essentials
def __str__(self):
return self.worldName
class Nixoterra(GameWorld):
"""Child class for Snow World - Nixoterra"""
#name = "Nixoterra"
qNADictQ = {
"Q1: What is the freezing point of snow?\n a. 0 degrees celcius\n b. 30 degrees celcius\n c. 100 degrees celcius\n d. -5 degrees celcius": "d",
"Q2: Which ball do we use in the snow ball activity at snow world?\n a. Leather Ball\n b. Foot Ball\n c. Snow Ball\n d. All of the above": "d",
"Q3: Which cartoon character does snow world have?\n a. Penguin\n b. Olaf\n c. Snowman\n d. All of the above": "d"
}
qNADictS1 = {
"Q1: Describe a typical workweek for a snowboard instructor\n a. Sleep all week long\n b. Play with Ice\n c. Instruct and provide training to groups/individuals": "c",
"Q2: what's your level of experience with edging, waxing and mounting skis and snowboard?\n a. Novice\n b. Proficient\n c. expert": "c"
}
qNADictS2 = {
"Q1: What equipment is/are required for working as an experienced snowmaker\n a. Snow guns\n b. hoses\n c. hydrants\n d. All of the above": "d",
"Q2: Can you safely lift 50 lbs\n a. Yes\n b. No": "a"
}
def __init__(self):
self.worldName = "Nixoterra"
self.worldType = "SnowWorld"
self.essentials = ["Mittens", "Snowboots", "Ski Goggles", "Wool Socks", "Hand warmer"]
self.story = "Ice, Chill, Snow, Cold, Freezerealm, Glacia. You name it. \nThey are all names for the mystic Nixoterra. Allow me, Nixette, to guide you through this world. This land is the mother of all snow and ice. Explore this world carefully, because the temperatures are- well, piercing. But other than that, just chill."
#ToDo: Fill real name of capital city
self.qualifyingQuiz = QualifyingQuiz(Nixoterra.qNADictQ)
self.survivalQuiz1 = SurvivalQuiz(Nixoterra.qNADictS1)
self.survivalQuiz2 = SurvivalQuiz(Nixoterra.qNADictS2)
class Silvia(GameWorld):
"""Child class for Forest World - Silvia"""
#name = "Silvia"
qNADictQ = {
"Q1: Can you use a GPS device effectively?\n a. Yes\n b. No": "a",
"Q2: Which of the following statement is true?\n a. Forest is on land\n b. Forest is under water": "a",
"Q3: What NOT to do in a Forest world?\n a. Start fire\n b. Climb on Tree\n c. Enjoy Nature": "a"
}
qNADictS1 = {
"Q1: Name one of the heavy equipments used in the forest operations\n a. Dump trucks\n b. Toycar\n c. Teddybear": "a",
"Q2: Which is NOT one of the 4 forest types?\n a. Temperate\n b. Tropical\n c. Subtropical\n d. Arboreal" : "d"
}
qNADictS2 = {
"Q1: Forest soil is a natural water filter\n a. True\n b. False": "a",
"Q2: What is the definition of a forest?\n a. A public green area in a town, used for recreation.\n b. An area rich in biodiversity\n c. A large area of land densely populated by trees\n d. A flat area covered in plants" : "c"
}
def __init__(self):
self.worldName = "Silvia"
self.worldType = "ForestWorld"
self.essentials = ["Mosquito Repellent", "Backpack", "Poncho", "Hiking boots", "Trail shoes"]
self.story = "Hoy there! I'm Bryn, and I will be your personal guide through this wonderful world of trees and plants. \nWelcome to the forest, young explorer! These trees are sacred creatures who descended from the heavens. Enjoy your time in this wonderful place that you can call home"
self.qualifyingQuiz = QualifyingQuiz(Silvia.qNADictQ)
self.survivalQuiz1 = SurvivalQuiz(Silvia.qNADictS1)
self.survivalQuiz2 = SurvivalQuiz(Silvia.qNADictS2)
class Aquamundi(GameWorld):
"""Child class for Water World - Aquamundi"""
#name = "Aquamundi"
qNADictQ = {
"Q1: Spell Water\n a. Water\n b. Waiter": "a",
"Q2: Is water same as wind?\n a. Yes\n b. No": "b",
"Q3: Do you know how to swim?\n a. Yes\n b. No": "a"
}
qNADictS1 = {
"Q1: Why do you want to be a Dive instructor?\n a. I like the word Instructor\n b. I love teaching diving": "b",
"Q2: Do you have a Captain's license?\n a. Yes\n b. No": "a"
}
qNADictS2 = {
"Q1: In case of any emergency call, what will be your first step?\n a. Ignore\n b. Immediately act on it": "b",
"Q2: What is a large natural stream of flowing water that ends in a sea?\n a. Lake\n b. Ocean\n c. Glacier\n d. River": "d"
}
def __init__(self):
self.worldName = "Aquamundi"
self.worldType = "WaterWorld"
self.essentials = ["Swimcap", "Bandana", "Float suit", "Rash guard", "Alternate Air Source"]
self.story = "I see, you chose the water world. Great choice! I'm Aquanna, and I will be your guide in Aquamundi. If you are wondering when this endless ocean ends, don't waste your time. \nThis is a huge world, filled with nothing but water. Let's hop on our big boat and start this adventure!"
self.qualifyingQuiz = QualifyingQuiz(Aquamundi.qNADictQ)
self.survivalQuiz1 = SurvivalQuiz(Aquamundi.qNADictS1)
self.survivalQuiz2 = SurvivalQuiz(Aquamundi.qNADictS2)
class Montelocus(GameWorld):
"""Child class for Mountain World - Montelocus"""
#name = "Montelocus"
qNADictQ = {
"Q1: Do you know how to use mountain gear?\n a. Yes | """Child class to Quiz class that stores dictionary of questions and answers for the Survival quizzes"""
def __init__(self, qNADict={"Ques2": "Ans2"}):
self.questionAndAnswer = qNADict | identifier_body | |
GatewayToAdventure.py | :
"""Base class to store dictionary of questions and answers for the game quizzes"""
def __init__(self, qNADict={"Ques": "Ans"}):
self.questionAndAnswer = qNADict
class QualifyingQuiz(Quiz):
"""Child class to Quiz class that stores dictionary of questions and answers for the qualifying quiz"""
def __init__(self, qNADict={"Ques1": "Ans1"}):
self.questionAndAnswer = qNADict
class SurvivalQuiz(Quiz):
"""Child class to Quiz class that stores dictionary of questions and answers for the Survival quizzes"""
def __init__(self, qNADict={"Ques2": "Ans2"}):
self.questionAndAnswer = qNADict
class GameWorld:
"""Base class for creating different worlds in the game"""
def __init__(self, name='gameWorld', typeW='all'):
self.worldName = name
self.worldType = typeW
self.essentials = []
self.qualifyingQuiz = QualifyingQuiz()
self.survivalQuiz1 = SurvivalQuiz()
self.survivalQuiz2 = SurvivalQuiz()
def qualifyingQuizMethod(self, questionAndAnswer):
"""Returns number of correct answers for Qualifying quiz. Compares User response with the desired answer from the existing dictionary"""
questionsRight = 0
for question in questionAndAnswer:
answer = input(f"{bcolors.CYAN} {question[0]}\n: {bcolors.ENDC}")
if answer == question[1]:
questionsRight += 1
elif answer.capitalize() == "Quit":
# User wants to Quit the Game now
print(f"User wants to quit the game now")
questionsRight = "Quit"
break
return questionsRight
def survivalQuizMethod(self, questionAndAnswer):
"""Returns 1 if User correctly answers all the question of the Survival quiz, else 0. Compares User response with the desired answer from the existing dictionary"""
questionsRight = 1
for question in questionAndAnswer:
answer = input(f"\n{bcolors.CYAN}{question[0]}\n:{bcolors.ENDC}")
if answer == question[1]:
questionsRight *= 1
elif answer != question[1]:
questionsRight *= 0
elif answer.capitalize() == "Quit":
# User wants to Quit the Game now
print(f"User wants to quit the game now")
questionsRight = "Quit"
break
return questionsRight
def getEssentialList(self):
"""Returns list of essentials for a particular world"""
return self.essentials
def __str__(self):
return self.worldName
class Nixoterra(GameWorld):
    """Snow world ("Nixoterra") — GameWorld subclass.

    The class-level dictionaries are quiz banks: each key is the full
    question text (including its multiple-choice options) and each value is
    the single correct option letter.
    """
    #name = "Nixoterra"
    # Qualifying quiz: gates entry into the world.
    # NOTE(review): the answer key for Q1 is "d" (-5 degrees) but the
    # freezing point of snow/water is 0 degrees celsius ("a") — confirm
    # whether this is intentional game data or a content bug.
    qNADictQ = {
        "Q1: What is the freezing point of snow?\n a. 0 degrees celcius\n b. 30 degrees celcius\n c. 100 degrees celcius\n d. -5 degrees celcius": "d",
        "Q2: Which ball do we use in the snow ball activity at snow world?\n a. Leather Ball\n b. Foot Ball\n c. Snow Ball\n d. All of the above": "d",
        "Q3: Which cartoon character does snow world have?\n a. Penguin\n b. Olaf\n c. Snowman\n d. All of the above": "d"
    }
    # Survival quiz 1: snowboard-instructor themed.
    qNADictS1 = {
        "Q1: Describe a typical workweek for a snowboard instructor\n a. Sleep all week long\n b. Play with Ice\n c. Instruct and provide training to groups/individuals": "c",
        "Q2: what's your level of experience with edging, waxing and mounting skis and snowboard?\n a. Novice\n b. Proficient\n c. expert": "c"
    }
    # Survival quiz 2: snowmaker themed.
    qNADictS2 = {
        "Q1: What equipment is/are required for working as an experienced snowmaker\n a. Snow guns\n b. hoses\n c. hydrants\n d. All of the above": "d",
        "Q2: Can you safely lift 50 lbs\n a. Yes\n b. No": "a"
    }
    def __init__(self):
        # Identity and flavour text shown to the player.
        self.worldName = "Nixoterra"
        self.worldType = "SnowWorld"
        # Items offered during the buy-essentials stage.
        self.essentials = ["Mittens", "Snowboots", "Ski Goggles", "Wool Socks", "Hand warmer"]
        self.story = "Ice, Chill, Snow, Cold, Freezerealm, Glacia. You name it. \nThey are all names for the mystic Nixoterra. Allow me, Nixette, to guide you through this world. This land is the mother of all snow and ice. Explore this world carefully, because the temperatures are- well, piercing. But other than that, just chill."
        #ToDo: Fill real name of capital city
        # Quiz objects wrap the class-level question banks above.
        self.qualifyingQuiz = QualifyingQuiz(Nixoterra.qNADictQ)
        self.survivalQuiz1 = SurvivalQuiz(Nixoterra.qNADictS1)
        self.survivalQuiz2 = SurvivalQuiz(Nixoterra.qNADictS2)
class Silvia(GameWorld):
    """Forest world ("Silvia") — GameWorld subclass.

    The class-level dictionaries are quiz banks: each key is the full
    question text (including its multiple-choice options) and each value is
    the single correct option letter.
    """
    #name = "Silvia"
    # Qualifying quiz: gates entry into the world.
    qNADictQ = {
        "Q1: Can you use a GPS device effectively?\n a. Yes\n b. No": "a",
        "Q2: Which of the following statement is true?\n a. Forest is on land\n b. Forest is under water": "a",
        "Q3: What NOT to do in a Forest world?\n a. Start fire\n b. Climb on Tree\n c. Enjoy Nature": "a"
    }
    # Survival quiz 1: forestry-operations themed.
    qNADictS1 = {
        "Q1: Name one of the heavy equipments used in the forest operations\n a. Dump trucks\n b. Toycar\n c. Teddybear": "a",
        "Q2: Which is NOT one of the 4 forest types?\n a. Temperate\n b. Tropical\n c. Subtropical\n d. Arboreal" : "d"
    }
    # Survival quiz 2: forest-ecology themed.
    qNADictS2 = {
        "Q1: Forest soil is a natural water filter\n a. True\n b. False": "a",
        "Q2: What is the definition of a forest?\n a. A public green area in a town, used for recreation.\n b. An area rich in biodiversity\n c. A large area of land densely populated by trees\n d. A flat area covered in plants" : "c"
    }
    def __init__(self):
        # Identity and flavour text shown to the player.
        self.worldName = "Silvia"
        self.worldType = "ForestWorld"
        # Items offered during the buy-essentials stage.
        self.essentials = ["Mosquito Repellent", "Backpack", "Poncho", "Hiking boots", "Trail shoes"]
        self.story = "Hoy there! I'm Bryn, and I will be your personal guide through this wonderful world of trees and plants. \nWelcome to the forest, young explorer! These trees are sacred creatures who descended from the heavens. Enjoy your time in this wonderful place that you can call home"
        # Quiz objects wrap the class-level question banks above.
        self.qualifyingQuiz = QualifyingQuiz(Silvia.qNADictQ)
        self.survivalQuiz1 = SurvivalQuiz(Silvia.qNADictS1)
        self.survivalQuiz2 = SurvivalQuiz(Silvia.qNADictS2)
class Aquamundi(GameWorld):
"""Child class for Water World - Aquamundi"""
#name = "Aquamundi"
qNADictQ = {
"Q1: Spell Water\n a. Water\n b. Waiter": "a",
"Q2: Is water same as wind?\n a. Yes\n b. No": "b",
"Q3: Do you know how to swim?\n a. Yes\n b. No": "a"
}
qNADictS1 = {
"Q1: Why do you want to be a Dive instructor?\n a. I like the word Instructor\n b. I love teaching diving": "b",
"Q2: Do you have a Captain's license?\n a. Yes\n b. No": "a"
}
qNADictS2 = {
"Q1: In case of any emergency call, what will be your first step?\n a. Ignore\n b. Immediately act on it": "b",
"Q2: What is a large natural stream of flowing water that ends in a sea?\n a. Lake\n b. Ocean\n c. Glacier\n d. River": "d"
}
def __init__(self):
self.worldName = "Aquamundi"
self.worldType = "WaterWorld"
self.essentials = ["Swimcap", "Bandana", "Float suit", "Rash guard", "Alternate Air Source"]
self.story = "I see, you chose the water world. Great choice! I'm Aquanna, and I will be your guide in Aquamundi. If you are wondering when this endless ocean ends, don't waste your time. \nThis is a huge world, filled with nothing but water. Let's hop on our big boat and start this adventure | Quiz | identifier_name | |
GatewayToAdventure.py | "
}
qNADictS1 = {
"Q1: What would you select to climb in tricky mountain conditions?\n a. Mountaineering boots\n b. mountains": "a",
"Q2: What would you do if someone calls for help?\n a. It's their problem, I will just ignore\n b. Go and help them": "b"
}
qNADictS2 = {
"Q1: Which of the following is the highest part of a mountain?\n a. Peak\n b. Base\n c. Slope\n d. None of the above": "a",
"Q2: What should you do if you fall off a mountain during Climbing?\n a. Have a quick snack\n b. Make a quick phone call\n c. Take a selfie and post it on instagram\n d. Use a harness": "d"
}
def __init__(self):
    """Initialise the mountain world's identity, shop items, intro story,
    and quiz objects (built from the class-level question banks)."""
    self.worldName = "Montelocus"
    self.worldType = "MountainWorld"
    # Items offered during the buy-essentials stage.
    self.essentials = ["Climbing Helmet", "Ropes", "Harness", "Mountaineering Boots", "Crampons"]
    self.story = "Isn't it beautiful, the divine mountain range of Montelocus. Oh hey! I didn't see you there! I'm Cashel, and I will lead you through the wonders of this rocky world, with mountains everywhere. \nThese mountains are said to be a gift from the sky. Take a moment to appreciate this particularly beautiful aspect of nature! \nAnyway, see you around!"
    # Quiz objects wrap the class-level question banks.
    self.qualifyingQuiz = QualifyingQuiz(Montelocus.qNADictQ)
    self.survivalQuiz1 = SurvivalQuiz(Montelocus.qNADictS1)
    self.survivalQuiz2 = SurvivalQuiz(Montelocus.qNADictS2)
class User:
    """Stores user information and tracks user activity throughout multiple
    stages of the game: coin/gem balances, purchased assets, and the world
    (``location``) the user is currently playing in.
    """
    def __init__(self, name, location, system, coins=100):
        """Create a player.

        Args:
            name: display name of the player.
            location: the GameWorld instance the player is exploring.
            system: the owning GameSystem (holds game-balance constants).
            coins: starting coin balance (default 100).
        """
        self.name = name
        self.location = location
        # not sure if we need it
        self.system = system
        self.coins = coins
        self.gems = 0
        self.assets = []

    def __str__(self):
        return self.name

    def collectGem(self):
        """Increments gem count of user by 1, if the user earns a gem."""
        self.gems += 1

    def buyGem(self, amount):
        """Buy one gem for `amount` coins.

        Returns True (and increments gems) when the user can afford it,
        False otherwise.

        BUG FIX: `amount` was previously ignored — the price was hard-coded
        as ``self.spendCoin(amount=25)``. The argument is now honoured.
        """
        returnVal = False
        if self.spendCoin(amount):
            self.gems += 1
            returnVal = True
        return returnVal

    def earnCoin(self, amount):
        """Increments count of coins by a certain amount."""
        self.coins += amount

    def spendCoin(self, amount):
        """Deduct `amount` coins if the balance allows it.

        Returns:
            True when the coins were deducted; False when the balance is
            insufficient (the balance is left untouched in that case).
        """
        returnVal = False
        if self.coins >= amount:
            self.coins -= amount
            returnVal = True
        return returnVal

    def getCoins(self):
        """Returns number of coins of the user."""
        return self.coins

    def getGems(self):
        """Returns number of gems of the user."""
        return self.gems

    def buyEssentials(self, amount=5):
        """Let the user buy essential items for the selected world.

        Each chosen item costs `amount` coins. Returns the list of items
        actually bought (also stored on ``self.assets``), or the raw choice
        list when the user typed "quit".
        """
        ls = self.location.getEssentialList()
        print(f"{bcolors.WHITE}\nGreat job so far! Now, it's time to arm yourself with some essentials you would need to survive in the {self.location.worldType}.{bcolors.ENDC}")
        print(f"\n{bcolors.CYAN}Following are the essential items for {self.location.worldName}. Please choose a minimum of 3 items to proceed.{bcolors.ENDC}")
        outputList = [str(i+1) + ". " + ls[i] + "\n" for i in range(len(ls))]
        print(f"\n{bcolors.CYAN}{''.join(outputList)}{bcolors.ENDC}")
        sizeEssentialList = len(ls)
        essentialsList = []
        choiceInput = False
        while choiceInput is False:
            choices = input(f"{bcolors.CYAN}Input your selection as numbers 1, 2, 3, 4, or 5 separated by comma: {bcolors.ENDC}")
            choiceInput = True
            # BUG FIX: strip whitespace around each token so "1, 2, 3" — the
            # exact format shown in the prompt above — is accepted; it was
            # previously rejected because " 2" failed the membership test.
            choices = [choice.strip() for choice in choices.split(',')]
            for choice in choices:
                if choice not in ('1', '2', '3', '4', '5', 'quit', 'QUIT', 'Quit'):
                    print(f"\n{bcolors.PINK}Please enter a valid Input{bcolors.ENDC}\n")
                    choiceInput = False
                    break
        for choice in choices:
            if choice.capitalize() == "Quit":
                # User input "Quit" at this stage. So, just quit the game.
                return choices
        try:
            # Convert input to integer for index in essentialList item
            choices = [int(i) for i in choices]
        except ValueError:
            # If input is not a number, Quit gracefully!
            print("Input is not a number. Quit")
            return essentialsList
        if max(choices) > sizeEssentialList:
            print(f"Invalid input! Input is not in essentialList")
            return essentialsList
        for j in choices:
            if self.spendCoin(amount):
                essentialsList.append(ls[j-1])
            else:
                print(f"You don't have enough money to buy {j}. You only have {self.coins} coins left.")
                break
        self.assets = essentialsList
        print(f"\n{bcolors.WHITE}Thank you for buying the essentials. Now you are officially ready to enter into the {self.location.worldType}.\nHere is your current asset bag with essential items and the available coins.{bcolors.ENDC}")
        print(f"\n{bcolors.YELLOW}Asset Bag Contents: {self.assets}\nCoins: {self.coins}{bcolors.ENDC}")
        return self.assets

    def getAssets(self):
        """Returns list of assets of the user"""
        return self.assets

    def takeSurvivalQuiz(self, quiz):
        """Run the named survival quiz for the current world.

        Args:
            quiz: either 'survivalQuiz1' or 'survivalQuiz2'.

        Returns:
            The quiz result: 1 (all correct), 0 (some wrong), or "Quit".
        """
        if quiz == 'survivalQuiz1':
            print(f"\n{bcolors.WHITE}Starting Survival quiz1 for {self.location.worldName} in 3 2 1....{bcolors.ENDC}")
            s(2)
            print(f"\n{bcolors.CYAN}To answer the following questions, input your choice by entering 'a', 'b', 'c', or 'd': {bcolors.ENDC}")
            userSurvival = list(self.location.survivalQuiz1.questionAndAnswer.items())
        elif quiz == 'survivalQuiz2':
            print(f"\n{bcolors.WHITE}Starting Survival quiz2 for {self.location.worldName} in 3 2 1....{bcolors.ENDC}")
            s(2)
            #print(f"\n{bcolors.CYAN}To answer the following questions, input your choice by entering 'a', 'b', 'c', or 'd': {bcolors.ENDC}")
            userSurvival = list(self.location.survivalQuiz2.questionAndAnswer.items())
        else:
            # BUG FIX: wrong argument, should not have reached here. The old
            # code passed the string "Quit" into survivalQuizMethod, which
            # iterated it character-by-character and raised IndexError on
            # question[1]. Return the sentinel directly instead.
            return "Quit"
        survivalResults = self.location.survivalQuizMethod(userSurvival)
        return survivalResults

    def navigateToCapitalcity(self, amount=20):
        """Spend `amount` coins to reach the capital city; on success the
        user earns one gem. Returns True on success, False otherwise."""
        navSuccess = False
        if self.spendCoin(amount):
            self.gems += 1
            navSuccess = True
        return navSuccess
class GameSystem:
"""This is the game engine where other classes are ingested. This class controls the whole game system and stores system information"""
def __init__(self):
self.coins = 100
self.name = "Gateway To Adventure"
self.minCorrectAnswers = 2
self.minGemsWin = 4
self.minCoinsWin = 25
self.minGemForCapitalCity = 2
self.buyGemAmount = 25
self.minEssentials = 3
self.maxAssetNum = 5
self.survivalWinCoins = 10
self.userName = "Empty"
def __str__(self):
    # Printable form of the engine is its game title.
    return self.name
def getUserLocation(self, userName, worldList):
"""Returns user location after User selects a world"""
validInput = False
itemizedWorldList = [ "\n" + str(i+1) + ". " + worldList[i] for i in range(len(worldList))]
while validInput is False:
print(f"{bcolors.CYAN}Choose a world from the following: {bcolors.BOLD}{''.join(itemizedWorldList)}{bcolors.ENDC}")
userLocation = input(f"{bcolors.CYAN}Please input the exact name of the world: {bcolors.ENDC}")
| userLocation = userLocation.capitalize()
if userLocation in worldList:
validInput = True
else: | random_line_split | |
eqivalence.go | "
"k8s.io/kubernetes/pkg/scheduler/algorithm"
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
hashutil "k8s.io/kubernetes/pkg/util/hash"
"github.com/golang/glog"
)
// Cache saves and reuses the output of predicate functions. Use RunPredicate to
// get or update the cached results. An appropriate Invalidate* function should
// be called when some predicate results are no longer valid.
//
// Internally, results are keyed by node name, predicate name, and "equivalence
// class". (Equivalence class is defined in the `Class` type.) Saved results
// will be reused until an appropriate invalidation function is called.
type Cache struct {
	mu    sync.RWMutex // guards all reads and writes of cache below
	cache nodeMap      // node name -> predicate name -> equivalence hash -> result
}
// NewCache returns an empty Cache. The top-level node map is allocated up
// front so callers can store results without further initialisation.
func NewCache() *Cache {
	c := &Cache{}
	c.cache = make(nodeMap)
	return c
}
// Class represents a set of pods which are equivalent from the perspective of
// the scheduler. i.e. the scheduler would make the same decision for any pod
// from the same class.
type Class struct {
	// Equivalence hash: an FNV-1a 32-bit hash of the pod's equivalence
	// fields, widened to uint64 (see NewClass).
	hash uint64
}
// NewClass returns the equivalence class for a given Pod. The returned Class
// objects will be equal for two Pods in the same class. nil values should not
// be considered equal to each other.
//
// NOTE: Make sure to compare types of Class and not *Class.
// TODO(misterikkit): Return error instead of nil *Class.
func NewClass(pod *v1.Pod) *Class {
	equivPod := getEquivalencePod(pod)
	if equivPod == nil {
		return nil
	}
	// Hash the equivalence fields with FNV-1a and widen to uint64 for the key.
	hasher := fnv.New32a()
	hashutil.DeepHashObject(hasher, equivPod)
	return &Class{hash: uint64(hasher.Sum32())}
}
// nodeMap stores PredicateCaches with node name as the key.
type nodeMap map[string]predicateMap

// predicateMap stores resultMaps with predicate name as the key.
type predicateMap map[string]resultMap

// resultMap stores PredicateResult with pod equivalence hash as the key.
type resultMap map[uint64]predicateResult

// predicateResult stores the output of a FitPredicate.
type predicateResult struct {
	Fit         bool                               // whether the pod fits the node
	FailReasons []algorithm.PredicateFailureReason // failure reasons as returned by the predicate
}
// RunPredicate returns a cached predicate result. In case of a cache miss, the predicate will be
// run and its results cached for the next call.
//
// NOTE: RunPredicate will not update the equivalence cache if the given NodeInfo is stale.
func (c *Cache) RunPredicate(
	pred algorithm.FitPredicate,
	predicateKey string,
	pod *v1.Pod,
	meta algorithm.PredicateMetadata,
	nodeInfo *schedulercache.NodeInfo,
	equivClass *Class,
	cache schedulercache.Cache,
) (bool, []algorithm.PredicateFailureReason, error) {
	if nodeInfo == nil || nodeInfo.Node() == nil {
		// This may happen during tests.
		return false, []algorithm.PredicateFailureReason{}, fmt.Errorf("nodeInfo is nil or node is invalid")
	}
	// Fast path: reuse a previously computed result for this
	// (node, predicate, equivalence-class) triple.
	// NOTE(review): equivClass is dereferenced unconditionally here, so
	// callers must guarantee it is non-nil — confirm at the call sites.
	result, ok := c.lookupResult(pod.GetName(), nodeInfo.Node().GetName(), predicateKey, equivClass.hash)
	if ok {
		return result.Fit, result.FailReasons, nil
	}
	// Cache miss: run the real predicate.
	fit, reasons, err := pred(pod, meta, nodeInfo)
	if err != nil {
		return fit, reasons, err
	}
	// Persist the outcome only when a scheduler cache is supplied;
	// updateResult additionally skips the write if nodeInfo is stale.
	if cache != nil {
		c.updateResult(pod.GetName(), predicateKey, fit, reasons, equivClass.hash, cache, nodeInfo)
	}
	return fit, reasons, nil
}
// updateResult updates the cached result of a predicate.
//
// The write is skipped when nodeInfo is nil/invalid or when the scheduler
// cache reports nodeInfo as stale, so a result computed against outdated
// node state never lands in the equivalence cache.
func (c *Cache) updateResult(
	podName, predicateKey string,
	fit bool,
	reasons []algorithm.PredicateFailureReason,
	equivalenceHash uint64,
	cache schedulercache.Cache,
	nodeInfo *schedulercache.NodeInfo,
) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if nodeInfo == nil || nodeInfo.Node() == nil {
		// This may happen during tests.
		return
	}
	// Skip update if NodeInfo is stale. Checked under the write lock so the
	// staleness test and the map write happen atomically with respect to
	// concurrent invalidations.
	if !cache.IsUpToDate(nodeInfo) {
		return
	}
	nodeName := nodeInfo.Node().GetName()
	// Lazily create the per-node predicate map on first write for this node.
	if _, exist := c.cache[nodeName]; !exist {
		c.cache[nodeName] = make(predicateMap)
	}
	predicateItem := predicateResult{
		Fit:         fit,
		FailReasons: reasons,
	}
	// if cached predicate map already exists, just update the predicate by key
	if predicates, ok := c.cache[nodeName][predicateKey]; ok {
		// maps in golang are references, no need to add them back
		predicates[equivalenceHash] = predicateItem
	} else {
		c.cache[nodeName][predicateKey] =
			resultMap{
				equivalenceHash: predicateItem,
			}
	}
	glog.V(5).Infof("Cache update: node=%s,predicate=%s,pod=%s,value=%v", nodeName, predicateKey, podName, predicateItem)
}
// lookupResult returns cached predicate results and a bool saying whether a
// cache entry was found.
func (c *Cache) lookupResult(
	podName, nodeName, predicateKey string,
	equivalenceHash uint64,
) (predicateResult, bool) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	glog.V(5).Infof("Cache lookup: node=%s,predicate=%s,pod=%s", nodeName, predicateKey, podName)
	// Missing levels in the nested maps yield the zero predicateResult and
	// found == false, so no existence checks are needed.
	res, found := c.cache[nodeName][predicateKey][equivalenceHash]
	return res, found
}
// InvalidatePredicates clears all cached results for the given predicates.
func (c *Cache) InvalidatePredicates(predicateKeys sets.String) |
// InvalidatePredicatesOnNode clears cached results for the given predicates on one node.
func (c *Cache) InvalidatePredicatesOnNode(nodeName string, predicateKeys sets.String) {
	if len(predicateKeys) == 0 {
		return
	}
	c.mu.Lock()
	defer c.mu.Unlock()
	// delete on a nil inner map is a no-op, so an unknown node is safe here.
	nodePredicates := c.cache[nodeName]
	for key := range predicateKeys {
		delete(nodePredicates, key)
	}
	glog.V(5).Infof("Cache invalidation: node=%s,predicates=%v", nodeName, predicateKeys)
}
// InvalidateAllPredicatesOnNode clears all cached results for one node.
func (c *Cache) InvalidateAllPredicatesOnNode(nodeName string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	// Dropping the whole per-node map releases every predicate/equivalence
	// entry for this node at once.
	delete(c.cache, nodeName)
	glog.V(5).Infof("Cache invalidation: node=%s,predicates=*", nodeName)
}
// InvalidateCachedPredicateItemForPodAdd is a wrapper of
// InvalidateCachedPredicateItem for the pod-add case.
// TODO: This does not belong with the equivalence cache implementation.
func (c *Cache) InvalidateCachedPredicateItemForPodAdd(pod *v1.Pod, nodeName string) {
	// MatchInterPodAffinity: we assume the scheduler can make sure a newly
	// bound pod will not break the existing inter-pod affinity, so we do not
	// need to invalidate MatchInterPodAffinity when a pod is added.
	//
	// But when a pod is deleted, existing inter-pod affinity may become
	// invalid (e.g. this pod was preferred by someone else, or vice versa).
	//
	// NOTE: the assumptions above will not stand when we implement features
	// like RequiredDuringSchedulingRequiredDuringExecution.

	// NoDiskConflict: the newly scheduled pod fits the existing pods on this
	// node, so it will also fit the equivalence class of existing pods.

	// GeneralPredicates: will always be affected by adding a new pod.
	invalidPredicates := sets.NewString(predicates.GeneralPred)
	// MaxPDVolumeCountPredicate: we check the volumes of the pod to make a decision.
	for _, vol := range pod.Spec.Volumes {
		if vol.PersistentVolumeClaim != nil {
			// A PVC's backing disk type is not visible here, so conservatively
			// invalidate all three volume-count predicates.
			invalidPredicates.Insert(predicates.MaxEBSVolumeCountPred, predicates.MaxGCEPDVolumeCountPred, predicates.MaxAzureDiskVolumeCountPred)
		} else {
			// Inline volumes identify their disk type directly.
			if vol.AWSElasticBlockStore != nil {
				invalidPredicates.Insert(predicates.MaxEBSVolumeCountPred)
			}
			if vol.GCEPersistentDisk != nil {
				invalidPredicates.Insert(predicates.MaxGCEPDVolumeCountPred)
			}
			if vol.AzureDisk != nil {
				invalidPredicates.Insert(predicates.MaxAzureDiskVolumeCountPred)
			}
		}
	}
	c.InvalidatePredicatesOnNode(nodeName, invalidPredicates)
}
// equivalencePod is the set of pod attributes which must match for two pods to
// be considered equivalent for scheduling purposes. For correctness, this must
// include any Pod field which is used by a FitPredicate.
//
// NOTE: For equivalence hash to be formally correct, lists and maps in the
// equivalencePod should be normalized. (e.g. by sorting them) However, the vast
// majority of equivalent pod classes are expected to be created from a single
// pod template, so they | {
if len(predicateKeys) == 0 {
return
}
c.mu.Lock()
defer c.mu.Unlock()
// c.cache uses nodeName as key, so we just iterate it and invalid given predicates
for _, predicates := range c.cache {
for predicateKey := range predicateKeys {
delete(predicates, predicateKey)
}
}
glog.V(5).Infof("Cache invalidation: node=*,predicates=%v", predicateKeys)
} | identifier_body |
eqivalence.go | sets"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
hashutil "k8s.io/kubernetes/pkg/util/hash"
"github.com/golang/glog"
)
// Cache saves and reuses the output of predicate functions. Use RunPredicate to
// get or update the cached results. An appropriate Invalidate* function should
// be called when some predicate results are no longer valid.
//
// Internally, results are keyed by node name, predicate name, and "equivalence
// class". (Equivalence class is defined in the `Class` type.) Saved results
// will be reused until an appropriate invalidation function is called.
type Cache struct {
mu sync.RWMutex
cache nodeMap
}
// NewCache returns an empty Cache.
func NewCache() *Cache {
return &Cache{
cache: make(nodeMap),
}
}
// Class represents a set of pods which are equivalent from the perspective of
// the scheduler. i.e. the scheduler would make the same decision for any pod
// from the same class.
type Class struct {
// Equivalence hash
hash uint64
}
// NewClass returns the equivalence class for a given Pod. The returned Class
// objects will be equal for two Pods in the same class. nil values should not
// be considered equal to each other.
//
// NOTE: Make sure to compare types of Class and not *Class.
// TODO(misterikkit): Return error instead of nil *Class.
func NewClass(pod *v1.Pod) *Class {
equivalencePod := getEquivalencePod(pod)
if equivalencePod != nil {
hash := fnv.New32a()
hashutil.DeepHashObject(hash, equivalencePod)
return &Class{
hash: uint64(hash.Sum32()),
}
}
return nil
}
// nodeMap stores PredicateCaches with node name as the key.
type nodeMap map[string]predicateMap
// predicateMap stores resultMaps with predicate name as the key.
type predicateMap map[string]resultMap
// resultMap stores PredicateResult with pod equivalence hash as the key.
type resultMap map[uint64]predicateResult
// predicateResult stores the output of a FitPredicate.
type predicateResult struct {
Fit bool
FailReasons []algorithm.PredicateFailureReason
}
// RunPredicate returns a cached predicate result. In case of a cache miss, the predicate will be
// run and its results cached for the next call.
//
// NOTE: RunPredicate will not update the equivalence cache if the given NodeInfo is stale.
func (c *Cache) RunPredicate(
pred algorithm.FitPredicate,
predicateKey string,
pod *v1.Pod,
meta algorithm.PredicateMetadata,
nodeInfo *schedulercache.NodeInfo,
equivClass *Class,
cache schedulercache.Cache,
) (bool, []algorithm.PredicateFailureReason, error) {
if nodeInfo == nil || nodeInfo.Node() == nil {
// This may happen during tests.
return false, []algorithm.PredicateFailureReason{}, fmt.Errorf("nodeInfo is nil or node is invalid")
}
result, ok := c.lookupResult(pod.GetName(), nodeInfo.Node().GetName(), predicateKey, equivClass.hash)
if ok {
return result.Fit, result.FailReasons, nil
}
fit, reasons, err := pred(pod, meta, nodeInfo)
if err != nil {
return fit, reasons, err
}
if cache != nil {
c.updateResult(pod.GetName(), predicateKey, fit, reasons, equivClass.hash, cache, nodeInfo)
}
return fit, reasons, nil
}
// updateResult updates the cached result of a predicate.
func (c *Cache) updateResult(
podName, predicateKey string,
fit bool,
reasons []algorithm.PredicateFailureReason,
equivalenceHash uint64,
cache schedulercache.Cache,
nodeInfo *schedulercache.NodeInfo,
) {
c.mu.Lock()
defer c.mu.Unlock()
if nodeInfo == nil || nodeInfo.Node() == nil {
// This may happen during tests.
return
}
// Skip update if NodeInfo is stale.
if !cache.IsUpToDate(nodeInfo) {
return
}
nodeName := nodeInfo.Node().GetName()
if _, exist := c.cache[nodeName]; !exist {
c.cache[nodeName] = make(predicateMap)
}
predicateItem := predicateResult{
Fit: fit,
FailReasons: reasons,
}
// if cached predicate map already exists, just update the predicate by key
if predicates, ok := c.cache[nodeName][predicateKey]; ok {
// maps in golang are references, no need to add them back
predicates[equivalenceHash] = predicateItem
} else {
c.cache[nodeName][predicateKey] =
resultMap{
equivalenceHash: predicateItem,
}
}
glog.V(5).Infof("Cache update: node=%s,predicate=%s,pod=%s,value=%v", nodeName, predicateKey, podName, predicateItem)
}
// lookupResult returns cached predicate results and a bool saying whether a
// cache entry was found.
func (c *Cache) lookupResult(
podName, nodeName, predicateKey string,
equivalenceHash uint64,
) (value predicateResult, ok bool) {
c.mu.RLock()
defer c.mu.RUnlock()
glog.V(5).Infof("Cache lookup: node=%s,predicate=%s,pod=%s", nodeName, predicateKey, podName)
value, ok = c.cache[nodeName][predicateKey][equivalenceHash]
return value, ok
}
// InvalidatePredicates clears all cached results for the given predicates.
func (c *Cache) InvalidatePredicates(predicateKeys sets.String) {
if len(predicateKeys) == 0 {
return
}
c.mu.Lock()
defer c.mu.Unlock()
// c.cache uses nodeName as key, so we just iterate it and invalid given predicates
for _, predicates := range c.cache {
for predicateKey := range predicateKeys {
delete(predicates, predicateKey)
}
}
glog.V(5).Infof("Cache invalidation: node=*,predicates=%v", predicateKeys)
}
// InvalidatePredicatesOnNode clears cached results for the given predicates on one node.
func (c *Cache) InvalidatePredicatesOnNode(nodeName string, predicateKeys sets.String) {
if len(predicateKeys) == 0 {
return
}
c.mu.Lock()
defer c.mu.Unlock()
for predicateKey := range predicateKeys {
delete(c.cache[nodeName], predicateKey)
}
glog.V(5).Infof("Cache invalidation: node=%s,predicates=%v", nodeName, predicateKeys)
}
// InvalidateAllPredicatesOnNode clears all cached results for one node.
func (c *Cache) InvalidateAllPredicatesOnNode(nodeName string) {
c.mu.Lock()
defer c.mu.Unlock()
delete(c.cache, nodeName)
glog.V(5).Infof("Cache invalidation: node=%s,predicates=*", nodeName)
}
// InvalidateCachedPredicateItemForPodAdd is a wrapper of
// InvalidateCachedPredicateItem for pod add case
// TODO: This does not belong with the equivalence cache implementation.
func (c *Cache) InvalidateCachedPredicateItemForPodAdd(pod *v1.Pod, nodeName string) {
// MatchInterPodAffinity: we assume scheduler can make sure newly bound pod
// will not break the existing inter pod affinity. So we does not need to
// invalidate MatchInterPodAffinity when pod added.
//
// But when a pod is deleted, existing inter pod affinity may become invalid.
// (e.g. this pod was preferred by some else, or vice versa)
//
// NOTE: assumptions above will not stand when we implemented features like
// RequiredDuringSchedulingRequiredDuringExecution. | // NoDiskConflict: the newly scheduled pod fits to existing pods on this node,
// it will also fits to equivalence class of existing pods
// GeneralPredicates: will always be affected by adding a new pod
invalidPredicates := sets.NewString(predicates.GeneralPred)
// MaxPDVolumeCountPredicate: we check the volumes of pod to make decision.
for _, vol := range pod.Spec.Volumes {
if vol.PersistentVolumeClaim != nil {
invalidPredicates.Insert(predicates.MaxEBSVolumeCountPred, predicates.MaxGCEPDVolumeCountPred, predicates.MaxAzureDiskVolumeCountPred)
} else {
if vol.AWSElasticBlockStore != nil {
invalidPredicates.Insert(predicates.MaxEBSVolumeCountPred)
}
if vol.GCEPersistentDisk != nil {
invalidPredicates.Insert(predicates.MaxGCEPDVolumeCountPred)
}
if vol.AzureDisk != nil {
invalidPredicates.Insert(predicates.MaxAzureDiskVolumeCountPred)
}
}
}
c.InvalidatePredicatesOnNode(nodeName, invalidPredicates)
}
// equivalencePod is the set of pod attributes which must match for two pods to
// be considered equivalent for scheduling purposes. For correctness, this must
// include any Pod field which is used by a FitPredicate.
//
// NOTE: For equivalence hash to be formally correct, lists and maps in the
// equivalencePod should be normalized. (e.g. by sorting them) However, the vast
// majority of equivalent pod classes are expected to be created from a single
// pod template, so they will | random_line_split | |
eqivalence.go | "
"k8s.io/kubernetes/pkg/scheduler/algorithm"
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
hashutil "k8s.io/kubernetes/pkg/util/hash"
"github.com/golang/glog"
)
// Cache saves and reuses the output of predicate functions. Use RunPredicate to
// get or update the cached results. An appropriate Invalidate* function should
// be called when some predicate results are no longer valid.
//
// Internally, results are keyed by node name, predicate name, and "equivalence
// class". (Equivalence class is defined in the `Class` type.) Saved results
// will be reused until an appropriate invalidation function is called.
type Cache struct {
mu sync.RWMutex
cache nodeMap
}
// NewCache returns an empty Cache.
func NewCache() *Cache {
return &Cache{
cache: make(nodeMap),
}
}
// Class represents a set of pods which are equivalent from the perspective of
// the scheduler. i.e. the scheduler would make the same decision for any pod
// from the same class.
type Class struct {
// Equivalence hash
hash uint64
}
// NewClass returns the equivalence class for a given Pod. The returned Class
// objects will be equal for two Pods in the same class. nil values should not
// be considered equal to each other.
//
// NOTE: Make sure to compare types of Class and not *Class.
// TODO(misterikkit): Return error instead of nil *Class.
func NewClass(pod *v1.Pod) *Class {
equivalencePod := getEquivalencePod(pod)
if equivalencePod != nil {
hash := fnv.New32a()
hashutil.DeepHashObject(hash, equivalencePod)
return &Class{
hash: uint64(hash.Sum32()),
}
}
return nil
}
// nodeMap stores PredicateCaches with node name as the key.
type nodeMap map[string]predicateMap
// predicateMap stores resultMaps with predicate name as the key.
type predicateMap map[string]resultMap
// resultMap stores PredicateResult with pod equivalence hash as the key.
type resultMap map[uint64]predicateResult
// predicateResult stores the output of a FitPredicate.
type predicateResult struct {
Fit bool
FailReasons []algorithm.PredicateFailureReason
}
// RunPredicate returns a cached predicate result. In case of a cache miss, the predicate will be
// run and its results cached for the next call.
//
// NOTE: RunPredicate will not update the equivalence cache if the given NodeInfo is stale.
func (c *Cache) RunPredicate(
pred algorithm.FitPredicate,
predicateKey string,
pod *v1.Pod,
meta algorithm.PredicateMetadata,
nodeInfo *schedulercache.NodeInfo,
equivClass *Class,
cache schedulercache.Cache,
) (bool, []algorithm.PredicateFailureReason, error) {
if nodeInfo == nil || nodeInfo.Node() == nil {
// This may happen during tests.
return false, []algorithm.PredicateFailureReason{}, fmt.Errorf("nodeInfo is nil or node is invalid")
}
result, ok := c.lookupResult(pod.GetName(), nodeInfo.Node().GetName(), predicateKey, equivClass.hash)
if ok {
return result.Fit, result.FailReasons, nil
}
fit, reasons, err := pred(pod, meta, nodeInfo)
if err != nil {
return fit, reasons, err
}
if cache != nil {
c.updateResult(pod.GetName(), predicateKey, fit, reasons, equivClass.hash, cache, nodeInfo)
}
return fit, reasons, nil
}
// updateResult updates the cached result of a predicate.
func (c *Cache) updateResult(
podName, predicateKey string,
fit bool,
reasons []algorithm.PredicateFailureReason,
equivalenceHash uint64,
cache schedulercache.Cache,
nodeInfo *schedulercache.NodeInfo,
) {
c.mu.Lock()
defer c.mu.Unlock()
if nodeInfo == nil || nodeInfo.Node() == nil {
// This may happen during tests.
return
}
// Skip update if NodeInfo is stale.
if !cache.IsUpToDate(nodeInfo) {
return
}
nodeName := nodeInfo.Node().GetName()
if _, exist := c.cache[nodeName]; !exist {
c.cache[nodeName] = make(predicateMap)
}
predicateItem := predicateResult{
Fit: fit,
FailReasons: reasons,
}
// if cached predicate map already exists, just update the predicate by key
if predicates, ok := c.cache[nodeName][predicateKey]; ok {
// maps in golang are references, no need to add them back
predicates[equivalenceHash] = predicateItem
} else {
c.cache[nodeName][predicateKey] =
resultMap{
equivalenceHash: predicateItem,
}
}
glog.V(5).Infof("Cache update: node=%s,predicate=%s,pod=%s,value=%v", nodeName, predicateKey, podName, predicateItem)
}
// lookupResult returns cached predicate results and a bool saying whether a
// cache entry was found.
func (c *Cache) lookupResult(
podName, nodeName, predicateKey string,
equivalenceHash uint64,
) (value predicateResult, ok bool) {
c.mu.RLock()
defer c.mu.RUnlock()
glog.V(5).Infof("Cache lookup: node=%s,predicate=%s,pod=%s", nodeName, predicateKey, podName)
value, ok = c.cache[nodeName][predicateKey][equivalenceHash]
return value, ok
}
// InvalidatePredicates clears all cached results for the given predicates.
func (c *Cache) InvalidatePredicates(predicateKeys sets.String) {
if len(predicateKeys) == 0 {
return
}
c.mu.Lock()
defer c.mu.Unlock()
// c.cache uses nodeName as key, so we just iterate it and invalid given predicates
for _, predicates := range c.cache {
for predicateKey := range predicateKeys {
delete(predicates, predicateKey)
}
}
glog.V(5).Infof("Cache invalidation: node=*,predicates=%v", predicateKeys)
}
// InvalidatePredicatesOnNode clears cached results for the given predicates on one node.
func (c *Cache) InvalidatePredicatesOnNode(nodeName string, predicateKeys sets.String) {
if len(predicateKeys) == 0 {
return
}
c.mu.Lock()
defer c.mu.Unlock()
for predicateKey := range predicateKeys {
delete(c.cache[nodeName], predicateKey)
}
glog.V(5).Infof("Cache invalidation: node=%s,predicates=%v", nodeName, predicateKeys)
}
// InvalidateAllPredicatesOnNode clears all cached results for one node.
func (c *Cache) InvalidateAllPredicatesOnNode(nodeName string) {
c.mu.Lock()
defer c.mu.Unlock()
delete(c.cache, nodeName)
glog.V(5).Infof("Cache invalidation: node=%s,predicates=*", nodeName)
}
// InvalidateCachedPredicateItemForPodAdd is a wrapper of
// InvalidateCachedPredicateItem for pod add case
// TODO: This does not belong with the equivalence cache implementation.
func (c *Cache) | (pod *v1.Pod, nodeName string) {
// MatchInterPodAffinity: we assume scheduler can make sure newly bound pod
// will not break the existing inter pod affinity. So we does not need to
// invalidate MatchInterPodAffinity when pod added.
//
// But when a pod is deleted, existing inter pod affinity may become invalid.
// (e.g. this pod was preferred by some else, or vice versa)
//
// NOTE: assumptions above will not stand when we implemented features like
// RequiredDuringSchedulingRequiredDuringExecution.
// NoDiskConflict: the newly scheduled pod fits to existing pods on this node,
// it will also fits to equivalence class of existing pods
// GeneralPredicates: will always be affected by adding a new pod
invalidPredicates := sets.NewString(predicates.GeneralPred)
// MaxPDVolumeCountPredicate: we check the volumes of pod to make decision.
for _, vol := range pod.Spec.Volumes {
if vol.PersistentVolumeClaim != nil {
invalidPredicates.Insert(predicates.MaxEBSVolumeCountPred, predicates.MaxGCEPDVolumeCountPred, predicates.MaxAzureDiskVolumeCountPred)
} else {
if vol.AWSElasticBlockStore != nil {
invalidPredicates.Insert(predicates.MaxEBSVolumeCountPred)
}
if vol.GCEPersistentDisk != nil {
invalidPredicates.Insert(predicates.MaxGCEPDVolumeCountPred)
}
if vol.AzureDisk != nil {
invalidPredicates.Insert(predicates.MaxAzureDiskVolumeCountPred)
}
}
}
c.InvalidatePredicatesOnNode(nodeName, invalidPredicates)
}
// equivalencePod is the set of pod attributes which must match for two pods to
// be considered equivalent for scheduling purposes. For correctness, this must
// include any Pod field which is used by a FitPredicate.
//
// NOTE: For equivalence hash to be formally correct, lists and maps in the
// equivalencePod should be normalized. (e.g. by sorting them) However, the vast
// majority of equivalent pod classes are expected to be created from a single
// pod template, so they | InvalidateCachedPredicateItemForPodAdd | identifier_name |
eqivalence.go | "
"k8s.io/kubernetes/pkg/scheduler/algorithm"
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
hashutil "k8s.io/kubernetes/pkg/util/hash"
"github.com/golang/glog"
)
// Cache saves and reuses the output of predicate functions. Use RunPredicate to
// get or update the cached results. An appropriate Invalidate* function should
// be called when some predicate results are no longer valid.
//
// Internally, results are keyed by node name, predicate name, and "equivalence
// class". (Equivalence class is defined in the `Class` type.) Saved results
// will be reused until an appropriate invalidation function is called.
type Cache struct {
mu sync.RWMutex
cache nodeMap
}
// NewCache returns an empty Cache.
func NewCache() *Cache {
return &Cache{
cache: make(nodeMap),
}
}
// Class represents a set of pods which are equivalent from the perspective of
// the scheduler. i.e. the scheduler would make the same decision for any pod
// from the same class.
type Class struct {
// Equivalence hash
hash uint64
}
// NewClass returns the equivalence class for a given Pod. The returned Class
// objects will be equal for two Pods in the same class. nil values should not
// be considered equal to each other.
//
// NOTE: Make sure to compare types of Class and not *Class.
// TODO(misterikkit): Return error instead of nil *Class.
func NewClass(pod *v1.Pod) *Class {
equivalencePod := getEquivalencePod(pod)
if equivalencePod != nil {
hash := fnv.New32a()
hashutil.DeepHashObject(hash, equivalencePod)
return &Class{
hash: uint64(hash.Sum32()),
}
}
return nil
}
// nodeMap stores PredicateCaches with node name as the key.
type nodeMap map[string]predicateMap
// predicateMap stores resultMaps with predicate name as the key.
type predicateMap map[string]resultMap
// resultMap stores PredicateResult with pod equivalence hash as the key.
type resultMap map[uint64]predicateResult
// predicateResult stores the output of a FitPredicate.
type predicateResult struct {
Fit bool
FailReasons []algorithm.PredicateFailureReason
}
// RunPredicate returns a cached predicate result. In case of a cache miss, the predicate will be
// run and its results cached for the next call.
//
// NOTE: RunPredicate will not update the equivalence cache if the given NodeInfo is stale.
func (c *Cache) RunPredicate(
pred algorithm.FitPredicate,
predicateKey string,
pod *v1.Pod,
meta algorithm.PredicateMetadata,
nodeInfo *schedulercache.NodeInfo,
equivClass *Class,
cache schedulercache.Cache,
) (bool, []algorithm.PredicateFailureReason, error) {
if nodeInfo == nil || nodeInfo.Node() == nil {
// This may happen during tests.
return false, []algorithm.PredicateFailureReason{}, fmt.Errorf("nodeInfo is nil or node is invalid")
}
result, ok := c.lookupResult(pod.GetName(), nodeInfo.Node().GetName(), predicateKey, equivClass.hash)
if ok {
return result.Fit, result.FailReasons, nil
}
fit, reasons, err := pred(pod, meta, nodeInfo)
if err != nil {
return fit, reasons, err
}
if cache != nil {
c.updateResult(pod.GetName(), predicateKey, fit, reasons, equivClass.hash, cache, nodeInfo)
}
return fit, reasons, nil
}
// updateResult updates the cached result of a predicate.
func (c *Cache) updateResult(
podName, predicateKey string,
fit bool,
reasons []algorithm.PredicateFailureReason,
equivalenceHash uint64,
cache schedulercache.Cache,
nodeInfo *schedulercache.NodeInfo,
) {
c.mu.Lock()
defer c.mu.Unlock()
if nodeInfo == nil || nodeInfo.Node() == nil {
// This may happen during tests.
return
}
// Skip update if NodeInfo is stale.
if !cache.IsUpToDate(nodeInfo) {
return
}
nodeName := nodeInfo.Node().GetName()
if _, exist := c.cache[nodeName]; !exist {
c.cache[nodeName] = make(predicateMap)
}
predicateItem := predicateResult{
Fit: fit,
FailReasons: reasons,
}
// if cached predicate map already exists, just update the predicate by key
if predicates, ok := c.cache[nodeName][predicateKey]; ok {
// maps in golang are references, no need to add them back
predicates[equivalenceHash] = predicateItem
} else {
c.cache[nodeName][predicateKey] =
resultMap{
equivalenceHash: predicateItem,
}
}
glog.V(5).Infof("Cache update: node=%s,predicate=%s,pod=%s,value=%v", nodeName, predicateKey, podName, predicateItem)
}
// lookupResult returns cached predicate results and a bool saying whether a
// cache entry was found.
func (c *Cache) lookupResult(
podName, nodeName, predicateKey string,
equivalenceHash uint64,
) (value predicateResult, ok bool) {
c.mu.RLock()
defer c.mu.RUnlock()
glog.V(5).Infof("Cache lookup: node=%s,predicate=%s,pod=%s", nodeName, predicateKey, podName)
value, ok = c.cache[nodeName][predicateKey][equivalenceHash]
return value, ok
}
// InvalidatePredicates clears all cached results for the given predicates.
func (c *Cache) InvalidatePredicates(predicateKeys sets.String) {
if len(predicateKeys) == 0 {
return
}
c.mu.Lock()
defer c.mu.Unlock()
// c.cache uses nodeName as key, so we just iterate it and invalid given predicates
for _, predicates := range c.cache {
for predicateKey := range predicateKeys {
delete(predicates, predicateKey)
}
}
glog.V(5).Infof("Cache invalidation: node=*,predicates=%v", predicateKeys)
}
// InvalidatePredicatesOnNode clears cached results for the given predicates on one node.
func (c *Cache) InvalidatePredicatesOnNode(nodeName string, predicateKeys sets.String) {
if len(predicateKeys) == 0 {
return
}
c.mu.Lock()
defer c.mu.Unlock()
for predicateKey := range predicateKeys |
glog.V(5).Infof("Cache invalidation: node=%s,predicates=%v", nodeName, predicateKeys)
}
// InvalidateAllPredicatesOnNode clears all cached results for one node.
func (c *Cache) InvalidateAllPredicatesOnNode(nodeName string) {
c.mu.Lock()
defer c.mu.Unlock()
delete(c.cache, nodeName)
glog.V(5).Infof("Cache invalidation: node=%s,predicates=*", nodeName)
}
// InvalidateCachedPredicateItemForPodAdd is a wrapper of
// InvalidateCachedPredicateItem for pod add case
// TODO: This does not belong with the equivalence cache implementation.
func (c *Cache) InvalidateCachedPredicateItemForPodAdd(pod *v1.Pod, nodeName string) {
// MatchInterPodAffinity: we assume scheduler can make sure newly bound pod
// will not break the existing inter pod affinity. So we does not need to
// invalidate MatchInterPodAffinity when pod added.
//
// But when a pod is deleted, existing inter pod affinity may become invalid.
// (e.g. this pod was preferred by some else, or vice versa)
//
// NOTE: assumptions above will not stand when we implemented features like
// RequiredDuringSchedulingRequiredDuringExecution.
// NoDiskConflict: the newly scheduled pod fits to existing pods on this node,
// it will also fits to equivalence class of existing pods
// GeneralPredicates: will always be affected by adding a new pod
invalidPredicates := sets.NewString(predicates.GeneralPred)
// MaxPDVolumeCountPredicate: we check the volumes of pod to make decision.
for _, vol := range pod.Spec.Volumes {
if vol.PersistentVolumeClaim != nil {
invalidPredicates.Insert(predicates.MaxEBSVolumeCountPred, predicates.MaxGCEPDVolumeCountPred, predicates.MaxAzureDiskVolumeCountPred)
} else {
if vol.AWSElasticBlockStore != nil {
invalidPredicates.Insert(predicates.MaxEBSVolumeCountPred)
}
if vol.GCEPersistentDisk != nil {
invalidPredicates.Insert(predicates.MaxGCEPDVolumeCountPred)
}
if vol.AzureDisk != nil {
invalidPredicates.Insert(predicates.MaxAzureDiskVolumeCountPred)
}
}
}
c.InvalidatePredicatesOnNode(nodeName, invalidPredicates)
}
// equivalencePod is the set of pod attributes which must match for two pods to
// be considered equivalent for scheduling purposes. For correctness, this must
// include any Pod field which is used by a FitPredicate.
//
// NOTE: For equivalence hash to be formally correct, lists and maps in the
// equivalencePod should be normalized. (e.g. by sorting them) However, the vast
// majority of equivalent pod classes are expected to be created from a single
// pod template, so they | {
delete(c.cache[nodeName], predicateKey)
} | conditional_block |
image_mapper.go | err := loadImageStreamTransforms(input, is, allowMissingImages, path)
if err != nil {
return nil, err
}
imageMapper, err := NewImageMapper(references)
if err != nil {
return nil, err
}
versionMapper := NewComponentVersionsMapper(input.Name, versions, tagsByName)
return func(data []byte) ([]byte, error) {
data, err := imageMapper(data)
if err != nil {
return nil, err
}
return versionMapper(data)
}, nil
}
func loadImageStreamTransforms(input, local *imageapi.ImageStream, allowMissingImages bool, src string) (ComponentVersions, map[string][]string, map[string]ImageReference, error) | }
return nil, nil, nil, fmt.Errorf("no input image tag named %q", tag.Name)
}
references[tag.Name] = ref
}
// load all version values from the input stream, including any defaults, to perform
// version substitution in the returned manifests.
versions := make(ComponentVersions)
tagsByName := make(map[string][]string)
for _, tag := range input.Spec.Tags {
if _, ok := references[tag.Name]; !ok {
continue
}
value, ok := tag.Annotations[annotationBuildVersions]
if !ok {
continue
}
displayNameValue := tag.Annotations[annotationBuildVersionsDisplayNames]
klog.V(4).Infof("Found build versions from %s: %s (%s)", tag.Name, value, displayNameValue)
items, err := parseComponentVersionsLabel(value, displayNameValue)
if err != nil {
return nil, nil, nil, fmt.Errorf("input image stream has an invalid version annotation for tag %q: %v", tag.Name, value)
}
for k, v := range items {
existing, ok := versions[k]
if ok {
if existing.Version != v.Version {
return nil, nil, nil, fmt.Errorf("input image stream has multiple versions defined for version %s: %s defines %s but was already set to %s on %s", k, tag.Name, v, existing, strings.Join(tagsByName[k], ", "))
}
} else {
versions[k] = v
klog.V(4).Infof("Found version %s=%s from %s", k, v.Version, tag.Name)
}
tagsByName[k] = append(tagsByName[k], tag.Name)
}
}
defaults, err := parseComponentVersionsLabel(input.Annotations[annotationBuildVersions], input.Annotations[annotationBuildVersionsDisplayNames])
if err != nil {
return nil, nil, nil, fmt.Errorf("unable to read default versions label on input image stream: %v", err)
}
for k, v := range defaults {
if _, ok := versions[k]; !ok {
versions[k] = v
}
}
return versions, tagsByName, references, nil
}
type ImageReference struct {
SourceRepository string
TargetPullSpec string
}
func NopManifestMapper(data []byte) ([]byte, error) {
return data, nil
}
// patternImageFormat attempts to match a docker pull spec by prefix (%s) and capture the
// prefix and either a tag or digest. It requires leading and trailing whitespace, quotes, or
// end of file.
const patternImageFormat = `([\W]|^)(%s)(:[\w][\w.-]{0,127}|@[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{2,})?([\s"']|$)`
func NewImageMapper(images map[string]ImageReference) (ManifestMapper, error) {
repositories := make([]string, 0, len(images))
bySource := make(map[string]string)
for name, ref := range images {
if len(ref.SourceRepository) == 0 {
return nil, fmt.Errorf("an empty source repository is not allowed for name %q", name)
}
if existing, ok := bySource[ref.SourceRepository]; ok {
return nil, fmt.Errorf("the source repository %q was defined more than once (for %q and %q)", ref.SourceRepository, existing, name)
}
bySource[ref.SourceRepository] = name
repositories = append(repositories, regexp.QuoteMeta(ref.SourceRepository))
}
if len(repositories) == 0 {
klog.V(5).Infof("No images are mapped, will not replace any contents")
return NopManifestMapper, nil
}
pattern := fmt.Sprintf(patternImageFormat, strings.Join(repositories, "|"))
re := regexp.MustCompile(pattern)
return func(data []byte) ([]byte, error) {
out := re.ReplaceAllFunc(data, func(in []byte) []byte {
parts := re.FindSubmatch(in)
repository := string(parts[2])
name, ok := bySource[repository]
if !ok {
klog.V(4).Infof("found potential image %q, but no matching definition", repository)
return in
}
ref := images[name]
suffix := parts[3]
klog.V(2).Infof("found repository %q with locator %q in the input, switching to %q (from pattern %s)", string(repository), string(suffix), ref.TargetPullSpec, pattern)
switch {
case len(suffix) == 0:
// we found a repository, but no tag or digest (implied latest), or we got an exact match
return []byte(string(parts[1]) + ref.TargetPullSpec + string(parts[4]))
case suffix[0] == '@':
// we got a digest
return []byte(string(parts[1]) + ref.TargetPullSpec + string(parts[4]))
default:
// TODO: we didn't get a digest, so we have to decide what to replace
return []byte(string(parts[1]) + ref.TargetPullSpec + string(parts[4]))
}
})
return out, nil
}, nil
}
// exactImageFormat attempts to match a string on word boundaries
const exactImageFormat = `\b%s\b`
func NewExactMapper(mappings map[string]string) (ManifestMapper, error) {
patterns := make(map[string]*regexp.Regexp)
for from, to := range mappings {
pattern := fmt.Sprintf(exactImageFormat, regexp.QuoteMeta(from))
re, err := regexp.Compile(pattern)
if err != nil {
return nil, err
}
patterns[to] = re
}
return func(data []byte) ([]byte, error) {
for to, pattern := range patterns {
data = pattern.ReplaceAll(data, []byte(to))
}
return data, nil
}, nil
}
func ComponentReferencesForImageStream(is *imageapi.ImageStream) (func(string) imagereference.DockerImageReference, error) {
components := make(map[string]imagereference.DockerImageReference)
for _, tag := range is.Spec.Tags {
if tag.From == nil || tag.From.Kind != "DockerImage" {
continue
}
ref, err := imagereference.Parse(tag.From.Name)
if err != nil {
return nil, fmt.Errorf("reference for %q is invalid: %v", tag.Name, err)
}
components[tag.Name] = ref
}
return func(component string) imagereference.DockerImageReference {
ref, ok := components[component]
if !ok {
panic(fmt.Errorf("unknown component %s", component))
}
return ref
}, nil
}
const (
componentVersionFormat = `([\W]|^)0\.0\.1-snapshot([a-z0-9\-]*)`
)
// NewComponentVersionsMapper substitutes strings of the form 0.0.1-snapshot with releaseName and strings
// of the form 0.0.1-snapshot-[component] with the version value located in versions, or returns an error.
// tagsByName allows the caller to return an error if references are ambiguous (two tags declare different
// version values) - if that replacement is detected and tagsByName[component] has more than one entry,
// then an error is returned by the ManifestMapper.
// If the input release name is not a semver, a request for `0.0.1-snapshot` will be left unmodified.
func NewComponentVersionsMapper(releaseName string, versions ComponentVersions, tagsByName map[string][] | {
references := make(map[string]ImageReference)
for _, tag := range local.Spec.Tags {
if tag.From == nil || tag.From.Kind != "DockerImage" {
continue
}
if len(tag.From.Name) == 0 {
return nil, nil, nil, fmt.Errorf("no from.name for the tag %s", tag.Name)
}
ref := ImageReference{SourceRepository: tag.From.Name}
for _, inputTag := range input.Spec.Tags {
if inputTag.Name == tag.Name {
ref.TargetPullSpec = inputTag.From.Name
break
}
}
if len(ref.TargetPullSpec) == 0 {
if allowMissingImages {
klog.V(2).Infof("Image file %q referenced an image %q that is not part of the input images, skipping", src, tag.From.Name)
continue | identifier_body |
image_mapper.go | err := loadImageStreamTransforms(input, is, allowMissingImages, path)
if err != nil {
return nil, err
}
imageMapper, err := NewImageMapper(references)
if err != nil {
return nil, err
}
versionMapper := NewComponentVersionsMapper(input.Name, versions, tagsByName)
return func(data []byte) ([]byte, error) {
data, err := imageMapper(data)
if err != nil {
return nil, err
}
return versionMapper(data)
}, nil
}
func loadImageStreamTransforms(input, local *imageapi.ImageStream, allowMissingImages bool, src string) (ComponentVersions, map[string][]string, map[string]ImageReference, error) {
references := make(map[string]ImageReference)
for _, tag := range local.Spec.Tags {
if tag.From == nil || tag.From.Kind != "DockerImage" {
continue
}
if len(tag.From.Name) == 0 {
return nil, nil, nil, fmt.Errorf("no from.name for the tag %s", tag.Name)
}
ref := ImageReference{SourceRepository: tag.From.Name}
for _, inputTag := range input.Spec.Tags {
if inputTag.Name == tag.Name {
ref.TargetPullSpec = inputTag.From.Name
break
}
}
if len(ref.TargetPullSpec) == 0 {
if allowMissingImages {
klog.V(2).Infof("Image file %q referenced an image %q that is not part of the input images, skipping", src, tag.From.Name)
continue
}
return nil, nil, nil, fmt.Errorf("no input image tag named %q", tag.Name)
}
references[tag.Name] = ref
}
// load all version values from the input stream, including any defaults, to perform
// version substitution in the returned manifests.
versions := make(ComponentVersions)
tagsByName := make(map[string][]string)
for _, tag := range input.Spec.Tags {
if _, ok := references[tag.Name]; !ok {
continue
}
value, ok := tag.Annotations[annotationBuildVersions]
if !ok {
continue
}
displayNameValue := tag.Annotations[annotationBuildVersionsDisplayNames]
klog.V(4).Infof("Found build versions from %s: %s (%s)", tag.Name, value, displayNameValue)
items, err := parseComponentVersionsLabel(value, displayNameValue)
if err != nil {
return nil, nil, nil, fmt.Errorf("input image stream has an invalid version annotation for tag %q: %v", tag.Name, value)
}
for k, v := range items {
existing, ok := versions[k]
if ok {
if existing.Version != v.Version {
return nil, nil, nil, fmt.Errorf("input image stream has multiple versions defined for version %s: %s defines %s but was already set to %s on %s", k, tag.Name, v, existing, strings.Join(tagsByName[k], ", "))
}
} else {
versions[k] = v
klog.V(4).Infof("Found version %s=%s from %s", k, v.Version, tag.Name)
}
tagsByName[k] = append(tagsByName[k], tag.Name)
}
}
defaults, err := parseComponentVersionsLabel(input.Annotations[annotationBuildVersions], input.Annotations[annotationBuildVersionsDisplayNames])
if err != nil {
return nil, nil, nil, fmt.Errorf("unable to read default versions label on input image stream: %v", err)
}
for k, v := range defaults {
if _, ok := versions[k]; !ok {
versions[k] = v
}
}
return versions, tagsByName, references, nil
}
type ImageReference struct {
SourceRepository string
TargetPullSpec string
}
func NopManifestMapper(data []byte) ([]byte, error) {
return data, nil
}
// patternImageFormat attempts to match a docker pull spec by prefix (%s) and capture the
// prefix and either a tag or digest. It requires leading and trailing whitespace, quotes, or
// end of file.
const patternImageFormat = `([\W]|^)(%s)(:[\w][\w.-]{0,127}|@[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{2,})?([\s"']|$)`
func | (images map[string]ImageReference) (ManifestMapper, error) {
repositories := make([]string, 0, len(images))
bySource := make(map[string]string)
for name, ref := range images {
if len(ref.SourceRepository) == 0 {
return nil, fmt.Errorf("an empty source repository is not allowed for name %q", name)
}
if existing, ok := bySource[ref.SourceRepository]; ok {
return nil, fmt.Errorf("the source repository %q was defined more than once (for %q and %q)", ref.SourceRepository, existing, name)
}
bySource[ref.SourceRepository] = name
repositories = append(repositories, regexp.QuoteMeta(ref.SourceRepository))
}
if len(repositories) == 0 {
klog.V(5).Infof("No images are mapped, will not replace any contents")
return NopManifestMapper, nil
}
pattern := fmt.Sprintf(patternImageFormat, strings.Join(repositories, "|"))
re := regexp.MustCompile(pattern)
return func(data []byte) ([]byte, error) {
out := re.ReplaceAllFunc(data, func(in []byte) []byte {
parts := re.FindSubmatch(in)
repository := string(parts[2])
name, ok := bySource[repository]
if !ok {
klog.V(4).Infof("found potential image %q, but no matching definition", repository)
return in
}
ref := images[name]
suffix := parts[3]
klog.V(2).Infof("found repository %q with locator %q in the input, switching to %q (from pattern %s)", string(repository), string(suffix), ref.TargetPullSpec, pattern)
switch {
case len(suffix) == 0:
// we found a repository, but no tag or digest (implied latest), or we got an exact match
return []byte(string(parts[1]) + ref.TargetPullSpec + string(parts[4]))
case suffix[0] == '@':
// we got a digest
return []byte(string(parts[1]) + ref.TargetPullSpec + string(parts[4]))
default:
// TODO: we didn't get a digest, so we have to decide what to replace
return []byte(string(parts[1]) + ref.TargetPullSpec + string(parts[4]))
}
})
return out, nil
}, nil
}
// exactImageFormat attempts to match a string on word boundaries
const exactImageFormat = `\b%s\b`
func NewExactMapper(mappings map[string]string) (ManifestMapper, error) {
patterns := make(map[string]*regexp.Regexp)
for from, to := range mappings {
pattern := fmt.Sprintf(exactImageFormat, regexp.QuoteMeta(from))
re, err := regexp.Compile(pattern)
if err != nil {
return nil, err
}
patterns[to] = re
}
return func(data []byte) ([]byte, error) {
for to, pattern := range patterns {
data = pattern.ReplaceAll(data, []byte(to))
}
return data, nil
}, nil
}
func ComponentReferencesForImageStream(is *imageapi.ImageStream) (func(string) imagereference.DockerImageReference, error) {
components := make(map[string]imagereference.DockerImageReference)
for _, tag := range is.Spec.Tags {
if tag.From == nil || tag.From.Kind != "DockerImage" {
continue
}
ref, err := imagereference.Parse(tag.From.Name)
if err != nil {
return nil, fmt.Errorf("reference for %q is invalid: %v", tag.Name, err)
}
components[tag.Name] = ref
}
return func(component string) imagereference.DockerImageReference {
ref, ok := components[component]
if !ok {
panic(fmt.Errorf("unknown component %s", component))
}
return ref
}, nil
}
const (
componentVersionFormat = `([\W]|^)0\.0\.1-snapshot([a-z0-9\-]*)`
)
// NewComponentVersionsMapper substitutes strings of the form 0.0.1-snapshot with releaseName and strings
// of the form 0.0.1-snapshot-[component] with the version value located in versions, or returns an error.
// tagsByName allows the caller to return an error if references are ambiguous (two tags declare different
// version values) - if that replacement is detected and tagsByName[component] has more than one entry,
// then an error is returned by the ManifestMapper.
// If the input release name is not a semver, a request for `0.0.1-snapshot` will be left unmodified.
func NewComponentVersionsMapper(releaseName string, versions ComponentVersions, tagsByName map[string | NewImageMapper | identifier_name |
image_mapper.go | arts := re.FindSubmatch(in)
repository := string(parts[2])
name, ok := bySource[repository]
if !ok {
klog.V(4).Infof("found potential image %q, but no matching definition", repository)
return in
}
ref := images[name]
suffix := parts[3]
klog.V(2).Infof("found repository %q with locator %q in the input, switching to %q (from pattern %s)", string(repository), string(suffix), ref.TargetPullSpec, pattern)
switch {
case len(suffix) == 0:
// we found a repository, but no tag or digest (implied latest), or we got an exact match
return []byte(string(parts[1]) + ref.TargetPullSpec + string(parts[4]))
case suffix[0] == '@':
// we got a digest
return []byte(string(parts[1]) + ref.TargetPullSpec + string(parts[4]))
default:
// TODO: we didn't get a digest, so we have to decide what to replace
return []byte(string(parts[1]) + ref.TargetPullSpec + string(parts[4]))
}
})
return out, nil
}, nil
}
// exactImageFormat attempts to match a string on word boundaries
const exactImageFormat = `\b%s\b`
func NewExactMapper(mappings map[string]string) (ManifestMapper, error) {
patterns := make(map[string]*regexp.Regexp)
for from, to := range mappings {
pattern := fmt.Sprintf(exactImageFormat, regexp.QuoteMeta(from))
re, err := regexp.Compile(pattern)
if err != nil {
return nil, err
}
patterns[to] = re
}
return func(data []byte) ([]byte, error) {
for to, pattern := range patterns {
data = pattern.ReplaceAll(data, []byte(to))
}
return data, nil
}, nil
}
func ComponentReferencesForImageStream(is *imageapi.ImageStream) (func(string) imagereference.DockerImageReference, error) {
components := make(map[string]imagereference.DockerImageReference)
for _, tag := range is.Spec.Tags {
if tag.From == nil || tag.From.Kind != "DockerImage" {
continue
}
ref, err := imagereference.Parse(tag.From.Name)
if err != nil {
return nil, fmt.Errorf("reference for %q is invalid: %v", tag.Name, err)
}
components[tag.Name] = ref
}
return func(component string) imagereference.DockerImageReference {
ref, ok := components[component]
if !ok {
panic(fmt.Errorf("unknown component %s", component))
}
return ref
}, nil
}
const (
componentVersionFormat = `([\W]|^)0\.0\.1-snapshot([a-z0-9\-]*)`
)
// NewComponentVersionsMapper substitutes strings of the form 0.0.1-snapshot with releaseName and strings
// of the form 0.0.1-snapshot-[component] with the version value located in versions, or returns an error.
// tagsByName allows the caller to return an error if references are ambiguous (two tags declare different
// version values) - if that replacement is detected and tagsByName[component] has more than one entry,
// then an error is returned by the ManifestMapper.
// If the input release name is not a semver, a request for `0.0.1-snapshot` will be left unmodified.
func NewComponentVersionsMapper(releaseName string, versions ComponentVersions, tagsByName map[string][]string) ManifestMapper {
if v, err := semver.Parse(releaseName); err == nil {
v.Build = nil
releaseName = v.String()
} else {
releaseName = ""
}
re, err := regexp.Compile(componentVersionFormat)
if err != nil {
return func([]byte) ([]byte, error) {
return nil, fmt.Errorf("component versions mapper regex: %v", err)
}
}
return func(data []byte) ([]byte, error) {
var missing []string
var conflicts []string
data = re.ReplaceAllFunc(data, func(part []byte) []byte {
matches := re.FindSubmatch(part)
if matches == nil {
return part
}
key := string(matches[2])
if len(key) == 0 && len(releaseName) > 0 {
buf := &bytes.Buffer{}
buf.Write(matches[1])
buf.WriteString(releaseName)
return buf.Bytes()
}
if !strings.HasPrefix(key, "-") {
return part
}
key = key[1:]
value, ok := versions[key]
if !ok {
missing = append(missing, key)
return part
}
if len(tagsByName[key]) > 1 {
conflicts = append(conflicts, key)
return part
}
buf := &bytes.Buffer{}
buf.Write(matches[1])
buf.WriteString(value.Version)
return buf.Bytes()
})
if len(missing) > 0 {
switch len(missing) {
case 1:
if len(missing[0]) == 0 {
return nil, fmt.Errorf("empty version references are not allowed")
}
return nil, fmt.Errorf("unknown version reference %q", missing[0])
default:
return nil, fmt.Errorf("unknown version references: %s", strings.Join(missing, ", "))
}
}
if len(conflicts) > 0 {
allImageTags := tagsByName[conflicts[0]]
sort.Strings(allImageTags)
return nil, fmt.Errorf("the version for %q is inconsistent across the referenced images: %s", conflicts[0], strings.Join(allImageTags, ", "))
}
return data, nil
}
}
var (
// reAllowedVersionKey limits the allowed component name to a strict subset
reAllowedVersionKey = regexp.MustCompile(`^[a-z0-9]+[\-a-z0-9]*[a-z0-9]+$`)
// reAllowedDisplayNameKey limits the allowed component name to a strict subset
reAllowedDisplayNameKey = regexp.MustCompile(`^[a-zA-Z0-9\-\:\s\(\)]+$`)
)
// ComponentVersion includes the version and optional display name.
type ComponentVersion struct {
Version string
DisplayName string
}
// String returns the version of this component.
func (v ComponentVersion) String() string {
return v.Version
}
// ComponentVersions is a map of component names to semantic versions. Names are
// lowercase alphanumeric and dashes. Semantic versions will have all build
// labels removed, but prerelease segments are preserved.
type ComponentVersions map[string]ComponentVersion
// OrderedKeys returns the keys in this map in lexigraphic order.
func (v ComponentVersions) OrderedKeys() []string {
keys := make([]string, 0, len(v))
for k := range v {
keys = append(keys, k)
}
sort.Strings(keys)
return keys
}
func (v ComponentVersions) String() string {
return v.VersionLabel()
}
// VersionLabel formats the ComponentVersions into a valid
// versions label.
func (v ComponentVersions) VersionLabel() string {
var keys []string
for k := range v {
keys = append(keys, k)
}
sort.Strings(keys)
buf := &bytes.Buffer{}
for i, k := range keys {
if i != 0 {
buf.WriteRune(',')
}
fmt.Fprintf(buf, "%s=%s", k, v[k].Version)
}
return buf.String()
}
// DisplayNameLabel formats the ComponentVersions into a valid display
// name label.
func (v ComponentVersions) DisplayNameLabel() string {
var keys []string
for k := range v {
keys = append(keys, k)
}
sort.Strings(keys)
buf := &bytes.Buffer{}
for i, k := range keys {
if i != 0 {
buf.WriteRune(',')
}
if len(v[k].DisplayName) == 0 {
continue
}
fmt.Fprintf(buf, "%s=%s", k, v[k].DisplayName)
}
return buf.String()
}
// parseComponentVersionsLabel returns the version labels specified in the string or
// an error. Labels are comma-delimited, key=value pairs, and surrounding whitespace is
// ignored. Names must be a-z, 0-9, or have interior dashes. All values must be
// semantic versions. The displayNames label is optional (if provided) and will be combined
// with the valid versions.
func parseComponentVersionsLabel(label, displayNames string) (ComponentVersions, error) {
label = strings.TrimSpace(label)
if len(label) == 0 {
return nil, nil
}
var names map[string]string
if len(displayNames) > 0 {
names = make(map[string]string)
for _, pair := range strings.Split(displayNames, ",") {
pair = strings.TrimSpace(pair)
parts := strings.SplitN(pair, "=", 2)
if len(parts) == 1 {
return nil, fmt.Errorf("the display name pair %q must be NAME=DISPLAYNAME", pair)
}
if len(parts[0]) < 2 | {
return nil, fmt.Errorf("the version name %q must be at least 2 characters", parts[0])
} | conditional_block | |
image_mapper.go | build versions from %s: %s (%s)", tag.Name, value, displayNameValue)
items, err := parseComponentVersionsLabel(value, displayNameValue)
if err != nil {
return nil, nil, nil, fmt.Errorf("input image stream has an invalid version annotation for tag %q: %v", tag.Name, value)
}
for k, v := range items {
existing, ok := versions[k]
if ok {
if existing.Version != v.Version {
return nil, nil, nil, fmt.Errorf("input image stream has multiple versions defined for version %s: %s defines %s but was already set to %s on %s", k, tag.Name, v, existing, strings.Join(tagsByName[k], ", "))
}
} else {
versions[k] = v
klog.V(4).Infof("Found version %s=%s from %s", k, v.Version, tag.Name)
}
tagsByName[k] = append(tagsByName[k], tag.Name)
}
}
defaults, err := parseComponentVersionsLabel(input.Annotations[annotationBuildVersions], input.Annotations[annotationBuildVersionsDisplayNames])
if err != nil {
return nil, nil, nil, fmt.Errorf("unable to read default versions label on input image stream: %v", err)
}
for k, v := range defaults {
if _, ok := versions[k]; !ok {
versions[k] = v
}
}
return versions, tagsByName, references, nil
}
// ImageReference describes a single image rewrite rule: occurrences of
// SourceRepository found in a manifest are replaced with TargetPullSpec.
type ImageReference struct {
	// SourceRepository is the repository portion of the pull spec to search for.
	SourceRepository string
	// TargetPullSpec is the full pull spec written in its place.
	TargetPullSpec string
}
// NopManifestMapper is a ManifestMapper that returns the manifest bytes
// unchanged and never errors.
func NopManifestMapper(data []byte) ([]byte, error) {
	return data, nil
}
// patternImageFormat attempts to match a docker pull spec by prefix (%s) and capture the
// prefix and either a tag or digest. It requires leading and trailing whitespace, quotes, or
// end of file.
// Capture groups (as consumed by NewImageMapper): 1 = leading boundary char,
// 2 = repository, 3 = optional ":tag" or "@algo:hexdigest" suffix,
// 4 = trailing boundary char.
const patternImageFormat = `([\W]|^)(%s)(:[\w][\w.-]{0,127}|@[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{2,})?([\s"']|$)`
// NewImageMapper returns a ManifestMapper that rewrites image pull specs in
// raw manifest bytes: any reference whose repository matches a
// SourceRepository in images — bare, tagged, or digested — is replaced with
// that entry's TargetPullSpec. It errors if a source repository is empty or
// defined more than once. With no images, it returns NopManifestMapper.
func NewImageMapper(images map[string]ImageReference) (ManifestMapper, error) {
	repositories := make([]string, 0, len(images))
	bySource := make(map[string]string)
	for name, ref := range images {
		if len(ref.SourceRepository) == 0 {
			return nil, fmt.Errorf("an empty source repository is not allowed for name %q", name)
		}
		if existing, ok := bySource[ref.SourceRepository]; ok {
			return nil, fmt.Errorf("the source repository %q was defined more than once (for %q and %q)", ref.SourceRepository, existing, name)
		}
		bySource[ref.SourceRepository] = name
		// Quote each repository so it matches literally inside the regex.
		repositories = append(repositories, regexp.QuoteMeta(ref.SourceRepository))
	}
	if len(repositories) == 0 {
		klog.V(5).Infof("No images are mapped, will not replace any contents")
		return NopManifestMapper, nil
	}
	// All source repositories are alternated into one pattern so each
	// manifest is scanned a single time.
	pattern := fmt.Sprintf(patternImageFormat, strings.Join(repositories, "|"))
	re := regexp.MustCompile(pattern)
	return func(data []byte) ([]byte, error) {
		out := re.ReplaceAllFunc(data, func(in []byte) []byte {
			parts := re.FindSubmatch(in)
			repository := string(parts[2])
			name, ok := bySource[repository]
			if !ok {
				klog.V(4).Infof("found potential image %q, but no matching definition", repository)
				return in
			}
			ref := images[name]
			suffix := parts[3]
			klog.V(2).Infof("found repository %q with locator %q in the input, switching to %q (from pattern %s)", string(repository), string(suffix), ref.TargetPullSpec, pattern)
			// NOTE(review): all three branches currently emit the same
			// replacement, and the captured tag/digest suffix is dropped —
			// confirm this is intended (the TODO below suggests it is open).
			switch {
			case len(suffix) == 0:
				// we found a repository, but no tag or digest (implied latest), or we got an exact match
				return []byte(string(parts[1]) + ref.TargetPullSpec + string(parts[4]))
			case suffix[0] == '@':
				// we got a digest
				return []byte(string(parts[1]) + ref.TargetPullSpec + string(parts[4]))
			default:
				// TODO: we didn't get a digest, so we have to decide what to replace
				return []byte(string(parts[1]) + ref.TargetPullSpec + string(parts[4]))
			}
		})
		return out, nil
	}, nil
}
// exactImageFormat attempts to match a string on word boundaries; %s is
// substituted with the (regexp-quoted) literal to find.
const exactImageFormat = `\b%s\b`
// NewExactMapper returns a ManifestMapper that replaces every word-boundary
// occurrence of each key in mappings with its value. Replacements are
// applied in sorted source order, so the output is deterministic.
func NewExactMapper(mappings map[string]string) (ManifestMapper, error) {
	type replacement struct {
		re *regexp.Regexp
		to []byte
	}
	// Sort the sources so the mapper applies patterns in a stable order
	// (ranging over the map directly is nondeterministic).
	froms := make([]string, 0, len(mappings))
	for from := range mappings {
		froms = append(froms, from)
	}
	sort.Strings(froms)
	// Use a slice rather than a map keyed by the target: the previous
	// map[target]pattern silently dropped a pattern whenever two different
	// sources mapped to the same target string.
	replacements := make([]replacement, 0, len(mappings))
	for _, from := range froms {
		pattern := fmt.Sprintf(exactImageFormat, regexp.QuoteMeta(from))
		re, err := regexp.Compile(pattern)
		if err != nil {
			return nil, err
		}
		replacements = append(replacements, replacement{re: re, to: []byte(mappings[from])})
	}
	return func(data []byte) ([]byte, error) {
		for _, r := range replacements {
			data = r.re.ReplaceAll(data, r.to)
		}
		return data, nil
	}, nil
}
// ComponentReferencesForImageStream builds a lookup from spec tag name to the
// parsed DockerImage reference that tag points at. Tags whose From is nil or
// not of kind "DockerImage" are skipped. The returned func panics for an
// unknown component, so callers must only ask for tag names present in the
// stream's spec.
func ComponentReferencesForImageStream(is *imageapi.ImageStream) (func(string) imagereference.DockerImageReference, error) {
	components := make(map[string]imagereference.DockerImageReference)
	for _, tag := range is.Spec.Tags {
		if tag.From == nil || tag.From.Kind != "DockerImage" {
			continue
		}
		ref, err := imagereference.Parse(tag.From.Name)
		if err != nil {
			return nil, fmt.Errorf("reference for %q is invalid: %v", tag.Name, err)
		}
		components[tag.Name] = ref
	}
	return func(component string) imagereference.DockerImageReference {
		ref, ok := components[component]
		if !ok {
			// Programmer error: lookups are expected to be validated upstream.
			panic(fmt.Errorf("unknown component %s", component))
		}
		return ref
	}, nil
}
const (
	// componentVersionFormat matches the placeholder version "0.0.1-snapshot"
	// plus an optional lowercase/digit/dash suffix naming a component (e.g.
	// "0.0.1-snapshot-kubernetes"). Group 1 captures the leading boundary
	// character, group 2 the suffix.
	componentVersionFormat = `([\W]|^)0\.0\.1-snapshot([a-z0-9\-]*)`
)
// NewComponentVersionsMapper substitutes strings of the form 0.0.1-snapshot with releaseName and strings
// of the form 0.0.1-snapshot-[component] with the version value located in versions, or returns an error.
// tagsByName allows the caller to return an error if references are ambiguous (two tags declare different
// version values) - if that replacement is detected and tagsByName[component] has more than one entry,
// then an error is returned by the ManifestMapper.
// If the input release name is not a semver, a request for `0.0.1-snapshot` will be left unmodified.
func NewComponentVersionsMapper(releaseName string, versions ComponentVersions, tagsByName map[string][]string) ManifestMapper {
	// Normalize the release name: strip build metadata when it parses as
	// semver; otherwise blank it so bare 0.0.1-snapshot is left untouched.
	if v, err := semver.Parse(releaseName); err == nil {
		v.Build = nil
		releaseName = v.String()
	} else {
		releaseName = ""
	}
	re, err := regexp.Compile(componentVersionFormat)
	if err != nil {
		// Defensive: the pattern is a package constant, but surface any
		// compile failure from the mapper rather than panicking here.
		return func([]byte) ([]byte, error) {
			return nil, fmt.Errorf("component versions mapper regex: %v", err)
		}
	}
	return func(data []byte) ([]byte, error) {
		// Unknown or ambiguous components are collected during the scan and
		// reported as errors only after the whole input has been processed.
		var missing []string
		var conflicts []string
		data = re.ReplaceAllFunc(data, func(part []byte) []byte {
			matches := re.FindSubmatch(part)
			if matches == nil {
				return part
			}
			// matches[2] is whatever followed "0.0.1-snapshot".
			key := string(matches[2])
			if len(key) == 0 && len(releaseName) > 0 {
				// Bare 0.0.1-snapshot: substitute the release name itself.
				buf := &bytes.Buffer{}
				buf.Write(matches[1])
				buf.WriteString(releaseName)
				return buf.Bytes()
			}
			if !strings.HasPrefix(key, "-") {
				// e.g. "0.0.1-snapshotxyz" — not a component reference.
				return part
			}
			key = key[1:]
			value, ok := versions[key]
			if !ok {
				missing = append(missing, key)
				return part
			}
			if len(tagsByName[key]) > 1 {
				// Multiple tags declared this component; treat as ambiguous.
				conflicts = append(conflicts, key)
				return part
			}
			buf := &bytes.Buffer{}
			buf.Write(matches[1])
			buf.WriteString(value.Version)
			return buf.Bytes()
		})
		if len(missing) > 0 {
			switch len(missing) {
			case 1:
				if len(missing[0]) == 0 {
					return nil, fmt.Errorf("empty version references are not allowed")
				}
				return nil, fmt.Errorf("unknown version reference %q", missing[0])
			default:
				return nil, fmt.Errorf("unknown version references: %s", strings.Join(missing, ", "))
			}
		}
		if len(conflicts) > 0 {
			allImageTags := tagsByName[conflicts[0]]
			sort.Strings(allImageTags)
			return nil, fmt.Errorf("the version for %q is inconsistent across the referenced images: %s", conflicts[0], strings.Join(allImageTags, ", "))
		}
		return data, nil
	}
} |
var ( | random_line_split | |
serde_snapshot.rs | genesis_config::ClusterType,
genesis_config::GenesisConfig,
hard_forks::HardForks,
hash::Hash,
inflation::Inflation,
pubkey::Pubkey,
},
std::{
collections::{HashMap, HashSet},
io::{BufReader, BufWriter, Read, Write},
path::{Path, PathBuf},
result::Result,
sync::{atomic::Ordering, Arc, RwLock},
time::Instant,
},
};
#[cfg(RUSTC_WITH_SPECIALIZATION)]
use solana_frozen_abi::abi_example::IgnoreAsHelper;
mod common;
mod future;
mod tests;
mod utils;
use future::Context as TypeContextFuture;
#[allow(unused_imports)]
use utils::{serialize_iter_as_map, serialize_iter_as_seq, serialize_iter_as_tuple};
// a number of test cases in accounts_db use this
#[cfg(test)]
pub(crate) use self::tests::reconstruct_accounts_db_via_serialization;
pub(crate) use crate::accounts_db::{SnapshotStorage, SnapshotStorages};
/// Selects the snapshot serialization format. Only one format (`NEWER`)
/// currently exists; call sites match on it exhaustively so a future format
/// forces an update at each dispatch point.
#[derive(Copy, Clone, Eq, PartialEq)]
pub(crate) enum SerdeStyle {
    NEWER,
}
/// Upper bound (32 GiB) enforced on the bincode stream by `deserialize_from`.
const MAX_STREAM_SIZE: u64 = 32 * 1024 * 1024 * 1024;
/// Snapshot-serialized form of the accounts database, destructured in
/// `reconstruct_accountsdb_from_fields` as: slot -> storage entries map,
/// version number, slot, and bank hash info.
#[derive(Clone, Debug, Default, Deserialize, Serialize, AbiExample)]
struct AccountsDbFields<T>(HashMap<Slot, Vec<T>>, u64, Slot, BankHashInfo);
/// Abstracts over serialization format versions: each implementor supplies
/// its wire representation of a storage entry plus the (de)serialization
/// routines for a bank and its accounts database.
trait TypeContext<'a> {
    /// Format-specific wire form of an `AccountStorageEntry`; must convert
    /// to and from the runtime type.
    type SerializableAccountStorageEntry: Serialize
        + DeserializeOwned
        + From<&'a AccountStorageEntry>
        + Into<AccountStorageEntry>;

    /// Serializes a bank together with its snapshot storages.
    fn serialize_bank_and_storage<S: serde::ser::Serializer>(
        serializer: S,
        serializable_bank: &SerializableBankAndStorage<'a, Self>,
    ) -> std::result::Result<S::Ok, S::Error>
    where
        Self: std::marker::Sized;

    /// Serializes only the accounts-database portion of a snapshot.
    fn serialize_accounts_db_fields<S: serde::ser::Serializer>(
        serializer: S,
        serializable_db: &SerializableAccountsDB<'a, Self>,
    ) -> std::result::Result<S::Ok, S::Error>
    where
        Self: std::marker::Sized;

    /// Reads the bank fields followed by the accounts-db fields from `stream`.
    fn deserialize_bank_fields<R>(
        stream: &mut BufReader<R>,
    ) -> Result<
        (
            BankFieldsToDeserialize,
            AccountsDbFields<Self::SerializableAccountStorageEntry>,
        ),
        Error,
    >
    where
        R: Read;

    /// Reads only the accounts-db fields from `stream`.
    fn deserialize_accounts_db_fields<R>(
        stream: &mut BufReader<R>,
    ) -> Result<AccountsDbFields<Self::SerializableAccountStorageEntry>, Error>
    where
        R: Read;
}
/// Deserializes a `T` from `reader` with the bincode settings used throughout
/// this module: fixed-width integer encoding, a stream size cap of
/// `MAX_STREAM_SIZE`, and tolerance for trailing bytes.
fn deserialize_from<R, T>(reader: R) -> bincode::Result<T>
where
    R: Read,
    T: DeserializeOwned,
{
    let options = bincode::options()
        .with_limit(MAX_STREAM_SIZE)
        .with_fixint_encoding()
        .allow_trailing_bytes();
    options.deserialize_from::<R, T>(reader)
}
/// Reconstructs a `Bank` from a serialized snapshot stream.
///
/// Dispatches on `serde_style` to the matching `TypeContext`, deserializes
/// the bank and accounts-db fields, and rebuilds the runtime structures via
/// `reconstruct_bank_from_fields`. Errors are logged as a warning before
/// being returned to the caller.
pub(crate) fn bank_from_stream<R, P>(
    serde_style: SerdeStyle,
    stream: &mut BufReader<R>,
    append_vecs_path: P,
    account_paths: &[PathBuf],
    genesis_config: &GenesisConfig,
    frozen_account_pubkeys: &[Pubkey],
    debug_keys: Option<Arc<HashSet<Pubkey>>>,
    additional_builtins: Option<&Builtins>,
) -> std::result::Result<Bank, Error>
where
    R: Read,
    P: AsRef<Path>,
{
    // A macro (not a fn) so each match arm can bind a different TypeContext
    // implementation while sharing this body.
    macro_rules! INTO {
        ($x:ident) => {{
            let (bank_fields, accounts_db_fields) = $x::deserialize_bank_fields(stream)?;
            let bank = reconstruct_bank_from_fields(
                bank_fields,
                accounts_db_fields,
                genesis_config,
                frozen_account_pubkeys,
                account_paths,
                append_vecs_path,
                debug_keys,
                additional_builtins,
            )?;
            Ok(bank)
        }};
    }
    match serde_style {
        SerdeStyle::NEWER => INTO!(TypeContextFuture),
    }
    .map_err(|err| {
        warn!("bankrc_from_stream error: {:?}", err);
        err
    })
}
/// Serializes `bank` and its snapshot storages to `stream` in the format
/// selected by `serde_style`. Errors are logged as a warning before being
/// returned to the caller.
pub(crate) fn bank_to_stream<W>(
    serde_style: SerdeStyle,
    stream: &mut BufWriter<W>,
    bank: &Bank,
    snapshot_storages: &[SnapshotStorage],
) -> Result<(), Error>
where
    W: Write,
{
    // A macro (not a fn) so each match arm can bind a different TypeContext
    // implementation while sharing this body.
    macro_rules! INTO {
        ($x:ident) => {
            bincode::serialize_into(
                stream,
                &SerializableBankAndStorage::<$x> {
                    bank,
                    snapshot_storages,
                    phantom: std::marker::PhantomData::default(),
                },
            )
        };
    }
    match serde_style {
        SerdeStyle::NEWER => INTO!(TypeContextFuture),
    }
    .map_err(|err| {
        warn!("bankrc_to_stream error: {:?}", err);
        err
    })
}
/// Serialization wrapper pairing a bank with the snapshot storages to write,
/// tagged with the `TypeContext` `C` that defines the output format.
struct SerializableBankAndStorage<'a, C> {
    bank: &'a Bank,
    snapshot_storages: &'a [SnapshotStorage],
    // Zero-sized marker carrying the format type `C`.
    phantom: std::marker::PhantomData<C>,
}
impl<'a, C: TypeContext<'a>> Serialize for SerializableBankAndStorage<'a, C> {
    /// Delegates to the format-specific `TypeContext` implementation.
    fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
    where
        S: serde::ser::Serializer,
    {
        C::serialize_bank_and_storage(serializer, self)
    }
}
/// Serialization wrapper for just the accounts database at a given slot,
/// tagged with the `TypeContext` `C` that defines the output format.
struct SerializableAccountsDB<'a, C> {
    accounts_db: &'a AccountsDB,
    slot: Slot,
    account_storage_entries: &'a [SnapshotStorage],
    // Zero-sized marker carrying the format type `C`.
    phantom: std::marker::PhantomData<C>,
}
impl<'a, C: TypeContext<'a>> Serialize for SerializableAccountsDB<'a, C> {
    /// Delegates to the format-specific `TypeContext` implementation.
    fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
    where
        S: serde::ser::Serializer,
    {
        C::serialize_accounts_db_fields(serializer, self)
    }
}
// Marks this wrapper as a serialization helper for the frozen-ABI machinery.
// NOTE(review): presumably this excludes it from ABI digesting — confirm
// against the solana_frozen_abi definition of IgnoreAsHelper.
#[cfg(RUSTC_WITH_SPECIALIZATION)]
impl<'a, C> IgnoreAsHelper for SerializableAccountsDB<'a, C> {}
fn reconstruct_bank_from_fields<E, P>(
bank_fields: BankFieldsToDeserialize,
accounts_db_fields: AccountsDbFields<E>, | frozen_account_pubkeys: &[Pubkey],
account_paths: &[PathBuf],
append_vecs_path: P,
debug_keys: Option<Arc<HashSet<Pubkey>>>,
additional_builtins: Option<&Builtins>,
) -> Result<Bank, Error>
where
E: Into<AccountStorageEntry>,
P: AsRef<Path>,
{
let mut accounts_db = reconstruct_accountsdb_from_fields(
accounts_db_fields,
account_paths,
append_vecs_path,
&genesis_config.cluster_type,
)?;
accounts_db.freeze_accounts(&bank_fields.ancestors, frozen_account_pubkeys);
let bank_rc = BankRc::new(Accounts::new_empty(accounts_db), bank_fields.slot);
let bank = Bank::new_from_fields(
bank_rc,
genesis_config,
bank_fields,
debug_keys,
additional_builtins,
);
Ok(bank)
}
fn reconstruct_accountsdb_from_fields<E, P>(
accounts_db_fields: AccountsDbFields<E>,
account_paths: &[PathBuf],
stream_append_vecs_path: P,
cluster_type: &ClusterType,
) -> Result<AccountsDB, Error>
where
E: Into<AccountStorageEntry>,
P: AsRef<Path>,
{
let mut accounts_db = AccountsDB::new(account_paths.to_vec(), cluster_type);
let AccountsDbFields(storage, version, slot, bank_hash_info) = accounts_db_fields;
// convert to two level map of slot -> id -> account storage entry
let storage = {
let mut map = HashMap::new();
for (slot, entries) in storage.into_iter() {
let sub_map = map.entry(slot).or_insert_with(HashMap::new);
for entry in entries.into_iter() {
let entry: AccountStorageEntry = entry.into();
entry.slot.store(slot, Ordering::Relaxed);
sub_map.insert(entry.append_vec_id(), Arc::new(entry));
}
}
map
};
let mut last_log_update = Instant::now();
let mut remaining_slots_to_process = storage.len();
// Remap the deserialized AppendVec paths to point to correct local paths
let mut storage = storage
.into_iter()
.map(|(slot, mut slot_storage)| {
let now = Instant::now();
if now.duration_since(last_log_update).as_secs() >= 10 {
info!("{} slots remaining...", remaining_slots_to_process);
last_log_update = now;
}
remaining_slots_to_process -= 1;
let mut new_slot_storage = HashMap::new();
for (id, storage_entry) in slot_storage.drain() {
let path_index = thread_rng().gen_range(0, accounts_db.paths.len());
let local_dir = &accounts_db.paths[path_index];
std::fs::create_dir_all(local_dir).expect("Create directory failed");
// Move the corresponding AppendVec from the snapshot into the directory pointed
// at by `local_dir`
let append_vec_relative_path =
AppendVec::new_relative_path(slot, storage_entry.append_vec_id());
let append_vec_abs_path = stream_append_vecs_path
.as_ref()
.join(& | genesis_config: &GenesisConfig, | random_line_split |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.