anchor
stringlengths
0
150
positive
stringlengths
0
96k
source
dict
Charged Black Hole: Self-Discharge
Question: Palti writes in his introduction to the swampland (page 39) that a charged black hole (BH) could discharge itself, when the Hawking temperature $$ T_H=\frac{R_+-R_-}{4\pi R_+^2} $$ ($R_+$ and $R_-$ the outer and inner horizons of the BH) is bigger than the mass $m$ of the charged particle with charge $q$, because then the particle can be thermally produced. The electric field of the BH induces a chemical potential, which favours the production of particles with opposite charge wrt the BH charge. Now my question is: how can a single particle be produced thermally? I thought only pair production is allowed?! Otherwise, e.g. lepton-numbers would not be conserved or other conserved quantum numbers. Answer: Answer While writing the question I found the answer: The idea is that the particle with opposite charge falls into the black hole (BH), decreasing the BH charge, whereas the other particle escapes. So, it actually is pair production.
{ "domain": "physics.stackexchange", "id": 71385, "tags": "thermodynamics, black-holes, thermal-radiation" }
Screen transition effect like in Final Fantasy games
Question: I've created a battle screen transition effect similar to Final Fantasy games, you can see it in action here: https://gilles-leblanc.github.io/ff8-transition/ I'm interested in code quality, performance, optimizations and general JavaScript usage. One thing in particular is I declared the color function inside the colorSwoosh function to make it private to that function as a form of encapsulation, I'm unsure about whether this is a good thing. const SideEnum = Object.freeze({"left": 1, "right": -1}); const Pass = Object.freeze({"first": 1, "second": 2}); let canvas; let context; let imageData; const numberOfIndicesPerPixel = 4; let xProgress = []; const onColor = 255; const offColor = 0; let height, width, segmentLength, sideMultiplier, initialX, lineHeight; let numberOfFramesRan; const config = { color: onColor, currentPass: Pass.first, direction: SideEnum.left, frameLimitFactor: 1, horizontalSegments: 10, initSpread: 0.2, lineHeight: 4, } var initTransition = function() { xProgress = []; const spread = width * config.initSpread; for (let y = 0; y < height; y += config.lineHeight) { let numberOfPixelsToInit = Math.floor(Math.random() * Math.floor(spread)); for (let z = 0; z < config.lineHeight; z++) { xProgress.push(numberOfPixelsToInit); for (let x = 0; x <= numberOfPixelsToInit; x++) { imageData.data[(y + z) * width + (initialX + x * sideMultiplier)] = config.color; } } } context.putImageData(imageData, 0, 0); } var colorSwoosh = function(timestamp) { function color(passMultiplier) { const calcValueToAddFunc = function(x, segmentLength) { return config.horizontalSegments - Math.floor(x / segmentLength); } for (let y = 0; y < height; y++) { for (let x = xProgress[y]; x <= width; x++) { const valueToAdd = calcValueToAddFunc(x, segmentLength + xProgress[y]); imageData.data[y * width + (initialX + x * sideMultiplier)] += valueToAdd * passMultiplier; } } context.putImageData(imageData, 0, 0); } if (config.frameLimitFactor === 0 || Math.floor(timestamp % 
config.frameLimitFactor) === 0) { color(config.currentPass === Pass.first ? 1 : -1); } const lastValueToCheck = config.direction === SideEnum.right ? 1 : imageData.data.length - numberOfIndicesPerPixel; numberOfFramesRan++; if (config.currentPass === Pass.first) { if (numberOfFramesRan < 100 && imageData.data[lastValueToCheck] < config.color) { window.requestAnimationFrame(colorSwoosh); } else { config.color = offColor; config.currentPass = Pass.second; config.frameLimitFactor = 1; initTransition(); window.requestAnimationFrame(colorSwoosh); setTimeout(() => { document.getElementById('start-button').disabled = false; }, 2000); } } else { if (imageData.data[lastValueToCheck] > config.color) { window.requestAnimationFrame(colorSwoosh); } } }; window.addEventListener("load", function() { canvas = document.getElementById('main-canvas'); startButton = document.getElementById('start-button'); const canvasWidth = canvas.getBoundingClientRect().width; const canvasHeight = canvas.getBoundingClientRect().height; canvas.setAttribute("width", canvasWidth); canvas.setAttribute("height", canvasHeight); context = canvas.getContext('2d', { alpha: false }); context.fillStyle = "black"; context.fillRect(0, 0, canvasWidth, canvasHeight); startButton.onclick = function() { numberOfFramesRan = 0; startButton.disabled = true; startButton.blur(); config.color = onColor; config.currentPass = Pass.first; if (Math.floor(Math.random() * 2) === 0) { config.direction = SideEnum.right; } config.frameLimitFactor = +document.getElementById('frameSkip').value; config.horizontalSegments = +document.getElementById('segmentation').value; config.initSpread = +document.getElementById('initSpread').value; config.lineHeight = +document.getElementById('lineHeight').value; if (document.documentElement.clientWidth <= 800) { config.lineHeight = 2; } context.drawImage(document.getElementById('ff8'), 0, 0, canvasWidth, canvasHeight); setTimeout(() => { imageData = context.getImageData(0, 0, canvasWidth, 
canvasHeight); width = imageData.width * numberOfIndicesPerPixel; height = imageData.height; segmentLength = width / config.horizontalSegments; sideMultiplier = config.direction === SideEnum.left ? 1 : -1; initialX = config.direction === SideEnum.left ? 0 : width - 1; initTransition(); window.requestAnimationFrame(colorSwoosh); }, 1000); }; }); Answer: Code and style Many variables you have declared can be constants const rather than let. Element properties defined in the DOM do not need to be set via setAttribute eg canvas.setAttribute("width", canvasWidth); can be canvas.width = canvasWidth window is the global this. You do not need to specify it. eg window.requestAnimationFrame can be requestAnimationFrame, window.addEventListener( as addEventListener Declare functions using statements (a function declaration) rather than expressions. eg var funcName = function() { is better as function funcName() { because function statements seclarations are hoisted, while function expressions are not. Try to avoid for(let as this pattern has some additional memory and assignment overhead, it also has some very unusual behavior that can catch the unwary. eg in the function initTransition the 3 loop counters x,y,z are better declared as function scoped var Try to keep variable names short and concise. They only exist within the scope they are declared in and as such gain much of their semantic meaning from that scope. eg numberOfPixelsToInit is a sentence and pixelLen, pixels, or just size would fit the role. Don't reassign variables if you don't need to. Eg you declare and define let xProgress = []; and the first time you use it you reassign an array to it in initTransition. In this case rather than create a new array just empty the array with xProgress.length = 0 and then you can define it as a const xProgress There is no need to do the double floor. Eg Math.floor(Math.random() * Math.floor(spread)); can be Math.floor(Math.random() * spread); and will produce the same result. 
You can use the shorter form if the value is positive 32bit signed integer Math.random() * spread | 0 bitwise | (OR) 0 to floor. All bitwise operations will convert the Number to a signed 32bit integer. Use aliases to reduce overhead of compound property lookups. eg const lineHeight = config.lineHeight; in function initTransition so you need not have the lookup overhead in the loops Variables SideEnum and Pass should start with lowercase. PascalCase naming is reserved for functions called with the new token Always use addEventListener to add event, adding events via the named event should be avoided. eg startButton.onclick = function() { should be startButton.addEventListener("click", .... There are more issues however your use of getImageData and setImageDate is a major point of concern so I have spent the time creating an example rather than stepping through your code line by line. Question "One thing in particular is I declared the color function inside the colorSwoosh function to make it private to that function as a form of encapsulation, I'm unsure about whether this is a good thing." Neither good nor bad. Modern JS has modules that will encapsulate the content to the modules scope so if you used a module color as a function would be fine in modules main scope. If you don't use modules use IIF pattern and to encapsulate. IIF is an immediately invoked function. The function is created (function(){})() and the brackets at the end call it. The function will close over and encapsulate the content. You can return an interface if needed. 
IIF const myFX = (() => { // all the code vars and functions within the IIF scope const myPrivateConst = "foo"; var myPrivateVar; function myPrivateFunction() {} // Define an interface if you need to access outside this scope const API = { someStartFunction() { /* do stuff */ }, someOtherStuff() { /* do stuff */ }, stopStuff() { /* do stuff */ }, }; return API })(); GPUs do pixels, not CPUs setImageData and getImageData are frame rate killers, if you use them to animate you are doing it wrong. Using the GPU via canvas 2D API The example below attempts to mimic your wipe animation without using image data and CPU to process pixels. Rather it uses the GPU (via 2D API) to manipulate pixels and lets the CPU handle timing and setting up. I could not work out (too many options) just what you wanted it to look like so the example is very basic and just wipes right to left. It uses a mask image (canvas that is not displayed) that on the first swipe reduces pixel alpha to show the canvas background (white) and on the chasing swipe refills the pixels to the final swipe color (black). Will resize to fit page once media has loaded. Resize will reset animations. Click to restart wipe FX. If media can not be found animation will not start. There are comments where I think you need them. I have used your image, I hope that is OK and you have rights to use? Example /* Notes Display canvas alpha setting must be true as the canvas background will be the first wipe colour (white in this example) Frame rate is assumed constant at 60 fps I am assuming you have usage rights and I am using this image under assumption of fair use in response to your question https://codereview.stackexchange.com/questions/226919/screen-transition-effect-like-in-final-fantasy-games */ requestAnimationFrame(mainLoop); // request first frame. This starts the main animation loop. 
will not reader till ready var update = true; // When true render content var ready = false; // When content loaded and ready to animate const FRAME_RATE = 60; // Per second. This does not set frame rate // settings for the alpha gradient used in the FX const GRAD_RES = 1 / 40; // resolution of grad curve, no smaller than 1/GRAD_WIDTH const GRAD_POW = 4.2; // Ease power of grad curve const GRAD_WIDTH = 256; // 256 to match max alpha steps const config = { imgURL: "https://gilles-leblanc.github.io/ff8-transition/ff8.jpg", image: new Image(), width: innerWidth, height: innerHeight, spread: 0.6, // fraction of display size. Max spread amount spreadMin: 0.2, // fraction of display size. Min spread amount lineHeight: 4, // in pixels wipeBOffset: 0.5, // distance behind first swipe of second swipe in fractions of display width * 2 // if wipeBOffset > 1 then second swipe will not happen // MUST be greater than 0 wipeStrength: 0.1,// alpha setting of wipe FX wipeTime: 1, // in seconds. MUST!!! be greater than 1/60 second wipeEase: 2, // strength of wipe ease. if 1 no ease,0 < val < 1 ease out, 1 < val ease in waitTime: 0.5, // in seconds, time to wait after FX before displaying info font: "32px Arial Black", // font to render info infoColor: "white", // Color to render info. 
MUST BE VALID css color info: "Click to repeat!", // text displayed after wipe complete wipeAColor: "#FFF", // WipeA colour MUST be valid CSS color wipeBColor: "0,0,0,", // WipeB colour R,G,B parts of CSS color format rgba(R,G,B,A) }; config.image.src = config.imgURL; config.image.addEventListener("load", setup); const animation = { wipeA: { posX: 0,}, wipeB: { posX: 0,}, waitForInfo: 0, active: false, speed: 0, reset() { animation.waitForInfo = config.waitTime * FRAME_RATE | 0; animation.active = true; animation.wipeA.posX = 1; animation.wipeB.posX = 1 + config.wipeBOffset; animation.speed = (config.width / (config.wipeTime * FRAME_RATE)) / config.width; config.display.ctx.drawImage(config.image, 0, 0, config.width, config.height); }, render() { const ctx = config.display.ctx; const mask = config.mask; animation.wipeA.posX -= animation.speed; animation.wipeB.posX -= animation.speed; var x = calcEasePos(animation.wipeA.posX); animation.active = false; ctx.globalAlpha = config.wipeStrength; if (x < config.width && x > -config.width) { ctx.globalCompositeOperation = "destination-out"; ctx.drawImage(mask, x, 0); ctx.globalCompositeOperation = "source-over"; animation.active = true; } x = calcEasePos(animation.wipeB.posX);; if (x <= config.width && x > -config.width * config.spread) { ctx.drawImage(mask,x,0); // fill rect that is trailing edge of this swipe ctx.fillStyle = `rgba(${config.wipeBColor}1)`; ctx.fillRect(x + config.width,0,ctx.canvas.width, ctx.canvas.height); animation.active = true; } ctx.globalAlpha = 1; }, } function calcEasePos(unitPos) { return unitPos > 1 ? config.width + 1 : unitPos < 0 ? 
-config.width : unitPos ** config.wipeEase * config.width * 2 - config.width; } function createCanvas(width, height) { return Object.assign(document.createElement("canvas"), {width, height}); } function createGradient(ctx) { var i; const grad = ctx.createLinearGradient(0, 0, GRAD_WIDTH, 0); grad.addColorStop(0, `rgba(${config.wipeBColor}0)`); // from alpha 0 on the left for (i = GRAD_RES; i < 1; i += GRAD_RES) { // Create logarithmic curve on gradient grad.addColorStop(i, `rgba(${config.wipeBColor}${i ** GRAD_POW})`); } grad.addColorStop(1, `rgba(${config.wipeBColor}1)`); // to alpha 1 on the right return grad; } function drawFinalfade() { // after wipeB complete this ensures that canvas color ends up matching desired color const ctx = config.display.ctx; ctx.globalAlpha = config.wipeStrength; ctx.fillStyle = `rgba(${config.wipeBColor}1)`; ctx.fillRect(0, 0, ctx.canvas.width, ctx.canvas.height); ctx.globalAlpha = 1; } function displayInfo() { const ctx = config.display.ctx; ctx.font = config.font; ctx.textAlign = "center"; ctx.textbaseline = "middle"; ctx.fillStyle = config.infoColor; ctx.fillText(config.info , config.width / 2, config.height / 2); } function createMask() { var y; const can = createCanvas(config.width, config.height), ctx = can.getContext("2d"); const spread = config.width * (config.spread - config.spreadMin), lineH = config.lineHeight; const spreadMin = config.width * config.spreadMin; const spreadRange = config.width * config.spread; const grad = createGradient(ctx); ctx.fillStyle = `rgba(${config.wipeBColor}1)`; // filling right side of mask ctx.fillRect(spreadRange, 0, config.width - spreadRange | 0, config.height); // render each row scaling (via setTransform) the gradient to fit the random spread ctx.fillStyle = grad; for (y = 0; y < config.height; y += lineH) { const size = Math.random() * spread + spreadMin | 0 const scale = size / GRAD_WIDTH; ctx.setTransform(scale, 0, 0, 1, 0, y); // only scale x axis ctx.fillRect(0, 0, spreadRange * 
GRAD_WIDTH / scale, lineH); } ctx.setTransform(1, 0, 0, 1, 0, 0); // restore default transform return can; } function createDisplay() { // Main render canvas added to document.body const can = createCanvas(config.width, config.height); can.style.backgroundColor = config.wipeAColor; const ctx = can.ctx = can.getContext("2d", {alpha : true}); ctx.drawImage(config.image, 0, 0, can.width, can.height); document.body.appendChild(can); // There should be a CSS rule for canvas element can.addEventListener("click",() => { if (!animation.active) { animation.reset(); update = true; } }); return can; } function setup() { // call once only config.display = createDisplay(); config.mask = createMask(); animation.reset(); ready = update = true; setup = undefined; // This will force an exception if setup is called a second time } function mainLoop() { if (ready) { // if ready check for resize if (innerWidth !== config.width || innerHeight !== config.height) { config.display.width = config.width = innerWidth; config.display.height = config.height = innerHeight; config.mask = createMask(); animation.reset(); ready = update = true; } } if (ready && update) { // Avoid unneeded rendering if (animation.active) { animation.render(); update = true; } else { if (animation.waitForInfo) { drawFinalfade(); animation.waitForInfo --; update = true; } else { update = false; // stops rendering displayInfo(); } } } requestAnimationFrame(mainLoop); } canvas { position: absolute; left: 0px; top: 0px; }
{ "domain": "codereview.stackexchange", "id": 35799, "tags": "javascript, dom, animation" }
JavaScript list sorting by date and type. better code?
Question: I'm a junior developer. And I want to learn the good way to code. The code I will show you is working, but I feel like its all messy and longer that it should be. It is simply a .js, a .html and some css that render a list of informations about scans. The list come from Firebase (Google Analytics). Here is the code I suspect of being messy - it's a script that sort the 'li' components of the list each time a date is entered or a selection is made (or both). <script> function myFunction() { // Declare variables var input, input2, filter, filter2, ul, li, a, i, txtFirstValue, txtThirdValue, txtFourthValue; input = document.getElementById('myInput'); input2 = document.getElementById('myInput2'); filter = input.value.toUpperCase(); filter2 = input2.value.toUpperCase(); ul = document.getElementById("Scan-list"); li = ul.getElementsByTagName('li'); counter = 0; test = li[0].getElementsByTagName("span")[3]; // Loop through all list items, and hide those who don't match the search query (date 1 to date 2) for (i = 0; i < li.length; i++) { scanFirstValue = li[i].getElementsByTagName("span")[0]; scanThirdValue = li[i].getElementsByTagName("span")[2]; scanFourthValue = li[i].getElementsByTagName("span")[3]; txtFirstValue = scanFirstValue.textContent; txtThirdValue = scanThirdValue.textContent; txtFourthValue = scanFourthValue.textContent; var stringToDate = new Date(txtFirstValue); var testSelectBox = dropdownBodypartsDeep.value; var testvaluebiddon = "Scan Type Specialize : Dorsum"; if (input.value == ""){ if( ((txtThirdValue == "Scan Type : " + dropdownBodyparts.value) && (txtFourthValue == "Scan Type Specialize : " + dropdownBodypartsDeep.value)) | ((txtThirdValue == "Scan Type : " + dropdownBodyparts.value) && (txtFourthValue == "Scan Type Specialize : ")) ) { li[i].style.display = ""; counter = counter +1; } else if (dropdownBodyparts.value == "All"){ li[i].style.display = ""; counter = counter +1; } else if ( (txtThirdValue == "Scan Type : " + 
dropdownBodyparts.value) && (dropdownBodypartsDeep.value == "All")){ li[i].style.display = ""; counter = counter +1; } else { li[i].style.display = "none"; } } else if(input.value != "" && dropdownBodyparts.value == "null") { if( (stringToDate.getTime() >= (new Date(input.value)).getTime() && stringToDate.getTime() <= (new Date(input2.value)).getTime()) ) { li[i].style.display = ""; counter = counter +1; } else { li[i].style.display = "none"; } } else if(input.value != "" && dropdownBodyparts.value != "null") { if( (stringToDate.getTime() >= (new Date(input.value)).getTime() && stringToDate.getTime() <= (new Date(input2.value)).getTime()) && ((txtThirdValue == "Scan Type : " + dropdownBodyparts.value) && (txtFourthValue == "Scan Type Specialize : " + dropdownBodypartsDeep.value)) | ((txtThirdValue == "Scan Type : " + dropdownBodyparts.value) && (txtFourthValue == "Scan Type Specialize : ")) ) { li[i].style.display = ""; counter = counter +1; } else if (dropdownBodyparts.value == "All" && (stringToDate.getTime() >= (new Date(input.value)).getTime() && stringToDate.getTime() <= (new Date(input2.value)).getTime())) { li[i].style.display = ""; counter = counter +1; } else { li[i].style.display = "none"; } } } document.getElementById('counterLabel').innerHTML = counter; } </script> And here is the HTML for the web page : <body> <h1 id="titleLabel" >TechMed3D Scan DataBase</h1> <div id="imageTechMed"> <img src="css/3dsizeme2019.png" class="topRight" style="width:250px;height:45px;"> </div> <div class="content"> <div class="container"> <label id="labelDate">Date de départ : </label> <input type="text" id="myInput" name="a" onkeyup="myFunction()" placeholder="yyyy-mm-dd" > <form> <select name = "dropdown" id="dropdownBodyparts" onchange="if (this.selectedIndex) myFunction();"> <option disabled selected value = "null"> -- Select A Bodypart --</option> <option value = "All" >All</option> <option value = "Head">Head</option> <option value = "Foot">Foot</option> <option value 
= "Leg">Leg</option> <option value = "Elbow">Elbow</option> <option value = "Torso">Torso</option> </select> </form> </div> <div class="container"> <label id="labelDate">Date de fin : </label> <input type="text" id="myInput2" name="b" onkeyup="myFunction()" placeholder="yyyy-mm-dd" > <form> <select name = "dropdown" id="dropdownBodypartsDeep" onchange="if (this.selectedIndex) myFunction();"> <option value = "Specialized" selected>Specialized</option> </select> </form> <div id="scanNumberRight"> <label id="counterAnouncerLabel">Nombre de scan dans la liste : </label> <label id="counterLabel">-</label> </div> </div> <div style="text-align:center;"> <input type="button" id="first" onclick="firstPage()" value="first" /> <input type="button" id="next" onclick="nextPage()" value="next" /> <input type="button" id="previous" onclick="previousPage()" value="previous" /> <input type="button" id="last" onclick="lastPage()" value="last" /> </div> <ul id="Scan-list"></ul> </div> For the selectBox, i use this javascript code to render the good value : //Set the second selectBox depending on the first one choice. 
$(document).ready(function () { $("#dropdownBodyparts").change(function () { var val = $(this).val(); if (val == "All") { $("#dropdownBodypartsDeep").html("<option value='All'>All</option>"); } else if (val == "Head") { $("#dropdownBodypartsDeep").html("<option value='Specialized'>Specialized</option>"); } else if (val == "Foot") { $("#dropdownBodypartsDeep").html("<option disabled selected value> -- Select A Specialized Bodypart</option><option value='All'>All</option><option value='Dorsum + Imprint'>Dorsum + Imprint</option><option value='Plantar Surface'>Plantar Surface</option><option value='Foam Box'>Foam Box</option><option value='Dorsum'>Dorsum</option><option value='for AFO'>for AFO</option>"); } else if (val == "Leg") { $("#dropdownBodypartsDeep").html("<option disabled selected value> -- Select A Specialized Bodypart</option><option value='All'>All</option><option value='Knee'>Knee</option><option value='AK'>AK</option><option value='BK'>BK</option>"); } else if (val == "Elbow") { $("#dropdownBodypartsDeep").html("<option value='Specialized'>Specialized</option>"); } else if (val == "Torso") { $("#dropdownBodypartsDeep").html("<option disabled selected value> -- Select A Specialized Bodypart</option><option value='All'>All</option><option value='Normal'>Normal</option><option value='Two-sided'>Two-sided</option><option value='Mirror'>Mirror</option><option value='Seating'>Seating</option>"); } }); }); And here is the UI Answer: Use meaningful variable names (and element IDs), for example, startDateInput instead of input/myInput. Declare varables in the smallest needed scope instead all of them at the start of the function, and declare each variable in a separate statement. Use let or const instead of var (unless you need to support enviroments that don't support them). Move the variables that don't change for each execution of the function, such as the input references, outside the function. 
Use the children property to access the list items of the scan list. Example of these changes: const startDateInput = document.getElementById('startDateInput'); const endDateInput = document.getElementById('endDateInput'); const scanList = document.getElementById("Scan-list"); function myFunction() { const startDateFilter = startDateInput.value.toUpperCase(); const endDateFilter = endDateInput.value.toUpperCase(); const li = scanList.children; let counter = 0; // ... } Unless you exlipictly need the index use a for ... of loop to iterate over the lis. Don't repeat things like getElementsByTagName("span"). for (const item of li) { const spans = item.getElementsByTagName("span") const itemDateString = spans[0].contentText; // Variable name describing content const itemBodyPart = spans[2].contentText; // ... } Be careful using new Date(...) to convert arbitrarily strings into dates. Especially it will break if you use a different date format in the output. The big filtering if block is difficult to read. Don't repeat the same comparisons. Extract the large boolean expressions that go over multiple lines into separate functions. Don't hard code strings such as "All" and "Scan Type : " in your code. It makes translations diffcult. Generally parsing the data out of the output HTML is a bad idea. It would be better to filter the raw data that the list items represent instead, but that would require completely different code. At least consider encoding the relevant data in the list items in microdata attributes or data- attributes. For example, instead of <li> <span>2019-12-24</span> <span>Scan Type : Foot</span> </li> have something like this (with microdata): <li itemscope> <!-- Identify the elememt via the `itemprop` attribute instead of its index. 
Have the datetime attribute in a computer readable format and display the date to the user in a human readable format --> <time itemprop="date" datetime="2019-12-24"> December 24th, 2019 </time> <!-- `value` attribute contains the data in format the script understands and the text is translatable and human readable. --> <span itemprop="body-part" value="Foot"> Scan Type : Pied </span> </li> Or with data attributes: <li> <span data-date="2019-12-24"> December 24th, 2019 </span> <span data-body-part="Foot"> Scan Type : Pied </span> </li> counter = counter +1; can be simplified to counter++. Don't repeat li[i].style.display = ""; counter = counter +1;. Instead have your filtering if block return a boolean (visible), and then at the end say: if (visible) { li[i].style.display = ""; // Or `item.style.display = "";` in my example counter++; } else { li[i].style.display = "none"; } Don't assign event handlers in the HTML in the on... attributes. Instead assign them in the JavaScript code with addEventListener. disabled and selected on the same option doesn't make much sense. You didn't use jQuery in myFunction so no need to start using it in the other JavaScript. Avoid jQuery as much as you can, especially in such simple scripts where its features aren't taken advantage of.
{ "domain": "codereview.stackexchange", "id": 37038, "tags": "javascript, jquery, html" }
Spinning an object in vertical circle
Question: The situation : A block of mass $M$ is tied to a string and is spun around in a vertical circle. The question asks me to calculate the tension in the string 'at the lowest point' after giving some values. In the problem, when the body is in the lowest point, shouldn't the tension be $Mg$ only? It is told that perpendicular forces are independent of each other, then why/why not the velocity of the mass be included in this scope of calculating the tension in the wire? Answer: To keep the mass going around in a circle, you need to be accelerating towards the center of the circle. The force for this (if your speed is $v$) is $$F = \frac{mv^2}{r}$$ where $m$ is the mass, and $r$ is the radius of the circle. Now $v$ will be a function of position in your arc - you will have to figure out what it is at the bottom (conservation of energy may be your friend). To get the tension in the string, this force has to be added to the force of gravity at the bottom (and it is subtracted at the top because gravity will be pointing in the same direction as the force needed to keep the mass in orbit - so you need less tension in the string).
{ "domain": "physics.stackexchange", "id": 25801, "tags": "homework-and-exercises, newtonian-mechanics, forces, free-body-diagram, angular-velocity" }
Will increased precipitation in Antarctica prevent sea level rise?
Question: It has been suggested that global warming will lead to increased precipitation in parts of Antarctica. This would sequester water in the ice sheet, preventing sea level rise. However, the slope (and thus the height and volume) of continental ice sheets is limited by ice properties. What if the ice sheet is already at its maximum slope/height/volume (is this known)? Wouldn't adding more ice just cause more flow to the sea? According to "Ice Sheet Modeling" ice behaves as a deformable plastic material, which means that there is a critical shear stress, below which no strain (deformation or flow) will occur ... If the slope is too low, the basal shear stress will not match the critical shear stress, ... but as snow piles up, ... flow will begin. ... The result of this is that a glacier has an equilibrium profile I am presuming that the ice sheets are already slightly above their equilibrium profile (since they are flowing). Since the profile depends on the area and shape of the base (which is shrinking due to melting around the edges), they are already holding as much ice as possible, and adding more snow to any region will just accelerate the flow. I don't know on what timescale this occurs. (I have not tried the computer models described in the paper.) Answer: There is some scope for continuing debate because quantifying the various components of the ice/snow/water balance are fraught with difficulty, and many of the estimates have error bounds which approach the magnitude that is being measured. However, a good best estimate, subject to continuing research, is given in:Is Antarctica losing or gaining ice? This cites the following two graphs: Clearly there are important regional differences, with East Antarctica gaining slightly, but not enough to compensate for the ice loss in west Antarctica. It is true that the amount of fresh snow in the Antarctic is increasing, but it still doesn't amount to much. 
Antarctica is still the driest continent, by far, and much more so than Australia. The controlling factor is not the amount of snow on the land, but the fate of the sea ice around the perimeter. Notwithstanding climate deniers, there is abundant evidence that the oceans' sea surface temperatures are warming, including the Southern Ocean. This reduces the seasonal extent and thickness of sea ice, which lowers the oceanic albedo, and increases near-surface heat absorbtion. It is a well-documented feedback process. Overall the warmer ocean around Antarctic destabilizes the shelf ice, thereby unlocking the glacial ice which would have been held back. So we have two opposing non-linear processes, mobilization of coastal land ice (mass loss), vs increased snowfall (mass gain). At present, and for the forseable future, the mass-loss is winning. I'm guessing this will continue to be the case because the negative feedback is very powerfull. I don't think there are any reliable forward projections about how precipitation in the Antarctic will evolve. (Later) In view of the comments, maybe it would be helpful to visualize the daily precipitable moisture around the world. This is available at El Dorado Weather.
{ "domain": "earthscience.stackexchange", "id": 877, "tags": "climate-change, sea-level, glaciology, ice-sheets" }
Terminal freezes after roslaunch
Question: Hello! I have a problem. I've written a roslaunch but if I execute it and then ctrl-c, the current window of the terminal freezes and I have to open a new one. It's almost equal to other .launch files that do not give problems, except that in this one I pass some args to a node using: args="\dir\blabla.ext" what could be the problem? Originally posted by ldima on ROS Answers with karma: 122 on 2012-03-17 Post score: 0 Original comments Comment by Kevin on 2012-03-17: Can you post the launch file, what operating system, what version of ROS, etc? Need more info to help ... thanks! Answer: IIRC, teleop_base_keyboard takes input from the current terminal, so it's likely that it's taking over the terminal that you are running roslaunch from. You should run teleop_base_keyboard separately. Originally posted by kwc with karma: 12244 on 2012-03-20 This answer was ACCEPTED on the original site Post score: 0
{ "domain": "robotics.stackexchange", "id": 8624, "tags": "roslaunch" }
Getting the index of a character within the alphabet
Question: I've made this function to map alphabetic coordinates to their corresponding ordinal number. var exec = document.querySelector('#exec'); // Maps alphabetic characters to their index // within the alphabet. // -------------------------------------------- // @param { string || array } chars - String // consisting of a single alphabetic character // or an array which elements are single // alphabetic characters. // @throws Error in case of invalid // parameter ( == not a single alphabetic // character ). // -------------------------------------------- // @returns { array } with numbers. The indexes // of the characters within the alphabet. // // ------- Usage examples ---------- // getIndexInAlphabet('i')); // Result : [9] // getIndexInAlphabet(['a', 'B', 'c'])[1] // 2 // getIndexInAlphabet(['a', 'G', 'z'])); // [1, 7, 26] function getIndexInAlphabet( chars ) { var alphabet = [ 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z' ]; if (!Array.isArray(chars)) { if (typeof chars === 'string') { let tmp = []; tmp.push(chars); chars = tmp; } else { throw new Error( 'Parameter invalid because not of type string.'); } } chars.forEach(function(item, i) { if (typeof item !== 'string') { throw new Error('Element ' + i + ' invalid because not of type string.'); } }); return chars.map(function(char, i) { var ret = alphabet.indexOf(char.toLowerCase()) + 1; if (ret === 0) { throw new Error('Element ' + i + ' invalid because' + ' not an alphabetic character.'); } return ret; }); } // -- From here on : Just testing ... 
exec.addEventListener('click', function() { try { console.log(getIndexInAlphabet(['a', 'B', 'c'])[1]) console.log(getIndexInAlphabet('i')); console.log(getIndexInAlphabet(['a', 'G', 'z'])); var charStr = ['a', 'b', 'c', 'd', 'e', 'f']; var indexes = getIndexInAlphabet(charStr); var charMap = {}; charStr.forEach(function(char, i) { charMap[char] = indexes[i]; }); console.log(charMap.f); } catch (e) { console.log(e.message); console.log(e.stack); } }); .wrap { width: 800px; margin: 50px auto; } <div class="wrap"> <div class="buttons"> <a href="#" id="exec" class="slideshow-nav">Exec!</a> </div> </div> Answer: Changed the way you wrote the alphabet even though it's fine tuning at this point, I prefer one liner when they're still clean and easy to understand. I'd rather use a more declarative way of writing the process. Fiddle : https://jsfiddle.net/y6zx2rht/1/ function getIndexInAlphabet(chars) { var alphabet = 'abcdefghijklmnopqrstuvwxyz'.split(''), validateItemIsString = function(item) { if (typeof item !== 'string') { throw new Error('Element ' + i + ' invalid because not of type string.'); } }, validateCharsAreStrings = function(chars) { chars.forEach(function(item, i) { validateItemIsString(item); }); }, getSanitizedChars = function(chars) { if (!Array.isArray(chars)) { chars = [chars]; } validateCharsAreStrings(chars); return chars; }, getIndexInAlphabet = function(char, i) { var ret = alphabet.indexOf(char.toLowerCase()) + 1; if (ret === 0) { throw new Error('Element ' + i + ' invalid because' + ' not an alphabetic character.'); } return ret; }; return getSanitizedChars(chars).map(function(char, i) { return getIndexInAlphabet(char, i); }); } This lets you prepare the whole process within small isolated steps. Also, you won't have to focus so much on the "how I want this", but on the "I want this". This is pretty clear in this portion of code : if (!Array.isArray(chars)) { if (typeof chars === 'string') { let tmp = []; tmp.push(chars); chars = tmp; } //... 
} What you want here is : "Return an array containing chars". What you do instead is : "Create an empty array. Push chars into it. Return the array". Then it becomes : if (!Array.isArray(chars)) { if (typeof chars === 'string') { return [chars]; } } Also, as 200_success said, validating that char is a string and that it is not an array is redundant, so it can be further simplified to : if (!Array.isArray(chars)) { return [chars]; }
{ "domain": "codereview.stackexchange", "id": 42432, "tags": "javascript, strings, ecmascript-6" }
What do colored noises look like in the time domain?
Question: I understand that the time domain representation of white noise looks like impulses. What do colored noises like brown, pink, etc. look like when we perform an inverse Fourier Transform on them? What could be some sources of colored noise that may affect speech signals? Answer: I guess you ask how they sound rather than how they look, right? The "color of the noise" corresponds to the perception you would have from mixing different perfectly chromatic wavelengths: white if you cover all frequencies, pink when there is less blue (a smooth drop in high frequencies), brown (if the drop is even more sharp) and so on... Similarly, the "color of your noise" in the temporal domain should be put in analogy to natural sources of noises: drops of water are independent and their mixing tends to form white noise, the sound of hushing ("shhh..") is basically white noise that would be filtered by your speech apparatus, so this is comparable to pink noise, the diversity of colors can be perceived in music for instance
{ "domain": "dsp.stackexchange", "id": 3587, "tags": "noise, speech, transform, fourier" }
SEDE Query to Find... Weird Question/Protect Combinations
Question: I was interested in finding some of the weirdest question/protection combinations, basically out of curiosity. I heuristically defined this as: Low view counts Not a moderator/SE employee From users who protect multiple questions I'm... let's just say not an expert in SQL. Though this query works, it feels super inefficient (especially the 2x left join, it seems plausible that could be a single join). Looking basically for any feedback. I used this site to format things, so hopefully it's more readable than the mess it was before I started formatting it. You can play with it here if your curiosity is also too strong to resist ;) DECLARE @postViews int SET @postViews = ##postView## SELECT [Post Link] = ph.PostId, [User Link] = ph.UserId, Protected.ProtectCount, Unprotected.UnprotectedCount FROM PostHistory ph JOIN Posts p ON ph.PostID=p.id LEFT JOIN (SELECT count(UserID) AS ProtectCount, PostID FROM PostHistory ph WHERE ph.PostHistoryTypeID=19 GROUP BY PostID) AS Protected ON Protected.PostID=ph.PostID LEFT JOIN (SELECT count(ph.UserID) AS UnprotectedCount, PostID FROM PostHistory ph WHERE ph.PostHistoryTypeID=20 GROUP BY PostID) AS Unprotected ON Unprotected.PostID=ph.PostID WHERE ph.UserID IN (SELECT top ##NumbUsers## ph.UserId FROM PostHistory ph JOIN Posts p ON ph.PostID=p.id WHERE ph.PostHistoryTypeId = 19 AND p.ViewCount < @postViews /* exclude former moderators */ AND ph.UserID NOT IN (419, 1228, 1288, 19679, 23354, 50049, 59303, 102937, 106224, 246246, -1) GROUP BY ph.UserId ORDER BY COUNT (ph.UserId) DESC) AND ph.PostHistoryTypeId = 19 AND p.ClosedDate IS NULL AND p.ViewCount < @postViews AND isnull(Protected.ProtectCount,0) != isnull(Unprotected.UnprotectedCount,0) ORDER BY ph.UserId Answer: One of the first things you should do if you have an arbitrary data set (as you do in your list of UserIds) would be to extract it out of the WHERE clause. Just make a @TempTable variable. If the data set gets very large, consider a "physical" #TempTable. 
DECLARE @UsersToExclude TABLE (UserId INT PRIMARY KEY); INSERT INTO @UsersToExclude (UserId) VALUES /*Some comments to explain these numbers would be good:*/ (419), (1228), (1288), (19679), (23354), (50049), (59303), (102937), (106224), (246246), (-1); Then you can just do an existence check in your query: AND ph.UserID NOT IN ( SELECT usrExcl.UserID from @UsersToExclude as usrExcl ) Please get into the habit of using the (optional) keyword AS to reference to table aliases. If not used it can be ambiguous whether it's supposed to be an alias or a query hint for something else, like NOLOCK. FROM PostHistory AS ph JOIN Posts AS p ON ph.PostID = p.id This bracketing style is a bit unusual: LEFT JOIN ( SELECT foo FROM bar WHERE ...) AS Unprotected ON Unprotected.PostID=ph.PostID It looks like LISP-style brackets. Most often in SQL either C# or Java style brackets are used (depending on the programming shop). C#-style LEFT JOIN ( SELECT foo FROM bar WHERE ... ) AS Unprotected ON Unprotected.PostID=ph.PostID Java-style LEFT JOIN ( SELECT foo FROM bar WHERE ... ) AS Unprotected ON Unprotected.PostID=ph.PostID What does this number mean? WHERE ph.PostHistoryTypeID=19 No easy way to tell. Consider something like: DECLARE @QuestionProtected INT = (SELECT Id FROM PostHistoryTypes WHERE Name = 'Question Protected'); Then it's easier to follow: WHERE ph.PostHistoryTypeID = @QuestionProtected SEDE magic You can get this valueat the top, or assign a default value for newbies like me: -- postViews: Number of post views DECLARE @postViews INT = ##postViews:int?10000##; The other one is a script variable so it can't be manipulated as easily, but can still be assigned a default value (and documented): WHERE ph.UserID IN ( -- NumbUsers: Max number of users SELECT TOP ##NumbUsers:int?5000## One more thing, AND isnull(Protected.ProtectCount,0) != isnull(Unprotected.UnprotectedCount,0) The != operator is not SQL standard. Instead, use <>. 
Also, consider using COALESCE() instead of ISNULL() for some of the reasons listed here. Performance is right about the same, but it handles types better, usually (and can take more than 2 arguments, if needed). Everything combined (demo on SEDE) -- postViews: Number of post views DECLARE @postViews INT = ##postViews:int?10000##; DECLARE @UsersToExclude TABLE (UserId INT PRIMARY KEY); DECLARE @QuestionProtected INT = (SELECT Id FROM PostHistoryTypes WHERE Name = 'Question Protected'); INSERT INTO @UsersToExclude (UserId) VALUES /*Some comments to explain these numbers would be good:*/ (419), (1228), (1288), (19679), (23354), (50049), (59303), (102937), (106224), (246246), (-1); SELECT [Post Link] = ph.PostId, [User Link] = ph.UserId, Protected.ProtectCount, Unprotected.UnprotectedCount, p.ViewCount FROM PostHistory AS ph INNER JOIN Posts AS p ON ph.PostID = p.id LEFT JOIN ( SELECT count(UserID) AS ProtectCount, PostID FROM PostHistory ph WHERE ph.PostHistoryTypeID = @QuestionProtected GROUP BY PostID ) AS Protected ON Protected.PostID = ph.PostID LEFT JOIN ( SELECT count(ph.UserID) AS UnprotectedCount, PostID FROM PostHistory ph WHERE ph.PostHistoryTypeID=20 GROUP BY PostID ) AS Unprotected ON Unprotected.PostID = ph.PostID WHERE ph.UserID IN ( -- NumbUsers: Max number of users SELECT TOP ##NumbUsers:int?5000## ph.UserId FROM PostHistory ph JOIN Posts p ON ph.PostID=p.id WHERE ph.PostHistoryTypeId = @QuestionProtected AND p.ViewCount < @postViews /* exclude former moderators */ AND ph.UserID NOT IN ( SELECT usrExcl.UserID from @UsersToExclude as usrExcl ) GROUP BY ph.UserId ORDER BY COUNT(ph.UserId) DESC ) AND ph.PostHistoryTypeId = @QuestionProtected AND p.ClosedDate IS NULL AND p.ViewCount < @postViews AND COALESCE(Protected.ProtectCount,0) <> COALESCE(Unprotected.UnprotectedCount,0) ORDER BY ph.UserId ASC;
{ "domain": "codereview.stackexchange", "id": 18214, "tags": "sql, stackexchange" }
Creating a temp array vs passing one in Mergesort
Question: In my CS class, we've discussed two ways of dealing with the temporary array required for the merging phase of mergesort. One of them is to pass a full-size temporary array as a parameter, e.g. merge(array, tempArray, first, last), and another is to create a temporary array of appropriate size (or two half-sized arrays) in the function call, e.g. merge(array, first, last): ... leftArray = items from first to middle rightArray = items from middle+1 to last ... My CS professor is of the opinion that the first option will somehow have a performance (speed/memory) improvement over the second, but I don't understand why. Merging the arrays is a "tail" operation in mergesort, and so the second option doesn't seem like it will actually require more memory overall. Is the only reason why because memory allocation takes time? Thanks! Answer: Most programming languages pass arrays as function arguments by reference: that is, the function is told where in memory the array is located, rather than being given all the elements directly. This means that, when you call something like sort (int array A, int left, int right) only three words of data are passed to the function, regardless of how big the array is – the address of A and the integer values describing the portion of the array to be sorted. In contrast, the approach of creating two new arrays for the two halves requires allocating memory for the new arrays and copying the data across. Each of these takes a number of operations that is proportional to the size of the array. This is much less efficient, especially when you consider that the algorithm is recursive and will copy the array multiple times.
{ "domain": "cs.stackexchange", "id": 8549, "tags": "sorting, mergesort" }
What is "small layer thickness" defined as in terms of bulk Richardson number approaching gradient Richardson number?
Question: In many sources (https://glossary.ametsoc.org/wiki/Bulk_richardson_number), bulk Richardson number is defined as an approximation to gradient Richardson number. The former only approaches the latter when layer thickness, $\Delta z$ becomes "small". What is small defined as? For example, comparison of simulation domain to $\Delta z$, overall length $\Delta z$ approaches 0, or etc? Answer: Strictly speaking, $\lim_{\Delta z \to 0} Ri_b=Ri$. This is because the gradient Richardson number: $$Ri=\frac{\frac{g}{T_v}\frac{\partial \theta_v}{\partial z}}{\left(\frac{\partial U}{\partial z}\right)^2+\left(\frac{\partial V}{\partial z}\right)^2}$$ can be approximated as $$Ri \approx\frac{\frac{g}{T_v}\frac{\Delta \theta_v}{\Delta z}}{\left(\frac{\Delta U}{\Delta z}\right)^2+\left(\frac{\Delta V}{\Delta z}\right)^2} \tag{1}$$ which can be rewritten as the Bulk Richardson Number: $$Ri_b=\frac{\frac{g\Delta z \Delta \theta_v}{T_v}}{(\Delta U)^2+(\Delta V)^2} \tag{2}$$. The derivation of (2) from (1) is left as an exercise for the reader.
{ "domain": "earthscience.stackexchange", "id": 2246, "tags": "meteorology, atmosphere, fluid-dynamics" }
Formula for velocity of magnets depending on their weight and strength in Magnetic Repulsion
Question: Given 2 similar magnets, each having mass $m_0$ (any convenient shape, say small cylinders) held together (flat sides N-N or S-S facing each other) by force (to counter their physical repulsion) in a hollow glass tube (so they travel in a straight line). Let the magnetic strength of each magnet be $h_0$ (whatever be the appropriate unit). When the physical contraption is removed they travel in opposite direction from the origin. Each magnet travels a certain distance $d_0$ from the origin in time $t_0$. In an ideal case scenario (ignoring all the other external factors like friction etc.) how does the time taken to travel the distance $d_0$ change with change in the magnet's mass and strength? Can someone help with a formula? Note: We are assuming both magnets as equal. Answer: I assume they’re horizontal so that gravity is not part of it, and they said an ideal situation so we don’t have friction either. Just one force. $$F=ma= \frac{\mu q_1 q_2}{4 \pi r^2 (t)}= \frac{k q_1 q_2}{r^2 (t)}$$ where $r$ is a function of time, and as per the equation force is too. But the equation assumes constant $q$’s (handles distance via $r$). The force between two magnets is a function of distance; don’t worry further about fields. The fields change with distance to give that force. There are no dynamics perpendicular to the fields and motion (velocity parallel to field) so the right hand rule lets you ignore the effects of speed on force. $$a= F/m = \frac{k}{r^2 (t)}$$ Different k. To solve this use this: https://math.stackexchange.com/a/3928053/955529 But stop before “From here..” because thats the answer. That’s the limit until/unless you have questions. Details to work out for sure. Limits for integral etc. Plus I have to go. Does that help?
{ "domain": "physics.stackexchange", "id": 81499, "tags": "electromagnetism" }
Planetary Systems
Question: How does one know the correct position of planets in relation to the sun when viewing the solar system from different angles makes the appearance of the planets different? I would think that it would require a satellite to orbit in a circular pattern above and below the entire solar system to get correct bearings. Answer: One perspective (heh) involves the following relation among position vectors: $$\vec{r}_{A\rightarrow C} = \vec{r}_{A\rightarrow B} + \vec{r}_{B\rightarrow C}.$$ These position vectors can be for anything; object $A$ could be a house, object $B$ an ant, and object $C$ a leaf on the river. Here's a diagram to help: So if you want to know the position of object $C$ relative to object $A$ (the bold dark arrow), you just have to know the position of some other object $B$ relative to those others. To answer your question, you can apply this same idea to the solar system: $$\vec{r}_{\mathrm{Sun} \rightarrow X} = \vec{r}_{\mathrm{Sun} \rightarrow \mathrm{Earth}} + \vec{r}_{\mathrm{Earth} \rightarrow X}.$$ Or in pictures: The position of planet $X$ relative to the sun (bold dark arrow, which is what we want) can be found if we know Earth's position relative to the Sun and planet $X$'s position relative to Earth. In this way, measurements of a planet's position as measured from here on Earth can be used to get a map of the solar system.. There is the added complication of knowing distance to planets and coming up with a convenient coordinate system in order to actually come up with values for these position vectors. Others may have better information on that.
{ "domain": "physics.stackexchange", "id": 12940, "tags": "astronomy, planets" }
Longest subarray with at most two different values - Runtime complexity for a DP solution
Question: Consider the problem of finding, for a given input array, the longest subarray with at most two different values. For example: Input: [3,3,3,1,2,1,1,2,3,3,4] Ans = 5, the longest subarray would be [1,2,1,1,2]. Input: [1,2,3,2,2] Ans = 4, the longest subarray would be [2,3,2,2]. Below is dynamic programming solution to this problem (in Python, hopefully it's easy to read) using a sliding window that holds a "valid subarray" (the subarray of elements between indices i and j always holds two values at most). I read on e.g. LeetCode that this solution has a runtime complexity of $O(N)$ where $N$ is the length of the input array, but that's not immediately clear to me since we have two nested loops with $i$ and $j$ and $0\leq i\leq j\leq n$. Why is the worst-case runtime complexity of this solution $O(N)$ and not $O(N^2)$? Here's the DP solution in question with those nested loops holding a subarray between $i$ and $j$: def longest_subarray_holding_two_diff_values (input_array): ans = i = 0 count = collections.Counter() for j, x in enumerate(input_array): count[x] += 1 while len(count) >= 3: count[input_array[i]] -= 1 if count[input_array[i]] == 0: del count[input_array[i]] i += 1 ans = max(ans, j - i + 1) return ans Answer: Here is how the sliding window algorithm works (unfortunately, I don't understand your code, so can't say whether this is the same algorithm). We keep track of two pointers $i,j$, with the following properties: the subarray $A[j],\ldots,A[i]$ contains exactly two values, and it is maximal with respect to $j$ (that is, either $j = 0$ or $A[j-1],\ldots,A[i]$ contains three values). We also keep track of the two values in question $a,b$, and of their last appearance $k_a,k_b$. Finally, we keep track of the longest valid subarray seen so far. In the initialization phase, we scan the array until we see two different values; if the array is constant, then the answer is the length of the array. At steady state, we take a peek at $A[i+1]$. 
If $A[i+1] \in \{a,b\}$, we update $k_a$ or $k_b$, and simply increase $i$. If $A[i+1] \notin \{a,b\}$, then we do two things. First, we update the value of the longest valid subarray seen so far (comparing it to $i-j+1$). Second, suppose that $A[i] = a$; then we set $j = k_b+1$, set $b = A[i+1]$, set $k_b = i+1$, and increment $i$. Finally, when reaching $i = n$, we update the value of the longest valid subarray (comparing it to $i-j+1$), and output the result. As you can see, this algorithm performs $O(1)$ operations per iteration, so runs in $O(n)$ time.
{ "domain": "cs.stackexchange", "id": 15900, "tags": "time-complexity, dynamic-programming, arrays, python" }
Failure of the Pumping Lemma
Question: On the Wikipedia page for the Pumping Lemma for Context-Free Languages, a language, $$ \{b^j c^k d^l | j, k, l \in N\} \cup \{a^i b^j c^k d^l | i > 0, j = k = l\} $$ is introduced. The pumping lemma proof for this supposedly fails because, given a string without a's, you can pump the b's and, given a string with a's, you can pump the a's. In either case, all of the possible cuttings are pumpable according to the constraints of the language. For example, given the string $$ a^k b^k c^k d^k $$ you can split vwx over |a| and pump, leaving |b| = |c| = |d|. Why can the string $$ a b^k c^k d^k $$ not be used? This seems to meet all the constraints of the pumping lemma, and you cannot split vwx across |a|. If v = a then x must contain at least one b. The string cannot be pumped for any i > 2 as the |b| will exceed the length of |c| and |b| will exceed the length of |d|. Is there some reason that |a| > 1 or |a| = k? Answer: The condition of the Pumping lemma is fulfilled for words of the form $ab^kc^kd^k$. Pick $v=a$, $u=x = \varepsilon$, and $w$, $y$ arbitrary.
{ "domain": "cs.stackexchange", "id": 8521, "tags": "formal-languages, context-free, pumping-lemma" }
Would 2 stars in binary star formation with same mass and no velocity collide into each other?
Question: Velocity decides if 2 stars with different masses in a binary star formation will collide or not, but if 2 stars with same mass existed but with no velocity (i.e. they are not orbiting each other, just staying at their places at some distance), would they collide? I guess the answer is no, but just not sure of how and where do the gravitational wave friction come into the scenario. Answer: It seems the crux of your question lies in a misunderstanding of how gravity works. So I'll try to answer your question by correcting your perceptions about gravity. Gravity is a force imparted by objects with mass on all other objects with mass. The range of a gravitational force is infinite which means that galaxies millions and billions of light years away are technically applying a gravitational force on you, it's just so infinitesimally small as to be not noticeable. The general equation for the force of gravity between two objects of mass $M_1$ and $M_2$ at a separation of $r$ is given by $$F_g = G\frac{M_1M_2}{r^2}$$ where $G$ is the gravitational constant equal to $6.67\times10^{-11}\ \mathrm{m^3\ kg^{-1}\ s^{-2}}$. If you had nothing in the universe except the Earth ($M_1 = 5.97\times10^{24}\ \mathrm{kg}$) and the Moon ($M_2 = 7.35\times10^{22}\ \mathrm{kg}$) at their current distance of $r = 3.8\times10^8\ \mathrm{m}$, you'd find that $F_g = 2\times10^{20}\ \mathrm{N}$ where $N$ is a Newton - a unit of force equivalent to $\mathrm{kg\ m\ s^{-2}}$. So in this scenario you calculate the gravitational force experienced by each body. The important point here is that both bodies experience this singular force. The Moon is pulled towards the Earth with a force equal to $2\times10^{20}\ \mathrm{N}$ and the Earth is likewise pulled towards the Moon by the same force equal to $2\times10^{20}\ \mathrm{N}$. They cannot and do not cancel each other out because each body is feeling one and only one force of gravity.
The Moon knows it experiences a gravitational force from the Earth and so it responds to that force by moving towards the Earth. It also happens to apply a gravitational force on the Earth to pull the Earth towards itself, but that force is the same force. So in your scenario, two neutron stars which are just sitting next to each other in space would experience a singular gravitational force between them and pull themselves towards each other, eventually colliding. You can hopefully see the above argument works whether the two objects are the Moon and Earth, two neutron stars, or even Earth and you. Due to the mutual, single gravitational force within the system, the two bodies will be attracted to one another. Of course, everything I've just said is the "Newtonian" description of gravity. A more complete and accurate picture is achieved by considering this from a General Relativity standpoint.
{ "domain": "astronomy.stackexchange", "id": 2555, "tags": "binary-star" }
ROS Answers SE migration: URDF or Xacro?
Question: I am new to ROS and I have been using SolidWorks Exporter for generating URDF for my robot. Things didn't go well adding transmissions and controller plugins, Gazebo crashes if I spawn the urdf with controller plugins and transmissions, so I started studying other robots in ROS in detail (for example universal-robot) and all of them had Xacro files. Should I also use Xacro? Or the URDF generated by SolidWorks should work fine with transmissions and controller plugins? If I should use xacro, how do I generate a Xacro? Thanks for your attention. Originally posted by Oguz on ROS Answers with karma: 121 on 2015-01-31 Post score: 3 Answer: Xacro is just a scripting mechanism that allows more modularity and code re-use when defining a URDF model. When using it, what is actually uploaded to the parameter servers (per default as the "robot_description" parameter) actually is a URDF, as that gets generated from the xacro file in the launch file (by expanding the xacro macros used). As an example, say you have a "robot.urdf.xacro" file. You can run rosrun xacro xacro.py robot.urdf.xacro > robot.urdf and that gives you the URDF generated from your xacro file. The same approach is commonly used in launch files when a xacro-based robot_description is uploaded. Xacro is just another way of defining a URDF, not an alternative to it. It makes certain things easier, for instance you can generate a "wheel" macro and instantiate that 6 times with different parameters to put 6 wheels on your robot, as opposed to copying and pasting the same code six times manually. Originally posted by Stefan Kohlbrecher with karma: 24361 on 2015-01-31 This answer was ACCEPTED on the original site Post score: 15 Original comments Comment by Oguz on 2015-01-31: Thanks, then I will stick to my SolidWorks Exporter generated URDF. Comment by Yanhao Zhu on 2019-08-17: Also new to urdf and have a similar confusion. Really a good answer!
{ "domain": "robotics.stackexchange", "id": 20744, "tags": "microcontroller, urdf, xacro, plugin, transmission" }
Deutsch's algorithm in Qiskit
Question: I am trying to understand and implement the Deutsch algorithm. I follow the logic from Nielsen book and I started to implement it in Qiskit. For implementing the oracle, I use a CNOT gate and now I have this circuit: Every time I ran it, the first qubit is always in state $|1\rangle$. I am not sure if this is what I expect. Shouldn't it be sometimes $|1\rangle$ and sometimes $|0\rangle$? Answer: CNOT gate is an example that implements a balanced function for which $f(0) = 0$ and $f(1) = 1$: \begin{equation} CNOT \frac{1}{2}\left(|0\rangle + |1\rangle \right) \left(|0\rangle - |1\rangle \right) = \\ = \frac{1}{2}|0\rangle \left(|0 \oplus f(0)\rangle - |1 \oplus f(0)\rangle \right) + \frac{1}{2}|1\rangle \left(|0 \oplus f(1)\rangle - |1 \oplus f(1)\rangle \right) = \\ = \frac{1}{2} \left(|0\rangle - |1\rangle \right) \left(|0\rangle - |1\rangle \right) \end{equation} $$H \otimes I \frac{1}{2}\left(|0\rangle - |1\rangle \right) \left(|0\rangle - |1\rangle \right)= \frac{1}{\sqrt{2}}|1\rangle \left(|0\rangle - |1\rangle \right)$$ It means that if we will do everything right we always should obtain (for balanced functions $f(0) \ne f(1)$) $|1\rangle$ outcome in the Deutsch algorithm. For more look at 1.44 equation (page 33) in the M. Nielsen and I. Chuang textbook, where one can find the final state before the measurement. In the 1.44 system, one can see that if $f(0) = f(1)$ then the first qubit will be in $|0\rangle$ state and if $f(0) \ne f(1)$ (like the case with CNOT) then the first qubit will be in $|1\rangle$ state before the measurement.
{ "domain": "quantumcomputing.stackexchange", "id": 1507, "tags": "qiskit, nielsen-and-chuang, deutsch-jozsa-algorithm" }
Difference between product automaton vs. NFA with epsilon transitions
Question: Let $L_1, L_2$ be regular languages. You can prove with a product automaton that $L_1\cup L_2$ is also regular. But you can also construct an NFA. Since $L_1, L_2$ are regular, they also have an NFA representation $M_1, M_2$. You can create an NFA with a start state $q_0$ with one edge each to the start states of $M_1,M_2$ with an epsilon transition. So I wonder what is the difference, advantages and disadvantages? Thx! Answer: Both constructions work. The advantage of using NFAs is that they allow easier constructions, especially for languages that can be naturally specified using existential quantifiers. So if we only care about proving whether some language is regular, then easy design wins. As you've written, putting two automata next to each other and guessing which automaton to run on the input word is a straight-forward construction compared to the product construction, and indeed, in this case we want to check whether there exists an automaton (one of the two automata that we started with) that accepts the input word: the construction simply guesses which automaton to run on the input. Another classical example is closure of regular languages under concatenation, where we want to check whether there exists a partition of the input word into two words such that the first word is accepted by one automaton, and the second word is accepted by the other automaton. Here, we guess while reading the input word when we branch and run the second automaton on the suffix to be read. Also, many more examples exist where NFAs allow for conceptually easier constructions. It's just that existential properties and NFAs work well with each other due to the existential semantics of nondeterminism: a nondeterministic automaton accepts an input if there exists a run of it on the input. The disadvantage of such constructions is that while they are conceptually simple, they do not necessarily preserve determinism.
So while nondeterminism allows more elegant designs, we prefer constructions that end up with deterministic automata (preferably small ones) since decision procedures about deterministic automata are considerably easier than decision procedures about nondeterministic automata: language containment, minimization, etc., are all in PTIME for deterministic automata, yet PSPACE-complete for nondeterministic ones. In this sense, the product construction is better as it preserves determinism (the product of deterministic automata is deterministic) and results in an automaton of quadratic size (which is at most polynomial in the two automata that we started with -- good!).
{ "domain": "cs.stackexchange", "id": 21833, "tags": "automata, finite-automata" }
Measuring Weight with Multiple Scales
Question: If I am weighting a long tube of (nearly) uniform weight with multiple hang-scales (really a 10-ft long Helical Antenna), how do I calculate the total weight from the multiple scales? Can I just add the measured weights at each point regardless of the spacing? If the spacing is non-uniform, does the total weight calculation change? Answer: Assuming that antenna is in static equilibrium (i.e. not moving) the only forces holding it up are forces exerted by the scales, the forces applied by the scale are applied directly upwards (this one is important!) then the weight will in fact equal the sum of the forces exerted by each scale. So you can sum up all the measurements of each scale. The spacing does not matter, but, again, ensure that the strings holding up the antenna are pointing straight up (anti-parallel to gravity force). Otherwise, you will have to measure the angle at which these forces are applied, and account for that. If you want to test out the accuracy of this technique (which I strongly recommend -- just because this is sound on paper does not mean it'll work well given the outlined constraints), I would suggest to weigh a more lightweight object (like a plank, similar length to your antenna) with initially 1 scale, then 2 scales, then 3, etc. and see how much each set of results deviates. EDIT: user alephzero makes some excellent suggestions in the comments.
{ "domain": "physics.stackexchange", "id": 74690, "tags": "measurements, weight" }
Can a scientific theory ever be absolutely proven?
Question: I personally cringe when people talk about scientific theories in the same way we talk about everyday theories. I was under the impression a scientific theory is similar to a mathematical proof; however a friend of mine disagreed. He said that you can never be absolutely certain and a scientific theory is still a theory. Just a very well substantiated one. After disagreeing and then looking into it, I think he's right. Even the Wikipedia definition says it's just very accurate but that there is no certainty. Just a closeness to potential certainty. I then got thinking. Does this mean no matter how advanced we become, we will never become certain of the natural universe and the physics that drives it? Because there will always be something we don't know for certain? Answer: Simple Answer: Nothing is guaranteed 100%. (In life or physics) Now to the physics part of the question. Soft-Answer: Physics uses positivism and observational proof through the scientific process. No observation is 100% accurate there is uncertainty in all measurement but repetition gives less chance for arbitrary results. Every theory and for that matter laws in physics are observational representations that best allow prediction of future experiments. Positivism can overcome theological and philosophical discrepancies such as what is the human perception of reality. Is real actually real type questions. The scientific process is an ever evolving representation of acquired knowledge based on rigorous experimental data. No theory is set in stone so to speak as new results allow for modification and fine tuning of scientific theory.
{ "domain": "physics.stackexchange", "id": 32075, "tags": "soft-question, epistemology" }
Byte conversion in my Caesar's cipher
Question: I was wondering if I am doing any useless conversions byte←→int, for example: byte((int(ch-'A')+shift)%26 + 'A') Converting ch-'A' to an int is because the shift argument can be negative (to implement the decode function). I couldn't figure out a simpler way to negate the operation using bytes. package main import ( "bufio" "flag" "fmt" "os" ) func main() { shift := flag.Int("shift", 13, "Cipher shift") decode := flag.Bool("decode", false, "Decode input") flag.Parse() scanner := bufio.NewScanner(os.Stdin) for scanner.Scan() { if *decode { fmt.Println(Decode(scanner.Text(), *shift)) } else { fmt.Println(Encode(scanner.Text(), *shift)) } } } func Encode(s string, shift int) string { return cipher(s, shift) } func Decode(s string, shift int) string { return cipher(s, -shift+26) } func cipher(s string, shift int) string { var line string for _, ch := range []byte(s) { if ch >= 'A' && ch <= 'Z' { ch = byte((int(ch-'A')+shift)%26 + 'A') } else if ch >= 'a' && ch <= 'z' { ch = byte((int(ch-'a')+shift)%26 + 'a') } line += string(ch) } return line } Answer: Flags It's not very problematic for very short programs like this, but better get good habits: flags should be outside of all code blocks; so that you detect immediately if you have a flag naming conflict between different files. var ( shiftF = flag.Int("shift", 13, "Cipher shift") decodeF = flag.Bool("decode", false, "Decode input") ) func main() { … } Code organization You're checking for the value of the -decode flag every time you scan a line, and recomputing -shift+26 every time as well: not very efficient, nor super readable. As you noticed, the only difference between encoding and decoding is the value of shift. Why not do something like: func main() { flag.Parse() // we could also directly change the value of the flag, but I find it // less readable — it's better to treat flag values as immutable. 
var shift int if *decode { shift = 26 - *shiftF } else { shift = *shiftF } scanner := bufio.NewScanner(os.Stdin) for scanner.Scan() { cipher(scanner.Text(), shift) } } Type conversions bufio.Scanner holds bytes internally: the doc for Scanner.Bytes says: The underlying array may point to data that will be overwritten by a subsequent call to Scan. It does no allocation. while Scanner.Text indicates that it returns: a newly allocated string. So here, you allocate a string to copy bytes; and then you allocate a new slice of type []byte to copy this string, which is also an expensive operation. So you're saving two conversions by not dealing with strings at all: func main() { … for scanner.Scan() { fmt.Printf("%s\n", cipher(scanner.Bytes(), shift)) } } func cipher(bytes []byte, shift int) []byte { var line []byte for _, b := range bytes { if b >= 'A' && b <= 'Z' { b = byte((int(b-'A')+shift)%26 + 'A') } else if b >= 'a' && b <= 'z' { b = byte((int(b-'a')+shift)%26 + 'a') } line = append(line, b) } return line } Note the slight change in the way you print the result: using fmt.Println on a []byte variable will print the numeric value of each byte, something like [116 104 101 32 103 97 109 101], so you have to use fmt.Printf to tell it to print it like a string. You're also wondering whether the conversions from byte to int are inefficient: kind of, yes. Arithmetic operations work on bytes, so you could do instead: shiftB := byte(shift) b = ((b-'A')+shiftB)%26 + 'A' However, this is just a simple type conversion: the performance gain from it will be much lower than when avoiding memory allocations, so you shouldn't worry about it too much =) Optimizations Initializing a []byte variable and appending char by char to it is not great: the slice is going to be re-sized every time it hits its maximum default size. It's even worse in your code: you're initializing a string variable and appending char by char to it, which will reallocate the string each time. 
In cipher, you know in advance which size line is going to be: the same as the original slice. So you can initialize directly with the correct allocated capacity: line := make([]byte, 0, len(bytes)) okay, now let's go even further: calling append at each iteration means that under the hood, there will be a check to see if the slice over capacity. We know it's not going to happen, because the sizes are the same: why not always allocate the exact amount of memory that we need and write at the right place directly? func cipher(bytes []byte, shift int) []byte { shiftB := byte(shift) line := make([]byte, len(bytes)) for i, b := range bytes { if b >= 'A' && b <= 'Z' { b = ((b-'A')+shiftB)%26 + 'A' } else if b >= 'a' && b <= 'z' { b = ((b-'a')+shiftB)%26 + 'a' } line[i] = b } return line } We can go even further by noticing that the bytes argument won't be used after we call cipher. So we can re-use this slice instead of allocating a new one! func cipher(bytes []byte, shift int) []byte { shiftB := byte(shift) for i, b := range bytes { // shift b… bytes[i] = b } return bytes // optional: callers could simply reuse the argument } Final words The code we obtain is more readable, definitely more efficient; and has the same behavior as yours. However, there are still two potential issues with it: Encoding: we're considering every byte of the input separately, as if it were ASCII; so it will probably not do what you want if the input is in UTF-8. Input size: if the input has very long lines, then using bufio.NewScanner is a bad idea; see for example this SO question.
{ "domain": "codereview.stackexchange", "id": 23338, "tags": "go, caesar-cipher, casting" }
Roslaunch not working
Question: I have two nodes - Mission Control and Simulator. Mission control publishes to command topic and subscribes to telemetry topic while simulator node subscribes to command topic and publishes to telemetry topic. When I use rosrun both runs fine but only mission control node runs when I use roslaunch command. the printed message on the terminal is "The Goal Latitude is 33 and Goal Longitude is -84" and it keeps on running ... No error message. I also know that both nodes are running (used rosnode list to check it). Also both telemetry topic and commands topic are running (rostopic list) The following is my XML code. Please help. Thanks in advance. <launch> <group> <node pkg="groundcontrolstation" type="missioncontrol.py" name="mission_control" output="screen"/> <node pkg="groundcontrolstation" type="simulator.py" name="simulator" output="screen" /> </group> </launch> Originally posted by prr_shyam on ROS Answers with karma: 3 on 2015-06-02 Post score: 0 Original comments Comment by gvdhoorn on 2015-06-03: As already said in your duplicate post, please include any error messages you get. There is not enough information in your post to help you. Comment by prr_shyam on 2015-06-03: hello as I mentioned above in the post , I am not getting any error message the node keeps on running but one more thing I found out was while using rosrun I have to run simulator node before mission control node for it to work otherwise between the nodes no data is passed. Thank you Answer: If I understand your question: when you run the simulator.py node before missioncontrol.py everything is okay, but when you run the two nodes simultaneously (using the launch file) no data is passed between them. In other terms: the start up order is very important in your case. 
First, the documentation of roslaunch specify clearly that : Roslaunch intentionally does not provide any control on the order or timing of node start up So if you want that the simulator.py node run before missioncontrol.py, you can simply add a waiting time in the code of this last one, for example: rospy.sleep(2) This will force the second node to sleep for two seconds so that the first node will have all the time necessary to initialize and publish its messages. However, this is like a "brute force" solution, and a nicer one will be to use the following function: wait_for_message(topic, topic_type, timeout=None) which will block the execution of the script until a message is published in the specified topic. Originally posted by Horse-man with karma: 229 on 2015-06-03 This answer was ACCEPTED on the original site Post score: 1 Original comments Comment by prr_shyam on 2015-06-05: Thanks a lot. It works now. Comment by Horse-man on 2015-06-05: you're welcome.
{ "domain": "robotics.stackexchange", "id": 21827, "tags": "roslaunch" }
rosbag 'UnicodeDecodeError'
Question: I have some rosbags. Most of them seem to be okay according to 'rosbag check '. But a few of them cause the check to be aborted in the middle of the bag with the error below. What is the best way to avoid this decoding error? Traceback (most recent call last): File "/opt/ros/indigo/bin/rosbag", line 35, in <module> rosbag.rosbagmain() File "/opt/ros/indigo/lib/python2.7/dist-packages/rosbag/rosbag_main.py", line 863, in rosbagmain cmds[cmd](argv[2:]) File "/opt/ros/indigo/lib/python2.7/dist-packages/rosbag/rosbag_main.py", line 452, in check_cmd migrations = checkbag(mm, args[0]) File "/opt/ros/indigo/lib/python2.7/dist-packages/rosbag/migration.py", line 76, in checkbag for topic, msg, t in bag.read_messages(raw=True): File "/opt/ros/indigo/lib/python2.7/dist-packages/rosbag/bag.py", line 2331, in read_messages yield self.seek_and_read_message_data_record((entry.chunk_pos, entry.offset), raw) File "/opt/ros/indigo/lib/python2.7/dist-packages/rosbag/bag.py", line 2469, in seek_and_read_message_data_record msg_type = _get_message_type(connection_info) File "/opt/ros/indigo/lib/python2.7/dist-packages/rosbag/bag.py", line 1565, in _get_message_type message_type = genpy.dynamic.generate_dynamic(info.datatype, info.msg_def)[info.datatype] File "/opt/ros/indigo/lib/python2.7/dist-packages/genpy/dynamic.py", line 168, in generate_dynamic tmp_file.file.write(full_text.encode()) UnicodeDecodeError: 'ascii' codec can't decode byte 0xc3 in position 2788: ordinal not in range(128) Originally posted by Sebastian Rockel on ROS Answers with karma: 23 on 2016-07-28 Post score: 1 Original comments Comment by tik0 on 2017-06-08: Please sign the answer as correct if it was helpful. Answer: If you take a look at the error you'll see that it is raised by the message definition which rosbag tries to interpret. You possibly have personal message definitions with UTF-8 characters in it. 
While the tool is written in python, there always remains the pain with UTF-8 decoding what you can see in your last line: UnicodeDecodeError: 'ascii' codec can't decode byte 0xc3 in position 2788: ordinal not in range(128) Just check which of your message definitions are UTF-8 encoded by the command line tool file yourMessageDefinition.msg. If it tells you ASCII text the definition should be OK, if UTF-8 Unicode text it is not. Just open the file and substitute all special characters like µ or ⊘ even in your comments. If you are unsure which character in your file is special, just open up a hex editor (like ghex), open the file and search for characters which occupy more than one byte. Then re-build the message definitions. Originally posted by tik0 with karma: 220 on 2016-08-29 This answer was ACCEPTED on the original site Post score: 2 Original comments Comment by Kansai on 2021-04-22: My defintion is ASCII Text and yet I got this error when running rqt_bag
{ "domain": "robotics.stackexchange", "id": 25386, "tags": "rosbag" }
Reading meters with tensorflow
Question: I'm new to ML world and been reading about ML and TensorFlow. My goal is to read the following example in real time with Android phone: So I tried firebase ML OCR and it works really good, it reads the complete value but it does not read the decimal point and also reads a lot of the surrounding text. So my idea is that I should first detect black and red bounding boxes and then detect individual numbers inside is this the right way to go? How would I accomplish this? Also how do you use two kinds of a model, one to extract a part of the image (black and red bounding areas) and then pass them to OCR model? What about last digit which can always be in between two numbers (example: 1 and 2)? Answer: Two options : Use pre-built libraries for OCR + Bounding Box detection (E.g.: https://www.pyimagesearch.com/2018/08/20/opencv-text-detection-east-text-detector/ for Bouding Box detection and then OpenCV / Tessract for OCR) Train a Deep learning model for Text Detection in scene . Examples : https://github.com/qjadud1994/CRNN-Keras and https://github.com/mvoelk/ssd_detectors
{ "domain": "datascience.stackexchange", "id": 4680, "tags": "machine-learning, tensorflow, ocr" }
catkin command passes with non-existent ROS package "pcl"
Question: In pcl tutorial, catkin_create_pkg my_pcl_tutorial pcl pcl_ros roscpp sensor_msgs AFAIK there's no package called pcl in hydro, and catkin_create_pkg only takes catkin packages as an argument, but this command passes without errors and creates a package. Why is it? Originally posted by 130s on ROS Answers with karma: 10937 on 2014-01-01 Post score: 0 Answer: Hello! When you call catkin_create_pkg it just lists everything after the package name as a dependency in the package.xml file, so you could probably put anything in there and it would at least successfully run that command. It may not actually compile once you get to the catkin_make step. Also as a side note: PCL is the Point Cloud Library, and the ros package for it is called ros-hydro-pcl-ros in your package manager (assuming you're using Ubuntu). -Tim Originally posted by Tim Sweet with karma: 267 on 2014-01-01 This answer was ACCEPTED on the original site Post score: 0 Original comments Comment by ahendrix on 2014-01-02: Added to that, the upstream PCL package is called pcl, which is a valid rosdep dependency.
{ "domain": "robotics.stackexchange", "id": 16552, "tags": "pcl, ros-hydro" }
Which Enzymes are Responsible for the Biodegradation of Beta-endorphin?
Question: Which enzymes are responsible for the biodegradation of the endogenous opioid peptide, beta-endorphin? Answer: As far as I have been able to find out, there isn't a definitive answer to this question. β endorphin is a peptide, and it would seem that there are numerous brain peptidase enzymes that are implicated in the hydrolysis of β endorphin and other peptide neurotransmitters. These include aminopeptidase N, membrane-dipeptidase A, angiotensin-converting enzyme and neutral endopeptidase (also called enkephalinase). For an early (1984) review of this topic see here; there doesn't seem to be a more up-to-date review, but there is quite a bit of literature on the effects of inhibitors of these various enzymes.
{ "domain": "biology.stackexchange", "id": 737, "tags": "biochemistry, enzymes, neurotransmitter" }
Show the results of linked list operations in sequence
Question: I'm working on a basic linked list programs. In this program I can insert elements at the beginning, at the middle (after one specific element) of the list and at the end. I also can delete a specific element or delete an element from the end of the list. I have all these four methods in the code. Now I was trying in the main method to perform this operations in sequence: insert at the beginning insert at the middle delete a specific element delete an element from the list. First I want to add 10 elements at the beginning of the list {50, 70, 80, 100, 77, 200, 44, 70, 6, 0}. Then I want to add 10 elements at the middle { 5, 20, 10 ,30 ,7 , 8, 2, 104, 1, 22} after the element 200. Then I want to add 10 elements at the end {40, 30, 20, 1, 7, 76, 4 , 0, 80, 2}. Then I want to delete a specific element 76 and then delete two elements at the end of the list. So, I want the final list as {0, 6, 70, 44, 200, 22, 1, 104, 2, 8, 7, 30, 10, 20, 5, 77, 100, 80, 70, 50, 40, 30, 20, 1, 7, 4, 0}. I don't know if it's a good way. Do you think the code in the main method is OK, the for parts particularly? 
Main method: int main() { printf("hi"); int i=0; int listsize=10; int arrBegining[] = {50,70, 80, 100, 77, 200, 44, 70, 6, 0}; int arrMiddle[] = {5, 20, 10, 30, 7, 8, 2, 104, 1, 22}; int arrEnd[] = {40, 30, 20, 1, 7, 76, 4 , 0, 80, 2}; for(i=0;i<listsize;i++){ insert_at_begning(arrBegining[i]); } for(i=0;i<listsize;i++){ insert_at_middle(arrMiddle[i], 200); } for(i=0;i<listsize;i++){ insert_at_end(arrEnd[i]); } for(i=0;i<listsize;i++){ delete_from_middle(76); } for(i=0;i<2;i++){ delete_from_end(); } } List operations: void insert_at_begning(int value) { var=(struct node *)malloc(sizeof (struct node)); var->data=value; if(head==NULL) { head=var; head->next=NULL; } else { var->next=head; head=var; } } void insert_at_middle(int value, int loc) { struct node *var2,*temp; var=(struct node *)malloc(sizeof (struct node)); var->data=value; temp=head; if(head==NULL) { head=var; head->next=NULL; } else { while(temp->data!=loc) { temp=temp->next; } var2=temp->next; temp->next=var; var->next=var2; } } int delete_from_middle(int value) { struct node *temp,*var; temp=head; while(temp!=NULL) { if(temp->data == value) { if(temp==head) { head=temp->next; free(temp); return 0; } else { var->next=temp->next; free(temp); return 0; } } else { var=temp; temp=temp->next; } } printf("data deleted from list is %d",value); } int delete_from_end() { struct node *temp; temp=head; while(temp->next != NULL) { var=temp; temp=temp->next; } if(temp ==head) { head=temp->next; free(temp); return 0; } printf("data deleted from list is %d",temp->data); var->next=NULL; free(temp); return 0; } Answer: Typo in method name? insert_at_begning looks like it should be called insert_at_beginning? insert_at_middle The method name doesn't seem to match what it actually does. It looks more like it's insert_after. It also looks like it has a memory leak, what happens if you attempt to insert after a value that isn't currently in the list? var var is not a good name for a variable, it is totally nondescript. 
It is even worse when it is declared at a global/file scope where it's declaration isn't even visible. You appear to be using a global variable var in your insert methods, however have a local var in your delete_from_middle method. This is confusing. Variable should be named after what they represent from a logical perspective. You should also try to avoid using the same name for variables in nested scopes. main Your main looks mostly ok as a test harness, however it's rather inefficient. Since your list doesn't maintain a tail pointer, inserting at the end is much slower than inserting at the beginning. Similarly, inserting in the middle is relatively slow. It would be more efficient to insert everything from the end to the front, rather than from the front to the end. I'd also personally store the arrays to be inserted in the order that I wanted them to be in the list (not backwards), then iterate through them in reverse order. I know this has the same effect, but when I look at the code I would then be able to see the list rather than having to mentally flip the arrays.
{ "domain": "codereview.stackexchange", "id": 23140, "tags": "c, linked-list" }
Show $\epsilon e^{{i}{\vec\phi\over2}\cdot \vec{\sigma}^*} (-\epsilon) =e^{-{\vec\phi\over2}\cdot \vec\sigma} $ for Pauli matrices
Question: In pg. 76 of the Physics from Symmetry book, it was stated that the following relation is true: $$\epsilon e^{{i}{\vec\phi\over2}\cdot \vec{\sigma}^*} (-\epsilon) =e^{-i{\vec\phi\over2}\cdot \vec\sigma} $$ where $\epsilon = \begin{pmatrix} 0 & 1 \\ -1 & 0\end{pmatrix}$, $\vec\phi$ is a constant vector and $\vec\sigma = (\sigma_x,\sigma_y,\sigma_z)$ represent the Pauli matrices. But for a function $f(\epsilon \sigma_i \epsilon^{-1})$ It was stated that this was shown true by using the fact that $$\epsilon\sigma_i^*(-\epsilon)=-\sigma_i.$$ I had been able to prove that the first relation is true up to a second order expansion in Pauli matrices explicitly. However, is there an easier way to see that it is true generally without having to explicitly expand to the $n^\text{th}$ order? Edit: I had tried the methods the comments and answer had suggested but is still unsuccessful. The main challenge I face is that in the expansion of $\epsilon e^{{i}{\vec\phi\over2}\cdot \vec{\sigma}^*} (-\epsilon)$ there will be cross terms like $\epsilon {\sigma_x^*}^n {\sigma_y^*}^m {\sigma_z^*}^l (-\epsilon)$ where $n,m,l$ are integers. If it can be shown that $\epsilon {\sigma_x^*}^n {\sigma_y^*}^m {\sigma_z^*}^l (-\epsilon) = (-1)^{n+m+l} {\sigma_x}^n {\sigma_y}^m {\sigma_z}^l $ , then the first relation will be true. However, I don't know how to show it. Answer: $$\epsilon = i\sigma_2 = -\epsilon^{-1}, \\ \leadsto \epsilon \sigma_i \epsilon^ {-1}= -\sigma_i^*, $$ the conjugate representation. As a similarity transformation it leads to $$ \epsilon \sigma_i^n \epsilon^ {-1}= (-\sigma_i^*)^n, \leadsto \\ f(\epsilon \sigma_i \epsilon^ {-1})= f(-\sigma_i^*), $$ for any function f regular at the origin, including the exponential.
{ "domain": "physics.stackexchange", "id": 73783, "tags": "group-theory" }
Where does the light of the Big Bang come from?
Question: I'm wondering whether the residual light of the Big Bang comes from one particular direction and what possibilities do we have to detect its position? Answer: By "the light from the big bang", you must mean the cosmic microwave background, which did not come from the big bang directly, but was emitted during recombination about 400,000 years after the big bang. At that time, it was emitted from basically all directions and locations in space. The universe was born hot, and cooled gradually as it expanded, meaning the photons were becoming gradually less energetic. Photons and matter in the universe were interacting constantly, and light could not travel very far before interacting with protons and electrons and changing direction. Electrons and protons were also coming together to form neutral hydrogen atoms, but these were quickly dissociated by photons. The cosmic microwave background was emitted when there were no longer enough photons with sufficient energy to break neutral hydrogen apart in to free protons and electrons. Once this became true, photons just kept on traveling in whatever direction they were going, without interacting. As this happened everywhere in the universe at just about the same time, you can see light that is part of the cosmic microwave background from any point in the universe, that will happen to show you light that is 13.7 billion years old (the time in the past when the cosmic microwave background was emitted).
{ "domain": "physics.stackexchange", "id": 3028, "tags": "cosmology, electromagnetic-radiation, big-bang, cosmic-microwave-background" }
Unfair semaphore in Java
Question: I'm tutor for a university operating systems course, and part of my work is creating exercises for them. Those exercises are mostly about concurrency and synchronization issues. For the next exercise I intend to show them the finished implementation of an unfair counting semaphore, and their job is to make a fair one. Because of a lot of factors I'm constrained to using Java as implementation language. Though I'm fully capable of writing Java programs, it's not exactly my "mother tongue". As such, I'm probably unaware of some idioms and best practices. As I want to give my students not just something that "works", but code that could actually be found in a production system, I hope you can give me suggestions for my implementation: /** Simple, unfair counting semaphore. This class implements a simple and unfair counting semaphore. */ public class UnfairSemaphore implements Semaphore { /** Count of available permits. */ protected int count; /** Object used to communicate with waiting threads. A distinct object is used herefore (and not the semaphore itself) to keep any signals from notify() behind the abstraction. */ protected Object wire = new Object(); /** Construct a semaphore with the given initial count. Sets the initial count of the newly constructed semaphore to the given value, which may be negative. */ public UnfairSemaphore(int initial) { count = initial; } /** Construct a semaphore with 1 initial permit. */ public UnfairSemaphore() { this(1); } // Implementation public void acquire() throws InterruptedException { synchronized (wire) { while (count <= 0) { wire.wait(); } --count; } } // Implementation public void release() { synchronized (wire) { ++count; if (count > 0) { wire.notifyAll(); } } } } Semaphore is an interface with just the two methods acquire and release. 
I'm using the member wire to synchronize on and to call notifyAll and wait in order to keep the signals resulting from these "behind the abstraction", that is to make them invisible to the code using the semaphore. (I could use just notify here, couldn't I?) I'm neither interested in making this code faster or to use different, possibly better classes (Atomic* or whatever) to implement this. What I do want is code that correctly handles all cases. (I just don't want to give them "educational code" which shows the idea, but fails to reliably work in practice) Answer: I don't like using the object as a mutex and Condition variable. Doing that would let another programmer spin up a thread to lock and never release the Semaphore. Instead you should either provide a private Object lock = new Object(); and sync on that or use the java.util.concurrent.locks.ReentrantLock and its condition: private final Lock mutex = new ReentrantLock(); private final Condition condition = mutex.newCondition(); public void acquire() throws InterruptedException { mutex.lockInterruptibly();//let interrupt throw as soon as possible try{ while (count <= 0) { condition.await(); } --count; } finally { mutex.unlock(); } } // Implementation public void release() { mutex.lock(); try{ ++count; if (count > 0) { condition.signal(); } } finally { mutex.unlock(); } } This makes it easy to change the semaphore to a fair one by passing true to the constructor. Though I expect you will want to implement the wait queue yourself as an example. If you want to go a step lower and no use the pre-implemented locks can instead use LockSupport which lets to park the current thread and unpark it from another thread safely.
{ "domain": "codereview.stackexchange", "id": 17298, "tags": "java, concurrency" }
Springer book downloader in Kotlin
Question: I'm starting with Kotlin recently and am hoping to improve. I wrote a small app to parse the list of free Springer books and download the books into your chosen local folder. Comments around obvious mistakes, unidiomatic Kotlin, any other points of improvements will be greatly appreciated. Thank you. Gradle dependencies: implementation("org.apache.poi:poi:4.1.2") implementation("org.apache.poi:poi-ooxml:4.1.2") implementation("org.jsoup:jsoup:1.13.1") Kotlin code: package dev.rayfdj.kotlinutils.springer import org.apache.poi.ss.usermodel.WorkbookFactory import org.jsoup.Jsoup import java.io.File import java.io.FileOutputStream import java.net.URL import java.nio.channels.Channels import java.nio.file.Files import java.nio.file.Path import java.nio.file.Paths data class Book(val title: String, val author: String, val edition: String, val year: String, val category: String, val url: String) { fun suggestedFileName(): String { return "$title, $edition - $author.pdf" } } fun extractBooksFromExcelFile(xlsxFile: File): List<Book> { // drop(1): skip the first row because it contains the headers return WorkbookFactory.create(xlsxFile).getSheetAt(0).drop(1).map { Book(it.getCell(0).stringCellValue, it.getCell(1).stringCellValue, it.getCell(2).stringCellValue, it.getCell(4).numericCellValue.toString(), it.getCell(11).stringCellValue, it.getCell(18).stringCellValue) } } fun deriveFullLocalPathForBook(downloadFolder: String, book: Book): Path { val fullLocalFileName = arrayOf( downloadFolder, book.category, book.suggestedFileName()).joinToString(separator = File.separator) return Paths.get(fullLocalFileName) } fun createDirectoriesAndFile(fullLocalFilePath: Path) { Files.createDirectories(fullLocalFilePath.parent) if(!Files.exists(fullLocalFilePath)) { Files.createFile(fullLocalFilePath) } } fun getBookDownloadURL(book: Book): URL { val bookPage = Jsoup.connect(book.url).get() val bookCanonicalURL = bookPage.select("link[rel=canonical]").attr("href") val 
bookCanonicalPage = Jsoup.connect(bookCanonicalURL).get() val bookPDFRelativeURL = bookCanonicalPage.select("a[href^=\"/content/pdf/\"]").attr("href") return URL("https://link.springer.com${bookPDFRelativeURL}") } fun downloadAndSaveBook(bookDownloadURL: URL, fullLocalFilePath: Path) { Channels.newChannel(bookDownloadURL.openStream()).use { inChannel -> FileOutputStream(fullLocalFilePath.toFile()).channel.use { outChannel -> print("Saving $bookDownloadURL to $fullLocalFilePath... ") outChannel.transferFrom(inChannel, 0, Long.MAX_VALUE) println("DONE.") } } } fun main(args: Array<String>) { if(args.size != 2) { println("Please pass <full_path_to_springer_excel_file> and <full_path_to_download_folder") kotlin.system.exitProcess(-1) } val (excelFile, downloadFolder) = args val books = extractBooksFromExcelFile(File(excelFile)) books.forEach { book -> val fullLocalFilePath = deriveFullLocalPathForBook(downloadFolder, book) createDirectoriesAndFile(fullLocalFilePath) val bookDownloadURL = getBookDownloadURL(book) downloadAndSaveBook(bookDownloadURL, fullLocalFilePath) } } Answer: It's not exact translation of your code, but still :) I've just tried! Unfortunately, it's not a github project, so I don't have an access to the excel sheet, so I can't really run and test it. But, anyways, ideas are: Utilization of toString() java method Using of lazy properties allows to 'cache' the URL easily. It's also possible because all of the Books properties are immutable. Extension method download() doesn't really belong to a book, but, instead, can easily download any URL I used require(), though, it's not really correct, since it throws an exception, instead of gracefully exiting the app. I used it, just to demonstrate it, nothing more. And, probably, something else. Please, tell me what you're thinking! 
data class Book( val title: String, val author: String, val edition: String, val year: String, val category: String, val url: String ) { constructor(row: Row) : this( row.getCell(0).stringCellValue, row.getCell(1).stringCellValue, row.getCell(2).stringCellValue, row.getCell(4).numericCellValue.toString(), row.getCell(11).stringCellValue, row.getCell(18).stringCellValue ) override fun toString() = "$title, $edition - $author" val downloadURL by lazy { val canonicalURL = Jsoup.connect(url).get().select("link[rel=canonical]").attr("href") val pdfRelativeURL = Jsoup.connect(canonicalURL).get().select("""a[href^="/content/pdf/"]""").attr("href") URL("https://link.springer.com${pdfRelativeURL}") } } fun URL.download(to: Path): Path { Files.createDirectories(to.parent) Channels.newChannel(openStream()).use { inChannel -> FileOutputStream(to.toFile()).channel.use { outChannel -> print("Saving $this to $to... ") outChannel.transferFrom(inChannel, 0, Long.MAX_VALUE) println("DONE.") } } return to } fun main(args: Array<String>) { require(args.size == 2) { "Please pass <full_path_to_springer_excel_file> and <full_path_to_download_folder" } val (excelPath, downloadFolder) = args val excelFile = WorkbookFactory.create(File(excelPath)) // drop(1): skip the first row because it contains the headers val books = excelFile.getSheetAt(0).drop(1).map { Book(it) } books.map { book -> val path = Paths.get(downloadFolder, book.category, "$book.pdf") book.downloadURL.download(to = path) } } ```
{ "domain": "codereview.stackexchange", "id": 38306, "tags": "beginner, kotlin" }
Time calculator for a given speed and file size
Question: I wrote this little code in c++. It calculates the needed time for a given speed of a medium (for example the speed is 1024 B/s, the file size is 1MB, it'll take 17 minutes and 4 seconds to finish). The code works, but I'm not sure if it's proper. Can you tell me if it's okay or not? #include <iostream> int main() { int transmission_speed; //speed of the transmission in bytes per seconds int file_size_mb; //file's size in MBs std::cout << "Enter the speed of the transmission in bytes per seconds: "; std::cin >> transmission_speed; std::cout << "Enter the file's size in megabytes: "; std::cin >> file_size_mb; int file_size_b = file_size_mb *1024*1024; int seconds_needed = file_size_b / transmission_speed; int days_needed = (seconds_needed / 3600) / 24; seconds_needed -= days_needed*86400; int hours_needed = seconds_needed / 3600; seconds_needed -= hours_needed*3600; std::cout << "Days needed: " << days_needed << std::endl; std::cout << "Hours needed: " << hours_needed << std::endl; std::cout << "Seconds needed: " << seconds_needed << std::endl; return(0); } Answer: It's a very simple little program, however, there's a few comments to make: All of the code is in main. Of course, for a program of this size, that doesn't really matter too much, however, you should prefer to break things up into self-contained functions where possible. For example, there should be a separate function here to actually do the calculations. Use the correct data type for what you need. Can time ever be negative here? The answer is no, so I'd prefer to use unsigned over int. You should be somewhat careful about overflow. Any filesize over 2048MB (2GB) will overflow the size of an int (assuming a 4 byte int - in actuality, an int is only technically required to be at least 2 bytes). Using an unsigned will change this to 4GB, however, if you expect any filesizes larger than that, you should look at another datatype (std::uint64_t perhaps). Magic numbers. 
It's not so bad when dealing with time, because it's generally fairly obvious here, but numbers like 86400 shouldn't be shown as-is. They should be a named constant, such as const unsigned seconds_in_day = 86400. I'd suggest breaking this up into a main function and a required_time(unsigned transmission_rate, unsigned file_size) function.
{ "domain": "codereview.stackexchange", "id": 11390, "tags": "c++, algorithm" }
Kinematics and Acceleration Windows
Question: I am a CS student who is currently programming for a robotics project. The kinematic physics at play has me a bit confused, and I am wondering if someone can provide some clarity on this problem: I need my robot to accelerate to a cruising velocity, cruise for a bit, and then decelerate to a stop. The user defines the acceleration, deceleration, and cruising speed values. I have the ability to know how far the robot has traveled at any given point (polling the encoders). I recall the Kinematic equations: I solved equation 1 to be in terms of time and then substituted for time into equation 2. I then plug in my acceleration, initial velocity, and desired final velocity into the new equation and get a value for delta X. This is my "acceleration window." I do not want to used elapsed time as a metric because the processors being used are unreliable at reporting time accurately. If my robot is in the acceleration window, it gets a new velocity using equation 4 (providing the current speed it is moving at, the distance traveled, and the specified acceleration). If it has traveled outside of the acceleration window, it moves at the cruising velocity (no math needed). If it enters the deceleration window, the 4th equation is again used, but delta X is adjusted to be the distance from the start of the deceleration window to where we are now and acceleration is swapped to the user-specified deceleration. Unfortunately, these windows appear to be too large. The robot accelerates past the target cruising velocity, and it begins to decelerate too early. How can I determine the size of my acceleration and deceleration windows? 
Answer: Schematics : Time needed to accelerate to cruising speed or to decelerate it to zero speed is $$ t = v_c/a $$ cruising point $$ x_c = \frac{1}{2}a_{_+}t^2 = \frac{v_c^2}{2a_{_+}} $$ deceleration point $$ L - x_d= v_ct-\frac{1}{2}a_{_-}t^2 = \frac{v_c^2}{2a_{_-}} $$ So, $$ x_d = L - \frac{v_c^2}{2a_{_-}} $$ Where $v_c$ - cruising speed; $a_{_+}$, $a_{_-}$ - acceleration and deceleration; $L$ - total distance until stop point bounding conditions Keep in mind that if you want your robot to stop at maximum distance $L$ allowed to travel, then $$x_d \geq x_c$$ Substituting expressions we get above, results in boundary condition : $$ L \geq \frac{v_c^2}{2}\left(\frac{1}{a_{_+}}+\frac{1}{a_{_-}}\right)$$ If this condition is not met - your robot will not stop at destination, but pass through instead
{ "domain": "physics.stackexchange", "id": 62445, "tags": "kinematics" }
How to calculate the proper acceleration (as a 4-vector) in general relativity?
Question: I am trying to derive some equations which will let me simulate the motion of a spaceship in different geometries of spacetime. Suppose I know the metric and Christoffel symbols at the location of the spaceship, and I know the force applied on it (for example the spaceship is firing its thrusters). How do I calculate its coordinate acceleration from this information? What I have tried so far: As far as I understand the most natural thing to calculate from the force provided by the thrusters, is the proper acceleration of the spaceship. Then the wikipedia page for proper acceleration gives the following equation relating proper acceleration to coordinate acceleration: $$ A^{\lambda} := \frac{DU^{\lambda}}{d\tau} = \frac{dU^{\lambda}}{d\tau} + \Gamma^{\lambda}\,_{\mu\nu}\,U^{\mu}U^{\nu} $$ One can rearrange the equation (following the Wikipedia page) to $$ \frac{dU^{\lambda}}{d\tau} = A^{\lambda} - \Gamma^{\lambda}\,_{\mu\nu}\,U^{\mu}U^{\nu} $$ The rightmost term can be evaluated by just expanding the sum and putting in the Christoffel symbols for the spacetime around the black hole (which I can find in a reference). So far everything seems to make sense. However, finding $A^{\lambda}$ from the force provided by the thrusters is something I can't figure out. Wikipedia says this about the proper acceleration: ... is the object's proper-acceleration 3-vector combined with a null time component as seen from the vantage point of a reference or book-keeper coordinate system in which the object is at rest. From this I understand that $$ (A^{x}, A^{y}, A^{z}) = \mathrm{\frac{(vector\; force\; provided\; by\; thrusters)}{(mass\; of\; spaceship)}} $$ in the either the reference of the spaceship, or a stationary reference frame which is instantaneously comoving with the spaceship at a given moment in time (I can't tell which one). The part about the null time component completely confuses me. 
From what I understand of the wikipedia article, the proper acceleration is also equal to the covariant derivative of the coordinate velocity. However, I don't know much differential geometry and the covariant derivative is completely meaningless to me. Questions: How to go from force provided by thrusters (as a classical 3-vector) to proper acceleration as a 4-vector? Is it correct to say that the three spacelike components of the 4-vector proper acceleration are equal to the resultant acceleration in the frame of the spaceship? If not, how are they defined? What is the timelike component $A^{t}$ equal to and how can one work it out? Answer: How to go from force provided by thrusters (as a classical 3-vector) to proper acceleration as a 4-vector? I will introduce the concept of a momentarily co-moving inertial frame (MCIF). This is the inertial reference frame where the rocket is momentarily at rest. Of course, since the rocket is accelerating and the frame is inertial, the rocket does not remain at rest for more than just a moment. So the thrusters are usually described in terms of their force, $\vec F$ in the MCIF. So your 3-acceleration is just the usual Newtonian $\vec a = \vec F/m = (a_x,a_y,a_z)$. The four-acceleration in the MCIF is then simply $$\mathbf A = (0,\vec a) = (0, a_x, a_y, a_z)$$ To get the four-acceleration in another frame you simply use the Lorentz transform to transform to the required frame. Transforming to a frame which is moving relative to the MCIF at a velocity $v$ in the $x$ direction gives $$\mathbf A = \left(\gamma \frac{v}{c} a_x, \gamma a_x, a_y, a_z \right)$$ Is it correct to say that the three spacelike components of the 4-vector proper acceleration are equal to the resultant acceleration in the frame of the spaceship? If not, how are they defined? Not in the frame of the spaceship, which is non-inertial, but in the MCIF, yes. What is the timelike component At equal to and how can one work it out? See above. 
The "null" that was confusing you is simply the 0 for the time component in the MCIF.
{ "domain": "physics.stackexchange", "id": 78429, "tags": "general-relativity, differential-geometry, acceleration, vectors" }
In general what will holding an anti-hydrogen atom for more than a 1/10th of second allow scientists to discover?
Question: In general what will holding an anti-hydrogen atom for more than a 1/10th of second allow scientists to discover? Specifically, given that they can hold one for <1/10th of a second, what would they discover that have not previously been able to determine. Or if not known, what have they been able to discover to date? Answer: The ultimate goal is to be able to do precision spectroscopy of antihydrogen, to make sure that the energy states are the same as in ordinary matter. If there are differences between the energy levels of ordinary hydrogen and antihydrogen, that would violate "CP" symmetry, which says that if you change the sign of all the charges in some system, and invert the parity, every interaction should be the same. We know that CP violation occurs in nature-- it's been observed in kaon decay, among other things-- and it's related to the observed asymmetry between matter and antimatter in the universe. The known sources of CP violation are not enough to explain the matter-antimatter imbalance in the universe, though, so there have to be other forms of it out there that have yet to be discovered. From what we known about the interactions of matter and antimatter, any differences in the antihydrogen states would have to be very small, but laser spectroscopy can be used to do measurements of astonishing precision-- there are single-ion atomic clocks that are good to a few parts in $10^{18}$. Having the target atoms trapped for only a tenth of a second complicates matters, but a group at Argonne National Lab did spectroscopic measurements of the charge radius of unstable helium isotopes that don't last very much longer than that, so it's a good step toward the goal of doing spectroscopy. Another thing that people talk about testing with anti-atoms is the behavior of gravity. Again, you need to have trapped neutral atoms for this, because electrostatic forces are thirty-some orders of magnitude stronger than gravity. 
That will also require extreme precision, and many more atoms than have been trapped to date, but the recent experiments are a good start, and the remaining issues are mostly technical, not fundamental.
{ "domain": "physics.stackexchange", "id": 82, "tags": "particle-physics, antimatter" }
Why does black and yellow indicate danger?
Question: It is a well-known fact that combination of black and yellow indicates danger or poisonousness. In western society it seems obvious this comes from bees and wasps, but it seems like many tropical frogs use yellow and black. Black and yellow also indicates toxicity in some fish species and the order of the colors seems to communicate whether we see a harmless milk snake or a venomous coral snake. So which came first: the fear of black and yellow or the color of bees? Are there any studies about the color combinations effect on human brain? Is it learned or congenital? Answer: First of all, great question! What you describe here is known as aposematism. Aposematism is the adapation of warning signals against the predator. This word is used for any sound, coloring, and odor used as a warning signal. Of course, for this question the focus is color. Honest indications Animal coloration is usually an honest indication of their noxiousness. The brighter they are, the more noxious and toxic they are (http://rspb.royalsocietypublishing.org/content/276/1658/871). In the linked article, there are many references one this positive correlation of toxicity of prey to predator and the brightness of the coloration. In the article they use mathematical models to support the mathemstical evidence. But one thing is for sure: bright colors do mean the prey is armed with defenses, whether the correlation is positive or not. Why then is it black and yellow? The conspicuousness of the coloration is very important because it is important to see from a far away distance. The reason colors like yellow and black are used is because (http://rspb.royalsocietypublishing.org/content/279/1728/417): — They provide high contrast against the background (e.g. red/yellow against green foliage), which promotes detection. — They are resistant to shadows (which are rich in blue-UV), and to changes in illumination (e.g. 
black should not change during day, whereas white could become ‘pink’ at sunset and sunrise). Therefore, they provide a reliable signal under varied habitats and light conditions. — Yellow/red and black has both high chromatic and luminance contrast. — Such colours may allow distance-dependent camouflage if yellow/red and black ‘blend’ to an average colour that matches the background at a distance when predator vision is no longer sufficient to discriminate individual marking components. — Such colours are distinctive from profitable species. What about the combination of black and yellow? In the same article, they mention that: Many warningly coloured prey have markings comprising repeated pattern elements. Such arrangements in signal structure may increase redundancy in the signal but improve the likelihood that the strategic component will be detected by the receiver. In addition, repeated elements may be rare in many natural environments, thus increasing conspicuousness of the prey animal. Simple pattern components (such as stripes and spots) may facilitate detection and also speed up avoidance learning if they are easier to memorize. The research seems to show that the patterns, such as the stripes, lead to increased avoidance, but it is not clear whether this is due to the stripes themselves or due to the contrast of the black and yellow. I hope I have answered your question and if you have any questions, feel free to ask in the comments.
{ "domain": "biology.stackexchange", "id": 10162, "tags": "entomology, toxicology, sensation, psychology, optics" }
Did Hilbert publish general relativity field equation before Einstein?
Question: Did Hilbert publish general relativity field equation before Einstein? Answer: 1915 On November 25, nearly ten years after the foundation of special relativity, Einstein submitted his paper The Field Equations of Gravitation for publication, which gave the correct field equations for the theory of general relativity (or general relativity for short). Actually, the German mathematician David Hilbert submitted an article containing the correct field equations for general relativity five days before Einstein. Hilbert never claimed priority for this theory. [Bold mine.] The Official Web Site of the Nobel Prize Edit 1. But... Many have claimed that in 1915 Hilbert discovered the correct field equations for general relativity before Einstein but never claimed priority. The article [11] however, shows that this view is in error. In this paper the authors show convincingly that Hilbert submitted his article on 20 November 1915, five days before Einstein submitted his article containing the correct field equations. Einstein's article appeared on 2 December 1915 but the proofs of Hilbert's paper (dated 6 December 1915) do not contain the field equations. As the authors of [11] write:- In the printed version of his paper, Hilbert added a reference to Einstein's conclusive paper and a concession to the latter's priority: "The differential equations of gravitation that result are, as it seems to me, in agreement with the magnificent theory of general relativity established by Einstein in his later papers". If Hilbert had only altered the dateline to read "submitted on 20 November 1915, revised on [any date after 2 December 1915, the date of Einstein's conclusive paper]," no later priority question would have arisen. [11] L Corry, J Renn and J Stachel, Belated Decision in the Hilbert-Einstein Priority Dispute, Science 278 (14 November, 1997). Source Edit 2. Haha, butbut... :) Source Edit 3. Roundup. 
Recent controversy, raised by a much publicized 1997 reading of Hilbert's proof-sheets of his article of November 1915, is also discussed [on pp. 11-13; presumed included in this answer]. Einstein and Hilbert had the moral strength and wisdom - after a month of intense competition, from which, in a final account, everybody (including science itself) profited - to avoid a lifelong priority dispute (something in which Leibniz and Newton failed). It would be a shame to subsequent generations of scientists and historians of science to try to undo their achievement. Einstein and Hilbert: The Creation of General Relativity
{ "domain": "physics.stackexchange", "id": 26948, "tags": "general-relativity, history" }
Trying to run rviz: Getting error "Could not contact ROS master..."
Question: I'm trying to run rviz on electric/Ubuntu 10.4. I am getting the popup message: rviz: Waiting for master Could not contact ROS master at [http://localhost:11311, retrying... I've run rviz before and never seen this. Any assistance is appreciated. Paul. Originally posted by Paul0nc on ROS Answers with karma: 271 on 2011-09-26 Post score: 1 Original comments Comment by Paul0nc on 2011-10-02: Thanks Hsu. This worked. Comment by hsu on 2011-09-26: Paul, you can edit your original question, rather than posting an answer. Is your roscore running and your ROS_MASTER_URI pointing to it? Answer: Try: In one terminal run "roscore" Then, in a new terminal do "rosrun rviz rviz" Originally posted by jrcapriles with karma: 370 on 2011-09-26 This answer was ACCEPTED on the original site Post score: 15
{ "domain": "robotics.stackexchange", "id": 6781, "tags": "rviz" }
How to avoid duplicate if-else statements within a Typescript switch?
Question: export function operator(a: number, b: number, operator: string, type: string): string | number { switch (operator) { case 'add': if (type == "answer") { return a + b; } else if (type == "assignment") { return a.toString() + " + " + b.toString(); } case 'sub': if (type == "answer") { return a - b; } else if (type == "assignment") { return a.toString() + " - " + b.toString(); } } } How to avoid code duplication in a Typescript switch? Attempt if (operator == 'add' && type == "answer") { return a + b; } else if (operator == 'add' && type == "assignment") { return a.toString() + " + " + b.toString(); } else if (operator == 'sub' && type == "answer") { return a - b; } else if (operator == 'sub' && type == "assignment") { return a.toString() + " - " + b.toString(); } Answer: You can use the conditional/ternary operator return type === 'assignment' ? a + ' + ' + b : a + b; The + operator, when one operand is a string, works as a string concatenation operator, and the other operands will be implicitly cast to string. Thus, toString() is not required. Note that the code is not equivalent to if...else if..., this is if...else code and is equivalent to if (type === 'assignment') { return a + ' + ' + b; } else { return a + b; }
{ "domain": "codereview.stackexchange", "id": 23386, "tags": "typescript" }
Derivation of the Schwarzschild metric: why are $g_{22}$ and $g_{33}$ the same as for flat spacetime?
Question: I'm trying to understand the derivation of the Schwarzschild metric from Wikipedia, but I simply do not understand why, therein, $g_{22}$ and $g_{33}$ must be those of the flat spacetime. Couldn't $g_{22}$ and $g_{33}$ have any other radial dependence than that of the flat space? If $g_{22}$ and $g_{33}$ were only dependent on $r$ (arbitrary, how exactly), that would be spherical symmetric as well, I suppose. Why are they set to the coefficients of the flat space time? Addendum: I especially don't see why they can't be another function of r. For example how about $g_{22}=A(r)r^2 d\theta^2$ and $g_{33} = A(r)r^2sin^2\theta$? That would be also spherically symmetric as $g_{22}$ and $g_{33}$ only depend on $r$. Could those second A(r)s simply be transformed to the flat spacetime coefficients? Please, show how, in detail. Addendum 2: Meanwhile, I came across a coordinate change in Eddingtons Mathematics of Relativity. They start with U(r), V(r) , W(r) as prefactors for radial, tangential, and temporal component, respectively (as this is indeed the most general sperically symmetric metric). Then, they do the coordinate transformation r1^2=r^2V(r) and end up with only U and W and simply r1 instead of r as radial coordinate. However, now I don't see why the r1 should still be the radial coordinate of normal spherical coordinates. It's totally messed up if V(r) is complicated, isn't it? However, in the derivation of the Schwarzschild metric, it's treated as the normal spherical symmetric radial coordinate. Answer: The Schwarzschild solution is a spherically symmetric solution produced by a central source. This means that at $t = \mathrm{const}$ the metric should be invariant under rotations. 
\begin{equation} ds^{2} = -A_{1} (r,t) c^{2} dt^{2} + A_{2}(r,t) dr^{2}+A_{3}(r,t) dr dt +A_{4}(r,t)(d\theta^{2} + \sin^{2}\theta \,d\phi^{2}) \end{equation} From gauge invariance we can choose $t = f_{1} (\tilde{t}, \tilde{r})$, $r = f_{2} (\tilde{t}, \tilde{r})$ so that $\tilde{A}_{3} = 0$ and $\tilde{A}_{4} = \tilde{r}^{2}$. Then if we forget about the new notation and refer at the new coordinates simply by $t$ and $r$, at constant $t$ and $r$ we have \begin{equation} d\sigma^{2} = r^{2} (d\theta^{2} + \sin^{2} \theta \,d\phi^{2}) \end{equation} This argument is the one presented by M. Gasperini in https://link.springer.com/book/10.1007/978-3-319-49682-5 Answer to comment: Suppose that $d\sigma^{2} = C(r)r^{2} (d\theta^{2} + \sin^{2} \theta \,d\phi^{2})$, then you can choose new coordinates \begin{equation} \tilde{r}^{2} = C(r)r^{2} \end{equation} so that $dr^{2} = F(\tilde{r}) d\tilde{r}^{2}$, where $F(\tilde{r})$ include the function $C(\tilde{r})$ and its derivative expressed as functions of $\tilde{r}$. The metric then become \begin{equation} ds^{2} = -A_{1} (\tilde{r},t) c^{2} dt^{2} + A_{2}(\tilde{r},t)F(\tilde{r})d\tilde{r}^{2}+\tilde{r}^{2}(d\theta^{2} + \sin^{2} \theta \,d \phi^{2}) \end{equation} At this point we can simply drop the tilde for the notation and call $A_{2}(\tilde{r},t)F(\tilde{r}) \equiv A_{2}(r,t)$.
{ "domain": "physics.stackexchange", "id": 96825, "tags": "general-relativity, spacetime, differential-geometry, metric-tensor, coordinate-systems" }
A basic physical chemistry problem
Question: I am a high school student and have just begun my classes (online). I just started physical chemistry but I know about moles, percentage composition, empirical formula. I haven't been able to solve this question and due to my inability to approach my teachers (most of the time they are busy recording lectures), I post my question here. In particular I am confused by that silver chloride line. Answer: You can use each of the empirical formulas given and produce a balanced complete combustion equation for each one as well as working out the moles of each compound given. You already know how many moles of CO2 and water were produced because you were given weights. From there you can see which compound it is most likely to be because the moles of that compound should produce the moles of the products when the stoichiometry is taken into account. As for the silver chloride: (Correction) You cannot directly react AgNO3 with organic solvents but when you convert the compound into a sodium salt and dissociate the chloride ions, the chloride ions will then react with the AgNO3 in a 1:1 molar ratio. So working out the moles of AgCl produced and comparing with the moles potentially produced from each compound will help you determine if there are one or two Cl atoms in the compound you're looking for.
{ "domain": "chemistry.stackexchange", "id": 13688, "tags": "elemental-analysis" }
Complexity analysis of while loop with two conditions
Question: I am curious how to do a line by line analysis of this piece of code using the "Big O" notation. i = 0; j = 0; while ( ( i < n ) && ( j < m ) ) { //do something i++; j++; } How should I represent the number of iterations for the loop? Is it fine to make some assumptions, or should I write min(n, m)? Small extension after @Patrick87's comment to show why I am not sure that min() is a general solution: i = 0; j = 0; while ( ( i < n ) && ( j < m ) ) { //do something i++; j++; } if ( i < n ) { while ( ( i < n ) ) { //do something i++; } } else { while ( ( j < m ) ) { //do something j++; } } How can we now relate the number of iterations of the first loop and the second one if we don't know which condition terminated the first loop? Answer: As Patrick87 pointed out, the first loop is $\min(m,n)$. As for your extended question, it's unclear if you want best, worst, or average case. We typically look at worst case. Either way, you seem to be focused on a single execution when you really need to consider all possible $i$ and $j$ values. So, let's do that: $m = n$. In this case the additional conditional does no work, so the total run time is still $O(\min(m,n))$. $m < n$. When this is true, then the first loop terminates due to $j$. The conditional will then carry out the first case $i < n$ and terminate after $O(n-m)$ iterations. The total number of operations is then $O(n-m+m)= O(n)$ operations. $m > n$. This case is equivalent to the second case but the result is $O(m)$. Now observe that cases 1, 2, & 3 are all equivalent to $O(\max(m,n))$. This is the running time for your code. So back to your "how to" question. You need to do a good, thorough case analysis for multi-conditional loops just like you need to do with conditionals. Then analyze the running time of each case and, assuming you're doing worst case analysis, take the largest running time.
{ "domain": "cs.stackexchange", "id": 2238, "tags": "algorithm-analysis, runtime-analysis" }
Find the smallest difference between two numbers in a DS in O(1) time
Question: I got an assignment to create a new data structure, with the following rules: Init - O(1). Insert x - O(log$_2$n). Delete x - O(log$_2$n). Search for x- O(log$_2$n). Find max difference between two values in the DS - O(1). Find min difference between two values in the DS - O(1). I've outlined the basics of the structure to look somewhat like an array/arryaList in which I can complete the tasks using heap-like methods, and that way I'll be able to complete 1-4 in the times given. Regarding 5 - I need to return the difference between the maxVal and minVal of the array, so it'll be the first number (arr[0]) and I'll change the leaves to be so that the max is the last value (a[n-1]), and then I'll be in O(1). Regarding 6, and this is where I'm stuck - how can I find the smallest difference between two values in O(1) time? I don't know of any methods that accomplish the task in O(1)... Thank you! Answer: Use an AVL tree with each node having three additional entries $\min,\; \max$, and $\text{closest_pair} = (i,j)$, representing the minimum and maximum values of the tree rooted at that node. At the time of insertion and deletion, these values will be updated (Note that only $O(\log(n))$ node updates are needed per insert/delete operation). Now, this data structure represents your required data structure. N.B. For a node $i,$ following relation holds. $i$.closest_pair = closest( [ ($i$.left,$i$), ($i$,$i$.right), $i$.left.closest_pair, $i$.right.closest_pair ] )
{ "domain": "cs.stackexchange", "id": 20037, "tags": "time-complexity, data-structures, runtime-analysis, binary-trees, heaps" }
Will a polyclonal antibody attach to proteins of different kDA?
Question: If I have a GST 26kDa polyclonal antibody, will it bind to the GST 28kDa protein as well? Answer: If the sequence of the 26 kDa form is included within the sequence of the 28 kDa form then the answer is 'probably yes'. However if the N- or C-terminus of the short form is an important/dominant epitope and it is masked in the long form (by being extended) then the activity of the serum against the long form could be significantly reduced.
{ "domain": "biology.stackexchange", "id": 7693, "tags": "biochemistry, proteins, antibody, immunoglobin" }
Relative primality is primitive recursive
Question: How do I prove that the predicate $P(x , y)$ is primitive recursive, where $P(x,y)$ holds if $x,y$ are relatively prime? Answer: Two numbers $x,y$ are not relatively prime iff there exists some common factor $a > 1$, which divides both $x$ and $y$. This common factor can be bounded by $x$. Therefore $x,y$ are not relatively prime iff there exist $a,b \leq x$ and $c \leq y$ such that (i) $a \geq 2$, (ii) $x = ab$, and (iii) $y = ac$.
{ "domain": "cs.stackexchange", "id": 17318, "tags": "computability, primitive-recursion" }
Does Hamilton Mechanics give a general phase-space conserving flux?
Question: Hamiltonian dynamics fulfil Liouville's theorem, which means that one can imagine the flux of a phase space volume under a Hamiltonian theory like the flux of an ideal fluid, which doesn't change its volume. But is it possible to reproduce every phase space conserving flux with an appropriate Hamiltonian? So can I simply imagine the entity of all possible Hamiltonian dynamics as all possible phase space conserving fluxes? Or are Hamiltonian dynamics a special case of phase space conserving fluxes? If they are a special case, what would be an example of a phase space conserving flux for which there is no Hamiltonian that can produce it? Answer: First, let's take a look at one-dimensional systems with phase space dimension $2$. The volume form is just the symplectic one, i.e. any volume-preserving flow is symplectic and thus at least locally Hamiltonian (but not necessarily globally so). Now, consider an arbitrary phase space of dimension $2n\geq4$ with canonical coordinates $q^i,p^i$. Up to a constant factor, the volume form is $$ \Omega = dq^1\wedge\cdots\wedge dq^n\wedge dp^1\wedge\cdots\wedge dp^n $$ and the symplectic form $$ \omega = \sum_i dp^i\wedge dq^i $$ Let's take a look at the vector field $X$ given by $$ X=q^1\frac{\partial}{\partial q^2} $$ As $$ \mathcal L_X\Omega = 0 $$ phase space volume will be preserved, but as $$ \mathcal L_X\omega \not= 0 $$ the vector field is not symplectic and thus also not Hamiltonian.
{ "domain": "physics.stackexchange", "id": 10029, "tags": "classical-mechanics, hamiltonian-formalism, hamiltonian, vector-fields, phase-space" }
gazebo model dynamically modified by programming
Question: Hi, How to modify the model in gazebo, like an stl file, by C or C++? Example: I load a face model on gazebo, and I want to revise a small portion of the face model (lips) when speaking. How to do that? Are there any tutorials? I have found the gazebo Model Creation Tutorial http://playerstage.sourceforge.net/doc/Gazebo-manual-svn-html/tutorial_model.html But it's in XML format, so how can I change it by programming? I can't find any information about that. Thank you~ Originally posted by sam on ROS Answers with karma: 2570 on 2011-03-15 Post score: 0 Answer: Hi sam, A while back, I was trying something similar (walking legs), but I had to abandon it. Perhaps what I found could be of help, but I should warn you, my approach is rather hacky. Gazebo runs as a server and you can connect to it using the client/server interfaces in libgazebo. The documentation with an example is available in the Gazebo manual. Also, the ROS wrapper for gazebo very nicely uses these interfaces to expose some topics and services in ROS. You can find the code on your machine at this path: $(find gazebo)/src/gazeboros.cpp Specifically, you might want to look at functions used to retrieve and manipulate models at run-time: gazebo::Model* model = dynamic_cast<gazebo::Model*>(gazebo::World::Instance()->GetEntityByName(your_model_name)); const std::vector<gazebo::Entity*> children = model->GetChildren(); // iterate using variable 'iter' gazebo::Body* body = dynamic_cast<gazebo::Body*>(*iter); const std::map<std::string, gazebo::Geom*>* geoms = body->GetGeoms(); // magic happens here Sorry I didn't get any further, but I'm interested in what you find. Best, Nikhil PS: The model and entity names should be carefully specified to match the XML file that is loaded. Originally posted by scribbleink with karma: 116 on 2011-06-07 This answer was ACCEPTED on the original site Post score: 2
{ "domain": "robotics.stackexchange", "id": 5076, "tags": "ros, gazebo, stl, model" }
using an external library: C versus C++ issue
Question: I downloaded a vendor's library for accessing analog I/O (http://www.rtd.com/software/CM/aAIO/aAIO_Linux_V02.00.00.tar.gz) on their motherboard (http://www.rtd.com/PC104/CM/CMX32/CMX32MVD.htm) and it works fine. I can compile the driver, install the driver, compile the library, compile the example usage code and run the example usage code that uses the library. It works like a charm. All the compiles use the command line "make" command. My problem is that I can't figure out how to get this exact same example code to compile in my catkin workspace and then add ROS code into it so I can publish the analog readings as ROS topics. (Actually the link to the tarball above is old and the vendor emailed me a new version that is not on their website yet. Let me know if you would like me to get that corrected tarball to you.) The example code I want to start with is "soft_trig.c" from the examples folder. I can copy that file into my catkin package, add it to the CMakeLists.txt, get it to compile as straight C code linked to the library without any ROS calls, and even execute it using rosrun just fine. Here is my working CMakeLists.txt: cmake_minimum_required(VERSION 2.8.3) project(ros_aaio_node) find_package(catkin REQUIRED COMPONENTS roscpp rospy std_msgs ) catkin_package( ) include_directories( ~/aaio/include ${catkin_INCLUDE_DIRS} ) link_directories(~/aaio/lib) add_executable(ros_aaio_node src/soft_trig.c) target_link_libraries(ros_aaio_node rtd-aaio ${catkin_LIBRARIES} ) So now I want to add ROS stuff to the file so I can publish data as ROS Topics. This is where I don't know what to do. I added #include <ros/ros.h> to the file and I got lots of compile errors. Lots of header files were not found. It occurred to me that all my other ROS code was cpp files, not c files, so I renamed the file to soft_trig.cpp and changed the executable line in the CMakeLists.txt file too and I get a lot of different compile errors now. 
Tons of deprecated conversion warnings and several invalid conversion errors. I saw some working code on another project where a coworker had been using a straight C compiled library with their ROS code and they used these two lines in their CMakeLists.txt: set(CMAKE_C_FLAGS "-std=c99" ) set(CMAKE_CXX_FLAGS "-fpermissive") So I tried that and all the compiler warnings and errors went away. But now I get a whole slew of undefined reference errors during linking. Linking CXX executable ~/catkin_ws/devel/lib/ros_aaio_node/ros_aaio_node CMakeFiles/ros_aaio_node.dir/src/soft_trig.cpp.o: In function `main': soft_trig.cpp:(.text+0x851): undefined reference to `aAIO_Open(aAIO_Descriptor**, unsigned char)' soft_trig.cpp:(.text+0x873): undefined reference to `aAIO_Return_Status(aAIO_Descriptor*, int, char*)' soft_trig.cpp:(.text+0x8a0): undefined reference to `aAIO_Reset(aAIO_Descriptor*)' soft_trig.cpp:(.text+0x8c2): undefined reference to `aAIO_Return_Status(aAIO_Descriptor*, int, char*)' soft_trig.cpp:(.text+0x934): undefined reference to `aAIO_Install_ISR(aAIO_Descriptor*, void (*)(unsigned int), void (*)(unsigned int), void (*)(unsigned int), void (*)(unsigned int), void (*)(unsigned int), void (*)(unsigned int), void (*)(unsigned int), void (*)(unsigned int), int, int)' soft_trig.cpp:(.text+0x956): undefined reference to `aAIO_Return_Status(aAIO_Descriptor*, int, char*)' soft_trig.cpp:(.text+0x99c): undefined reference to `aAIO_Interrupt_Enable(aAIO_Descriptor*, aaio_channel, aaio_interrupt)' soft_trig.cpp:(.text+0x9be): undefined reference to `aAIO_Return_Status(aAIO_Descriptor*, int, char*)' soft_trig.cpp:(.text+0xa7f): undefined reference to `aAIO_Write_CGT_Entry(aAIO_Descriptor*, aaio_cgt)' soft_trig.cpp:(.text+0xaa1): undefined reference to `aAIO_Return_Status(aAIO_Descriptor*, int, char*)' soft_trig.cpp:(.text+0xae2): undefined reference to `aAIO_Software_Trigger(aAIO_Descriptor*)' soft_trig.cpp:(.text+0xb04): undefined reference to 
`aAIO_Return_Status(aAIO_Descriptor*, int, char*)' soft_trig.cpp:(.text+0xb3e): undefined reference to `aAIO_Read_Result(aAIO_Descriptor*, aaio_channel, int*)' soft_trig.cpp:(.text+0xb60): undefined reference to `aAIO_Return_Status(aAIO_Descriptor*, int, char*)' soft_trig.cpp:(.text+0xc50): undefined reference to `aAIO_Remove_ISR(aAIO_Descriptor*)' soft_trig.cpp:(.text+0xc72): undefined reference to `aAIO_Return_Status(aAIO_Descriptor*, int, char*)' soft_trig.cpp:(.text+0xc9f): undefined reference to `aAIO_Reset(aAIO_Descriptor*)' soft_trig.cpp:(.text+0xcc1): undefined reference to `aAIO_Return_Status(aAIO_Descriptor*, int, char*)' soft_trig.cpp:(.text+0xcee): undefined reference to `aAIO_Close(aAIO_Descriptor*)' soft_trig.cpp:(.text+0xd10): undefined reference to `aAIO_Return_Status(aAIO_Descriptor*, int, char*)' collect2: error: ld returned 1 exit status These are all the calls to the C compiled library that the example file was just linking to and using just fine when the filename was soft_trig.c. Now that the filename is soft_trig.cpp, I get these linker errors. I can't seem to win. What am I doing wrong? How can I use this existing example C code and turn it into ROS code? Originally posted by Kurt Leucht on ROS Answers with karma: 486 on 2016-02-19 Post score: 0 Answer: There are (at least) two issues here (both of which are - strictly speaking - actually not very ROS specific): compiling C code with a C++ compiler C++ name mangling re 1): as you discovered, renaming a file with C code doesn't necessarily make it (valid) C++, hence the need for the compiler flags. Warnings like deprecated conversion are exactly what one would expect. re 2): linking probably fails because C++ mangles names. Linker is looking for ?Fi_i@@YAHH@Z, mfg library exports int Fi_i(int bar). Compiling C as C++ is most likely the cause. Suggestion: treat the library you got from the mfg as just another system dependency. 
Use the normal make, (sudo) make install procedure to install the mfg lib and headers into /usr/local (or wherever it installs), like you already did before you "add(ed) ROS stuff". Then either - as you do now - hard-code the location of the headers and library in your CMakeLists.txt (not recommended), or write a minimal FindAAIO.cmake (or whatever you name it) that searches for the library and headers at CMake configuration time (highly recommended). Up to here everything is actually non ROS-specific: this is a normal CMake workflow. For your ROS node(s), just #include <..> the necessary headers from the mfg library into your C++ sources, but make sure they already do something like: #ifdef __cplusplus extern "C" { #endif ... #ifdef __cplusplus } #endif This basically tells a C++ compiler to not mangle the names for any symbols declared inside those guards. I'd be surprised if the mfg library's headers don't already do this, but do check. This avoids the linker errors you encountered earlier. Ideally your ROS node(s) would now be just a 'thin wrapper' around some AAIO library functions. Originally posted by gvdhoorn with karma: 86574 on 2016-02-20 This answer was ACCEPTED on the original site Post score: 3 Original comments Comment by Kurt Leucht on 2016-02-22: Thanks! Adding the extern "C" wrapper to all the vendor's header files appears to have worked! Comment by gvdhoorn on 2016-02-23: Alternatively you could extern "C" your #include <> statements (so in your own sources). That way you don't have to change the vendor files. Comment by Kurt Leucht on 2016-02-23: That's even better! Thanks!
{ "domain": "robotics.stackexchange", "id": 23842, "tags": "ros, external-libraries" }
Why time-dependent canonical transformation satisfies symplectic condition?
Question: I am reading Chapter 9 of Goldstein. He proves that any time-independent canonical transformations satisfy symplectic condition. And after that, he shows that if we ignore second order small quantity, then, the infinitesimal time-dependent canonical transformation will satisfy symplectic condition. But how can we make sure that finite canonical transformations also satisfy this condition? What I am confused about is that this problem is very similar to infinitesimal rotations, but there, the commutative property is lost when the rotation becomes finite. I think that is because the second order terms can not be ignored, so, in canonical transformation, for same reason, will it also lose the property that it satisfied symplectic condition when canonical transform is finite? Answer: In differential-geometric terms, there is a bijective correspondence between 1-parameter time-dependent flows $\sigma$ and time-dependent vector fields $X$. In particular, there is a bijective correspondence between 1-parameter time-dependent symplectic flows $\sigma$ and time-dependent symplectic vector fields $X$. See also this related Phys.SE post. Goldstein's definition $$ (\sum_{i=1}^np_i\mathrm{d}q^i-H\mathrm{d}t) -(\sum_{i=1}^nP_i\mathrm{d}Q^i -K\mathrm{d}t) ~=~\mathrm{d}F\tag{9.11}$$ of a finite (possibly time-dependent) canonical transformation (CT) form a groupoid. Goldstein shows below eq. (9.66) that a (possibly time-dependent) infinitesimal canonical transformation (ICT) of type 2 is an infinitesimal symplectomorphism (IS). An application of the Poincare lemma shows that the opposite infinitesimal statement holds locally. (There could be topological obstructions, since not all symplectic vector fields are Hamiltonian vector fields.) We deduce by integrating the infinitesimal result, that a CT path-connected to the identity transformation is a symplectomorphism. (It follows from the fundamental theorem of calculus that second-order terms in $\epsilon$ can be ignored.) 
One could presumably with some work show directly (i.e. without integrating the infinitesimal result) that a (possibly time-dependent) finite CT is a symplectomorphism, although I have not attempted it so far. Lastly, OP mentions the orthogonal Lie group $$SO(3)~:=~ \{ M\in {\rm Mat}_{3\times 3}(\mathbb{R}) \mid M^TM = \mathbb{1}_{3\times 3}, \det(M)>0 \}$$ of 3D rotations, which is non-abelian/non-commutative, i.e. the order matters. Infinitesimal rotations correspond to the Lie algebra $$so(3)~:=~ \{ m\in {\rm Mat}_{3\times 3}(\mathbb{R}) \mid m^T = -m \},$$ which consists of antisymmetric $3\times 3$ real matrices. A 1-parameter family of infinitesimal rotations $$[0,1]~\ni~ s~~\mapsto~~ m(s)~\in~ so(3)$$ can be integrated to a finite rotation $$M~=~{\cal P}\exp\left\{\int_0^1\! \mathrm{d}s~ m(s)\right\}~\in ~SO(3),$$ where ${\cal P}$ denotes path-ordering. We stress that the orthogonality condition for the finite rotation $M$ is not spoiled by higher-order contributions, cf. OP's remarks. References: H. Goldstein, Classical Mechanics; Chapter 9.
{ "domain": "physics.stackexchange", "id": 63421, "tags": "mathematical-physics, coordinate-systems, hamiltonian-formalism, flow, phase-space" }
Confusing time dilation - proper time is higher?
Question: The problem states that 2 rockets of proper length 100m are going in opposite directions. From the system of rocket A, the tip of B took 5 microseconds to pass the rocket A. If a clock on the tip of B marked t=0 when their tips met, what does the clock say when rocket B reaches the end of A? (I assume that all of this is measured from rocket A) First, I computed the relative velocity (dividing the length travelled by the time it took), $v= 2 \times 10^7$ m/s. So $\gamma=1.00223$. Then I used the Lorentz transform of times: $t' = \gamma(t-(v \times 100)/c^2)$, then $t' = 4.989 \times 10^{-6}$ seconds. I understand the math but this doesn't match the statement "proper time is always the lowest" because this proper time $5 > 4.989$ microseconds. Answer: Your calculation is correct but you muddled which one is the proper time. Proper time is the time elapsed between events as observed in the frame in which those events are at the same spatial location. In other words, it is the time registered by a clock that is carried from one event to the other. This is the clock on the tip of B in this example. It registers the time $t'$.
{ "domain": "physics.stackexchange", "id": 63073, "tags": "special-relativity" }
How to remove potting material from an electronic package?
Question: I have an electronic device that uses black potting material similar to the one below, I believe to protect the electronic components from the elements. I would like to safely remove this material. This material might be some type of epoxy. There are plenty of suggestions on how to remove potting material (removing potting electronics). Some indicate that the chemicals in the potting might be harmful. How can I safely remove the potting material? Through observation, can I safely narrow down the potting material? Answer: It's not easy to identify which kind of potting material it is. However, you can try the following common ways of de-potting, ordered from easiest to hardest: Heating it up. Some compounds separate from the substrate when you just heat it to about 150 Celsius. Soaking it in water between heating sessions can help the separation. Hit it with a hammer. If the coating is only on one side of the PCB like in the image, a heavy blow can cause it to separate. Acetone bath. Leave it for an hour, scrape off any goo, repeat until all plastic is gone. Basic precautions like protective gloves and goggles are necessary. Heated nitric acid bath. This will need about 70% nitric acid and fume removal. I wouldn't attempt this without a proper lab setup. Obviously all of these have significant risk of damaging the electronics.
{ "domain": "engineering.stackexchange", "id": 5427, "tags": "electrical-engineering" }
I have a pandas dataframe and I need to clear all the special characters other than space
Question: Input: import pandas as pd df=pd.read_excel("OCRFinal.xlsx") df['OCR_Text']=df['OCR_Text'].str.replace(r'\W+'," ") print(df['OCR_Text']) Output: The excel removes all the special characters along with the space. But I don't want space characters to be removed. Answer: import pandas as pd df=pd.read_excel("OCRFinal.xlsx") whitespace = "\r\n\t" df['OCR_Text']=df['OCR_Text'].apply(lambda x: x.strip(whitespace)) print(df['OCR_Text'])
{ "domain": "datascience.stackexchange", "id": 7026, "tags": "pandas, preprocessing, excel" }
SQLite and Python: commit once every 10 seconds maximum, and not after every client request
Question: In a Python Bottle server using SQLite, I noticed that doing a DB commit after each INSERT is not efficient: it can use 100ms after each client request. Thus I wanted to improve this in How to commit DB changes in a Flask or Bottle app efficiently? . I finally came to this solution, which is more or less a "debounce"-like method: if multiple SQL INSERT happen during a 10-second timeframe, group all of them in a single DB commit. What do you think of the following code? Is it safe to do like this? (Note: I know that the use of a global variable should be avoided, and replaced by a class / object with attributes, etc. I'll do this, but this part is not really on topic here). import bottle, sqlite3, random, threading, time @bottle.route('/') def index(): global committhread c = db.cursor() c.execute('INSERT INTO test VALUES (?)', (random.randint(0, 10000),)) c.close() if not committhread: print('Calling commit()...') committhread = threading.Thread(target=commit) committhread.start() else: print('A commit is already planned.') return 'hello' def commit(): global committhread print("We'll commit in 10 seconds.") time.sleep(10) # I hope this doesn't block/waste CPU here? db.commit() print('Committed.') committhread = None db = sqlite3.connect('test.db', check_same_thread=False) db.execute("CREATE TABLE IF NOT EXISTS test (a int)") committhread = None bottle.run(port=80) If you run this code, opening http://localhost/ once will plan a commit 10 seconds later. If you reopen http://localhost/ multiple times, less than 10 seconds later, you will see that it will be grouped in the same commit, as desired. Note: this method is not "Do it every 10 seconds" (this would be a classic timer), but rather: "Do it 10 seconds later; if another INSERT comes in the meantime, do all of them together". If there is no INSERT during 60 minutes, with my method it won't do anything at all. With a timer it would still periodically call a function (and notice there's nothing to do). 
Worth reading too: How to improve SQLite insert performance in Python 3.6? Answer: What do you think of the following code? Is it safe to do like this? In my opinion, it is not. So many things could go wrong. One example: in this code there is no exception handling. If your program crashes for any reason, your routine may not be triggered. Fix: add an exception handler that does some cleanup, commits and closes the DB. Or better yet, just do that commit in the finally block. By the way the doc says this about the close function: This closes the database connection. Note that this does not automatically call commit(). If you just close your database connection without calling commit() first, your changes will be lost! So it is a good idea to commit systematically. time.sleep(10) # I hope this doesn't block/waste CPU here? time.sleep blocks the calling thread. It is useless anyway because what you want here is a timer, not a thread. You can have a timer routine that runs every n seconds to perform a given task. But you should still have a commit in the finally block, so that all pending changes are written to the DB when the program ends, even after an exception. Now to discuss the functionality more in depth: You say: In a Python Bottle server using SQLite, I noticed that doing a DB commit after each INSERT is not efficient: it can use 100ms after each client request. That may not be 'efficient' but if I have to choose between slow and safe there is no hesitation. Have you actually measured how long it takes on your own environment ? On Stack Overflow you wrote: I optimized my server to serve pages very fast (10ms), and it would be a shame to lose 100ms because of DB commit. While I applaud your obsession with performance, does 100 ms really make a difference to your users ? It normally takes more than 100 ms to load a page or even refresh a portion of it using Ajax or a websocket. The latency resides in the network transport. 
I don't know how your application is structured but my priority would be to deliver as little traffic as possible to the users. Websocket + client-side JS should do. Perhaps using a different storage medium could improve IO performance. If you are not using a SSD drive, maybe you could consider it or at least test it. Before writing code like this I would really try to exhaust all possibilities, but it is better (more reliable) to let SQLite handle things using the options that already exist. What have you tried so far ? Would this be acceptable to you ? PRAGMA schema.synchronous = OFF With synchronous OFF (0), SQLite continues without syncing as soon as it has handed data off to the operating system. If the application running SQLite crashes, the data will be safe, but the database might become corrupted if the operating system crashes or the computer loses power before that data has been written to the disk surface. On the other hand, commits can be orders of magnitude faster with synchronous OFF. Source: PRAGMA Statements There is a risk of corruption in case of power loss but this is no worse than what you are doing. If on the other hand data integrity is more important you should stick to a full commit after every operation. You should also have a look at Write-Ahead Logging. This may interest you if there are concurrent writes to your database. Otherwise opening the DB in EXCLUSIVE mode may bring some benefits (see the PRAGMA page for details). More detailed discussions: Atomic Commit In SQLite 35% Faster Than The Filesystem Last but not least: transactions. SQLite starts an implicit transaction automatically every time you run a SQL statement and commits it after execution. You could initiate the BEGIN & COMMIT TRANSACTION statements yourself. So if you have a number or related writes, regroup them under one single transaction. 
Thus you do one commit for the whole transaction instead of one transaction per statement (there is more consistency too: in case an error occurs in the middle of the process you won't be left with orphaned records). There are quite many things you can try until you find the mix that is right for you.
{ "domain": "codereview.stackexchange", "id": 38194, "tags": "python, multithreading, database, sqlite, bottle" }
How to simulate the density matrix in Qiskit when using the qasm simulator?
Question: When simulating a circuit using qasm simulator, if there is a depolarizing channel in the noise model, then the results could be mixed states. How can I calculate the density matrix of the mixed states? Answer: The easiest way to do this would be to probably use the density matrix snapshot instruction: https://qiskit.org/documentation/stubs/qiskit.providers.aer.extensions.SnapshotDensityMatrix.html#qiskit.providers.aer.extensions.SnapshotDensityMatrix basically just add circuit.snapshot_density_matrix('density_matrix') to your circuit where you want to get the density matrix. That will store the density matrix in the output results. Here is an example script: from qiskit.test.mock import FakeVigo from qiskit import QuantumCircuit from qiskit.providers.aer import extensions # import aer snapshot instructions from qiskit import execute qc = QuantumCircuit(2) qc.h(0) qc.cx(0, 1) qc.snapshot_density_matrix('density_matrix') result = execute(qc, FakeVigo()).result() # Extract density matrix snapshot from result object: density_matrix = result.data()['snapshots']['density_matrix']['density_matrix'][0]['value'] print(density_matrix) In that script FakeVigo is just running Aer under the covers with a noise model taken from a snapshot of the backend properties from the IBMQ vigo device.
{ "domain": "quantumcomputing.stackexchange", "id": 2399, "tags": "programming, qiskit" }
What is the number of languages accepted by a DFA of size $n$?
Question: The question is simple and direct: For a fixed $n$, how many (different) languages are accepted by a DFA of size $n$ (i.e. $n$ states)? I will formally state this: Define a DFA as $(Q,\Sigma,\delta,q_0,F)$, where everything is as usual and $\delta:Q\times\Sigma\to Q$ is a (possibly partial) function. We need to establish this since sometimes only total functions are considered valid. For every $n\geq 1$, define the (equivalence) relation $\sim_n$ on the set of all DFAs as: $\mathcal{A}\sim_n\mathcal{B}$ if $|\mathcal{A}|=|\mathcal{B}|=n$ and $L(\mathcal{A})=L(\mathcal{B})$. The question is, then: for a given $n$, what is the index of $\sim_n$? That is, what is the size of the set $\{L(\mathcal{A})\mid\mathcal{A}\textrm{ is a DFA of size }n\}$? Even when $\delta$ is a total function, it doesn't seem to be an easy count (for me, at least). The graph might not be connected, and the states in the connected component containing the initial state might all be accepting, so, for example, there are many graphs of size $n$ accepting $\Sigma^*$. Same with other trivial combinations for the empty language and other languages whose minimal DFA has fewer than $n$ states. (A naïve) recursion doesn't seem to work either. If we take a DFA of size $k$ and add a new state, then, if we want to keep determinism and make the new graph connected (to try to avoid trivial cases), we have to remove a transition to connect the new state, but in that case we may lose the original language. Any thoughts? Note. I updated the question again, with a formal statement and without the previous distracting elements. Answer: I think that this question has been studied previously. Mike Domaratzki wrote a survey on research in this area: "Enumeration of Formal Languages", Bull. EATCS, vol. 89 (June 2006), 113-133: http://www.eatcs.org/images/bulletin/beatcs89.pdf
{ "domain": "cstheory.stackexchange", "id": 1053, "tags": "co.combinatorics, fl.formal-languages, automata-theory, regular-language, dfa" }
What does it mean for particles to "be" the irreducible unitary representations of the Poincare group?
Question: I am studying QFT. My question is as the title says. I have read Weinberg and Schwartz about this topic and I am still confused. I do understand the meanings of the words "Poincaré group", "representation", "unitary", and "irreducible", individually. But I am confused about what it means for it to "be" a particle. I'm sorry I'm not sure how to make this question less open-ended, because I don't even know where my lack of understanding lies. Answer: Irreducible representations of the Poincare group are the smallest subspaces that are closed under the action of the Poincare group, which includes boosts, rotations, and translations. The point is that we should interpret these subspaces as the set of possible states of a particle. For example, if you start with a state representing a particle at rest, then you can boost it (so it starts moving), rotate it, translate it, and so on. But all the states you can reach represent, by definition, the same kind of particle, just in different states of motion. The requirement that the representation be unitary just means these operations keep states normalized.
{ "domain": "physics.stackexchange", "id": 72285, "tags": "quantum-field-theory, special-relativity, group-theory, representation-theory, poincare-symmetry" }
Is length contraction multidimensional?
Question: I'm solving this problem in which the rod with length $L_0$ is moving at speed v along the horizontal direction; this rod makes an angle $\theta_0$ with the x-axis and I am supposed to determine the length of the rod as measured by the stationary reference frame. I started with breaking the Length of the rod into horizontal and vertical components; the horizontal component will get contracted for sure, but I am not sure if the vertical component also will contract or not? Is length-contraction multi-dimensional? Answer: The component that is seen contracted by a moving frame, is along the direction of the velocity of the frame. In your case since the rod is seen to be moving in the horizontal direction, therefore only the horizontal component contracts and not the vertical one.
{ "domain": "physics.stackexchange", "id": 87510, "tags": "general-relativity, special-relativity" }
Helmholtz Free Energy minimization during an irreversible process
Question: Consider the classical $(N,V,T)$ system, and its Helmholtz free energy (HFE) $A=U-TS_{system}$. The system is placed in contact with a hotter heat bath. It is said that, at equilibrium, the HFE of the system reaches a minimum, i.e. $dU - TdS_{system} = 0$. But, for an irreversible heat transfer $dQ$ from the heat bath to the system, we end up with $dQ=dU<TdS_{system}$. So when does the equilibrium get realized? Do we need an "extra" transfer of heat to the system? Answer: Your differential form is incomplete. Actually: $$dF=dU -SdT-TdS =-SdT$$ Equilibrium is reached when $dF=0$, so when $-SdT=0$, meaning the temperature is constant over time ($T_{system}=T_{bath}$). That's why at constant temperature the Helmholtz free energy is the minimum potential. In the same way, for a constant pressure process the enthalpy is given by: $$H=U+PV$$ Because its differential form is then $dH=dU+VdP+PdV=VdP$. At equilibrium with a pressure bath, the enthalpy does not vary and is at a minimum. Each of these is obtained by a Lagrange multiplier, meaning that for a constraint on an intensive parameter $X$ coupled to an extensive parameter $Y$ such that $\frac{dU}{dY}=X$, you can construct the associated thermodynamic potential $Z$ as: $$Z=U-XY$$ such that its differential form vanishes at equilibrium. I hope this helps a bit; the underlying physics is better understood with a solid math background, in my opinion. edit: I made a little mistake, the first formula is only valid at constant volume as my differential is $dF=-PdV-SdT$ in the general case. In any case at equilibrium the volume of the system is supposed constant and no irreversible work is involved. The same reasoning applies as for the internal energy at equilibrium of a closed system.
{ "domain": "physics.stackexchange", "id": 33377, "tags": "thermodynamics, energy, statistical-mechanics" }
In Reimer-Tiemann reaction why does phenol attack the carbene from ortho position?
Question: From the Wikipedia article on the Reimer-Tiemann reaction: In step 5, why doesn't the oxygen attack the carbene? The way I see it: upon attacking the carbene from the ortho position, the mechanism proceeds via a non-aromatic intermediate. Oxygen bearing a negative charge is a good nucleophile too. So I proceeded with oxygen acting as a nucleophile and came up with this mechanism: The final product is not a major product (I wasn't even able to find it listed as a product after a cursory Google search), so why does phenoxide attack the carbene from the ortho position? EDIT The product I'm asking about has not even been mentioned in the linked question. Answer: First of all, in the phenoxide ion, the lone pair on the oxygen atom doesn't remain fully available; it remains in continuous conjugation with the double bond of the benzene ring. In the resonance hybrid, there is also electron density on the carbon atom in the ortho and para positions, and also definitely on the oxygen atom itself. But, carbon being less electronegative, it acts as a better nucleophile in this case rather than oxygen. So, attack from the carbon atom rather than the oxygen atom is more probable. Moreover, although attack from the ortho position creates a chance of steric crowding, the intermediate generated from the attack by the ortho position creates an ordered transition state. The metal ion (say, $\ce{Na+}$) can easily interact with the electronegative $\ce{O}$ and $\ce{Cl}$ atoms, and can increase the stability of the transition state. Here is a picture of the intermediate and a 3D view of it. This ordered transition state will not appear in case of attack by the oxygen atom. This is also another reason why that transition state is more favoured, and the reaction rate through that step is higher than through any other possible step.
{ "domain": "chemistry.stackexchange", "id": 10592, "tags": "organic-chemistry, reaction-mechanism, carbonyl-compounds" }
difference between sabertooth motor controller and rc esc?
Question: I am building an RC/robot mower. Most of the YouTube videos show the Sabertooth motor controllers being used to connect the RC receiver to the DC motors. But here in Australia, the Sabertooth I need costs about 200 dollars. RC ESCs sell for about 10 dollars. What is the difference between the ESC and the Sabertooth, and can I use an ESC instead? The specs I have on the motor and battery are 12v 10amp normal, 35amp stall, and my RC is a FlySky. Answer: I have ended up using 2 RC ESCs; they can easily handle the 35 amps and 12v. The FlySky remote also allows mixing so that the 2 motors/ESCs can handle skid steering on one stick of the remote. So much cheaper.
{ "domain": "robotics.stackexchange", "id": 1397, "tags": "motor, esc" }
Application of Expander Codes
Question: I need to give a talk about expander codes at university (I'm a student of computer science). Since they have been introduced to show a family of codes looking good when thinking of the Shannon theorem, I wonder what real world application for expander codes exist. As far as I know one has a real hard time when encoding but decoding is quite fast. Why aren't they used to encode write only media or similar? What is their big disadvantage? Answer: One area that you could consider is Fault Tolerant Parallel Computation. Expander codes and graphs could be efficiently used in them like: Highly fault-tolerant parallel computation, by Daniel A. Spielman On word-level parallelism in fault-tolerant computing
{ "domain": "cs.stackexchange", "id": 1035, "tags": "graphs, combinatorics, coding-theory, expanders" }
What causes plastic to become brittle and can it be softened again?
Question: I have some old computer case parts made of ABS plastic. Two decades of storage has embrittled the plastic to the point where they easily crack or shatter with the slightest bend. What is going on chemically that's causing the plastic to become brittle. Is there anything that can be done to reverse it? Answer: Unfortunately, there is not too much to do. The original ABS substance is a copolymer made of acrylonitrile, butadiene and styrene. It is rather hard and brittle. To make it softer, some proportion of a plasticizer has to be added when it's hot and malleable. In the last century, the plasticizer was in general dinonylphthalate. It works well, but it is a bit volatile. So after a couple of years, the plasticizer gets evaporated, and the polymer becomes brittle again. This "illness" can only be cured by remelting the plastic and adding some new dinonylphthalate. Today better plasticizers have been discovered that are not so volatile.
{ "domain": "chemistry.stackexchange", "id": 16106, "tags": "organic-chemistry, polymers, plastics" }
Can OR be "undone"?
Question: Suppose that $Z = X \vee Y$, where $X$, $Y$ and $Z$ are 96-bit binary numbers. If I'm given the values of $Z$ and $Y$, is it possible to work out what $X$ is? I know this is possible with XOR but can it be done with OR? Answer: It is easy to find out that there can be more than one value, used as $X$, to satisfy $Z = X \vee Y$. When a specific bit of $Y$ is $1$, there are two possibilities for such bit in $X$, i.e., $0$ or $1$. Let's make a simple example with a 2-bit number: $Y$ = $10$ and $Z$ = $11$ The possible values of $X$ are: $11$ $01$ because: $11 \vee 10 = 11$ $01 \vee 10 = 11$ In short, you don't have the certainty that the end result of the reverse operation of $\vee$ will be a unique result.
{ "domain": "cs.stackexchange", "id": 5403, "tags": "logic" }
What if dark matter occupied three other dimensions and it could only interact with our three dimensions through gravity and time?
Question: I recently thought about dark matter existing in three other dimensions similar to ours but wasn't sure if it was logical. It would be of great help if someone would give me a clear insight into this topic. Answer: It is logical as a conceptual proposal in that it breaks no known laws of physics, but there is no underlying mathematical model which would suggest it, in fact quite the reverse. One would have to extend the theories of higher dimensions quite fundamentally in order to accommodate it, and there are reasons why this would in practice be problematic. For example the theory would have to explain why and how the six extended spatial dimensions were grouped in threes and no other natural phenomena could cross the divide. Also, we have reasons for limiting the maximum number of dimensions to twelve. Three each, plus time, leaves only five for string theories, which is not enough to accommodate all of quantum physics. Nevertheless, I regard it as about equal in validity to the idea that our 10-dimensional "string" world is a "brane" floating in an 11- or 12-dimensional plenum and that gravity is so weak because it leaks away into the plenum. Your dark Universe could be just a parallel brane, perhaps containing all the missing antimatter, whose gravity is leaking into ours. Given such speculative brane models, why not add yours to the mix?
{ "domain": "physics.stackexchange", "id": 76026, "tags": "cosmology, universe, dark-matter, spacetime-dimensions" }
Parallel programming models: Why do OpenMP and MPI dominate?
Question: In a lecture on HPC parallel programming (for CPUs) we discussed various models available from Pthreads to OpenMP to MPI and others like Charm++, X10, UPC, Chapel…. The main focus was clearly on the first three. Intrigued by the topic I did some research on my own but could find sources, examples and discussions mainly for those first three. Later I discovered "AMPI" which can even run legacy MPI application with many of the benefits Charm++ offers but nobody seems to use it. Why are the other models so neglected? Compared to the rather crude MPI alternatives like Charm++ and UPC appear more convenient as they handle a lot of the "dirty work" neatly in the background. Answer: I will give you two main reasons why MPI and OpenMP dominate. The first one is that they are standards. There are several available implementations, and you can be sure to find them available on every possible supercomputer machine. This is strictly related to the concept of portability: you write your parallel code and, if you do not use anything else related to a particular machine, you can run the same code on all of the parallel machines you have available. The second reason is the programmer's resistance. With MPI and OpenMP covering both message-passing and shared-memory parallel programming (and you can even use them simultaneously if useful and/or required) why a programmer, especially a beginner in the field, should spend his/her time learning new parallel programming languages that are not granted to become standards? Sure, as you pointed out, other languages provide some benefits but the question is, would you invest your time betting on something that may even totally disappear in the future ? Standards are here to stay. 
Anyway, regarding this specific point, my humble opinion is that, if possible, one should instead spend some time on different languages (not just those one for parallel programming) because this is a useful discipline and learning different paradigms is always beneficial. However, when reading your curriculum vitae for possible hiring, the people that can hire you for a position will be in general mostly interested to your knowledge of the standards, not to your knowledge of niche languages (in general, even though there could be exceptions of course).
{ "domain": "cs.stackexchange", "id": 15715, "tags": "programming-languages, computation-models, parallel-computing, message-passing" }
What does it mean for AlphaZero's network to be "fully trained"
Question: Reading this blog post about AlphaZero: https://deepmind.com/blog/article/alphazero-shedding-new-light-grand-games-chess-shogi-and-go It uses language such as "the amount of training the network needs" and "fully trained" to describe how long they had the machine play against itself before they stopped training. They state training times such as 9 hours, 12 hours, and thirteen days for chess, shogi, and Go respectively. Why is there a point at which the training "completes?" They show plots of AlphaZero's performance on the Y axis (its Elo rating) as a function of the number of training steps. Indeed, the performance seems to level out as the number of training steps increases beyond a certain point. Here's a picture from that site of the chess performance vs training steps: Notice how sharply the Elo rating levels off as a function of training steps. First: am I interpreting this correctly? That is, is there an asymptotic limit to improvement on performance as training sessions tend to infinity? If I am interpreting this correctly, why is there a limit? Wouldn't more training mean better refinement and improvement upon its play? It makes sense to me that the millionth training step may yield less improvement than the very first one, but I wouldn't expect an asymptotic limit. That is, maybe it gets to about 3500 Elo points in the first 200k training steps over the course of the first 10 hours or so of playing ches. If it continued running for the rest of the year, I'd expect it to rise significantly above that. Maybe double its Elo rating? Is that intuition wrong? If so, what are the factors that limit its training progress beyond the first 10 hours of play? Thanks! Answer: Neural network will eventually reach limit of it's approximation power. You can't expect to learn more and more things infinitely long with the same amount of learnable parameters. 
Also, if you eventually reach optimal performance, you can't play more optimally than what optimal is (not saying that it reached optimal performance, but possibly something close to optimal for its approximation abilities). So probably a combination of those two leads the performance increase to reach its limit.
{ "domain": "ai.stackexchange", "id": 1317, "tags": "training, alphazero" }
Create identicon of a DNA sequence
Question: How can I create an identicon (a visual representation) of a DNA sequence? Answer: Sequenticon is a Python library for generating identicons for DNA sequences. For instance, the sequence ATGGTGCA gets converted into the following icon: A web interface is also provided at EGF CUBA (Collection of Useful Biological Apps): Render Sequenticons Disclaimer: I'm the current maintainer of Sequenticon
{ "domain": "bioinformatics.stackexchange", "id": 1654, "tags": "visualization, egf" }
What does this do: roslaunch turtlebot3_bringup turtlebot3_remote.launch
Question: The name and the documentation implies somehow that it would launch a turtlebot3 "remotely" i.e. when running this launch file on another computer (not the robot itself.) But I just cant understand how that can be and reading the .launch file didn't help me. Originally posted by pitosalas on ROS Answers with karma: 628 on 2018-07-30 Post score: 0 Answer: Hi :) turtlebot3_remote.launch file includes robot_model and launch robot_state_publisher node. <node pkg="robot_state_publisher" type="robot_state_publisher" name="robot_state_publisher"> <param name="publish_frequency" type="double" value="50.0" /> <param name="tf_prefix" value="$(arg multi_robot_name)"/> </node> So, this makes to show state of your robot by tf. If you want more detail information, please visit below links. http://wiki.ros.org/robot_state_publisher http://wiki.ros.org/robot_model Originally posted by Darby Lim with karma: 811 on 2018-08-02 This answer was ACCEPTED on the original site Post score: 1
{ "domain": "robotics.stackexchange", "id": 31413, "tags": "ros, ros-kinetic, turtlebot3, robotis" }
Could the speed in which the plate is moving affect how quickly mountains rise?
Question: In an episode of History Channel's How the Earth Was Made, there was a remark when India left the rest of Gondwana 80 million years ago only to collide with mainland Asia 50.5 million years ago. The remark was, "very fast in geological time." Afterwards, it was said that "as with any smash, the faster the collision, the bigger the wreck." In the episode, those remarks were written as an explanation for the Himalayas' "unique size". But is this true? Could the speed in which one plate collides with another REALLY affect the size of a mountain range? Answer: The height of the Himalayas Like Keith McClary says in his answer, there really are two factors in creating growing/shrinking mountains. Mountains grow due to various reasons. In the case of the Himalayas it's like you must've seen in the documentary: the collision between the Indian and Eurasian continental plates. The material has to go somewhere and thus goes up (simplifying the geology a bit; I think that concepts like buoyancy these days also give some nuance to the subject, but the general picture stays the same). In the Himalayas, the two plates have moved into each other for at least 2500 km, so that's a lot of ground to move! If that was the only process at play, there would be way higher mountains. I couldn't find a corresponding figure for the Himalayas, so consider instead the following present-day profile through Switzerland, running roughly north (left) to south (right). You can see that the present-day Alps are at maximum 4 km high. Compare that against a reconstruction of the geology that should be present, running along basically the same profile: You can see that the mountains could be as much as 20 km high! You can appreciate just how much mountain has disappeared, for another very young mountain range! So, the reason that mountains aren't as big as they could/should have been, is that they are cut down by erosion. 
Rain, snow, wind, temperature, all break down mountainous rocks into smaller broken pieces. Those pieces are transported away from the mountains as sediment in rivers (such as the Yangtze for the Himalayas). So, the height of the Himalayas is very much controlled by the relative size of the two effects: how fast does the mountain range grow, and how fast does erosion break it down again. If the two are in equilibrium, the mountain doesn't shrink or grow. If erosion dominates, the mountain will slowly disappear over time. But in the case of the Himalayas, the mountain building generally outpaced the erosion, leading to the unique heights of the Himalayas. In this case, the speed of the collision is definitely the main factor! The size of the Tibetan Plateau Now if your question was also about why the Tibetan Plateau is so wide (rather than the Himalayas so high), I recommend you read https://oak.ucc.nau.edu/wittke/Tibet/Plateau.html , a very well-written summary of the three main hypotheses: The speed of the collision has lead to a very step-wise straight faults toppling over each other, rather than folding over, which could lead to very long faulted sections. "Crust is thickened by the faulting and subsequent movement of large masses of rock, which are stacked one on top of another like cordwood." When India hit the Eurasian plate it still had a heavy oceanic plate that was going under ('subducting') under the Eurasian plate. Now the oceanic plate has disappeared, and now the continental plate is being pulled underneath the Eurasian plate. "This process is reminiscent of taking a block of ice and pushing it beneath another ice slab, causing the latter to rise upwards. However, it is difficult to imagine how the buoyant Indian crust could be kept deep enough to get far beneath the plateau before bobbing to the surface. Perhaps the great speed at which India is colliding to Eurasia allowed this to happen." 
The last hypothesis is simply that the continental collision puts a lot of heat below the plateau. "Like a hot-air balloon, the heated crust is buoyant and rises with the addition of light granitic material at the bottom of the Eurasian crust increasing the height of the Plateau." This picture shows all those three hypotheses. 1=distributed shortening, 2=underthrusting, 3=lower crustal flow. In each case, a plateau is formed relatively far away from the actual collision between the two plates. It is likely that, really, all three hypotheses together are at play. As explained on the website, at least the first two hypotheses are thought to be related to the speed of the collision. So, yes, both the height and the width of the Himalayas are thought to be linked to the speed of the collision/relative plate movement!
{ "domain": "earthscience.stackexchange", "id": 1991, "tags": "plate-tectonics, mountains, mountain-building" }
Intuitive reasons for superconductivity
Question: Superconductivity I read in a book "Physics - Resnik and Halliday" the explanation of Type-I Superconductors {cold ones} that: The Electrons that make up current at super-cool temperatures move in coordinated pairs. One of the electrons in a pair may electrically distort the molecular structure of the superconducting material as it moves through, creating nearby short-lived regions of positive charge.the other electron in the the pair may be attracted to the positive spot. According to the theory the coordination would prevent them from colliding with the molecules of the material and thus would eliminate electrical resistance Is this the only explanation or can somebody give me a more intuitive explanation that also takes into the problem of defect scattering as in the case of resistance and also explains the Type-II superconductors {hot ones} P.S. What are "coordinated pairs"? Answer: When you scatter an electron you change it's energy. So if it wasn't possible to change the energy of an electron you couldn't scatter it. This is basically what happens in superconductors. In a metal at room temperature the electrons have a continuous range of energies. This means if I want to change the energy of an electron by 0.001eV, or even 0.000000001eV there's no problem doing this. This is a big difference from an isolated atom, where the electrons occupy discrete separated energy levels. If you try to change the energy of the electron in a hydrogen atom by 0.001eV you can't do it. You have to supply enough energy to make it jump across the energy gap to the next energy level. In superconductors the correlation between the electrons effectively turns them into bosons and they all fall into the lowest energy state. However the correlation also opens a gap between the energy of this lowest state and the energy of the next state up. This is why defects in the solid can't scatter electrons, and why they conduct with no resistance. 
For an electron to scatter off a defect (or anything else) in the conductor you have to supply enough energy to jump across the gap between the lowest energy level and the next one up. However the energy available isn't great enough for this, and this means the defects can't scatter the electrons and that's why they superconduct. The trouble is you're now going to ask for an intuitive description of why the electron correlations open a gap in the energy spectrum, and I can't think of any way to give you such a description. Sorry :-(
{ "domain": "physics.stackexchange", "id": 3545, "tags": "superconductivity" }
Can we see beyond the Hubble Volume?
Question: The wiki article on the Hubble volume says that the Hubble volume is often confused with the limit of the observable universe. This thread: When will the Hubble volume coincide with the volume of the observable Universe? seems somewhat inconclusive and is related, but not identical. Is it possible for us to see beyond the Hubble sphere? I would think it isn't because it makes no sense if you think about seeing something which is receding faster than light. I think that light from us will never reach the body, and hence will never reflect off the body to come back to us. But apparently it works somehow. My focus is upon the counter-intuitive nature of the Hubble Volume's definition, rather than the actual equations which describe the furthest point identifiable. Answer: A crucial mistake in the question's reasoning is that the electromagnetic radiation doesn't need to be emitted by us and reflected by the receding body. Any radiation from there can help us see. But yes, we can see objects beyond the Hubble sphere right now. The main concept behind this is the fact that the Hubble sphere is expanding as the universe expands. Let's imagine a photon traveling towards us from a body beyond the Hubble Sphere. The definition of the Hubble Volume tells us that the photon is traveling through a region which is receding from us faster than the speed of light, hence the photon will be receding from us as well, at the difference between the speed of expansion of the universe at that point and the speed of light. However, sometimes the speed at which the Hubble sphere is expanding is faster than this speed at which the photon recedes. So at some point in time, the photon will end up inside the Hubble sphere, after which we deal with it easily.
{ "domain": "physics.stackexchange", "id": 49299, "tags": "cosmology" }
How we can identify the problem of Overfitting and underfitting and maintain bias?
Question: Basically, I'm new to the data science field, and I'm getting a little bit of confusion about overfitting and underfitting. Are overfitting and underfitting is totally depending upon the number of datasets or the behavior of data ? Can anyone explain the term of overfitting and underfitting and how to deal with this kind of problem? Answer: Under/overfitting depends on two things: the amount of data in your dataset and the complexity of your model. To identify when each of these is happening, you will have to split the data you have into two parts: training data and test data. You then train your model only on the training data, and then evaluate its performance (e.g. calculate its accuracy or any other metric you are interested in) on the training data and test data. If your model performs well on your training data (e.g. you get a very good accuracy while training a model), but cannot make good predictions on your test data, then we say that the model is overfitting. What this means is that the model has memorized the training data instead of learning the patterns in it. As a result, it cannot generalize and make good predictions on data it hasn't seen before (e.g. the test data). This could be fixed by either reducing the complexity of the model (e.g. if it is a neural network then reduce the number of layers) or by increasing the amount of data (e.g. collecting more data, or using data augmentation techniques) If your model doesn't perform well on both training and test data, then we say it is underfitting. This means that the model is not complex enough to learn the pattern in the training data. This can be fixed by using a more complex model (i.e. a model with more parameters).
{ "domain": "datascience.stackexchange", "id": 8296, "tags": "machine-learning, deep-learning, statistics, overfitting" }
Why do Type II Restriction Endonucleases cleave at palindromic sequences?
Question: Type II Restriction enzymes usually cut only at palindromic sequences. Is there any specific reason for that? Is there any advantage for bacteria if they cleave phage DNA at this type of sequence? Answer: First, not all restriction enzymes cut at palindromic sequences. A lot of them do though, simply because it is more effective. Recognising a palindromic sequence enables them to cut both strands of DNA at the "same" site, because the strand will have the same sequence only in different directions at that site. See Wikipedia for example.
{ "domain": "biology.stackexchange", "id": 9566, "tags": "biochemistry, bacteriology, restriction-enzymes" }
Questions about the derivation of the rocket equation
Question: Let us derive the final velocity of a rocket facing straight up that is attempting to leave the earth's surface. Basically, we are deriving Tsiolkovsky's rocket equation. We can see that initially, the rocket has some velocity v and a mass m. After a small change in time, we denote $dt$. The rocket has expelled burnt fuel that has a mass $dm$ and a velocity $(v-v_{e})$ where $v_{e}$ is the velocity of the gas relative to the rocket. The rocket now has a mass of $(m-dm)$ and an increased velocity of $(v+dv)$ Since the net external force is not zero momentum is not conserved, however, we can do the following. We can begin by showing that a small change in the momentum of the system is equal to a small change in the impulse. Since everything is happening in the same dimension we leave out the unit vector notation. $$d\boldsymbol{\vec{p}} = d\boldsymbol{\vec{J}}$$ $$d\boldsymbol{\vec{p}} = p_{f} - p_{i}$$ $$d\boldsymbol{\vec{p}} = (m-dm)(v+dv)+dm(v-v_{e})-mv$$ After expanding and cancelling some terms , $$d\boldsymbol{\vec{p}} = m\hspace{0.2em}dv - v_{e}\hspace{0.2em}dm$$ Since the gravitational force is the net external force on the system we can define the impulse to be , $$d\boldsymbol{\vec{J}} = F_{net} \hspace{0.2em}dt$$ $$ F_{net} = -mg $$ $$d\boldsymbol{\vec{J}} = -mg \hspace{0.2em}dt$$ After equating $d\boldsymbol{\vec{p}}$ and $d\boldsymbol{\vec{J}}$ $$m\hspace{0.2em}dv - v_{e}\hspace{0.2em}dm = -mg\hspace{0.2em}dt$$ Solving for dv , $$dv = \frac{-mg\hspace{0.2em}dt + v_{e} \hspace{0.2em}dm}{m}$$ $$dv = \frac{v_{e}}{m}\hspace{0.2em}dm - g \hspace{0.2em}dt$$ Integrating both sides we get, $$\int_{v_0}^{v}dv = \int_{m_0}^{m}\frac{v_{e}}{m}\hspace{0.2em}dm - \int_{0}^{t}g \hspace{0.2em}dt$$ $$$$ After evaluating the integrals and doing some algebra manipulation we get, $$\boxed{v = v_{0} + v_{e}\ln\left(\frac{m_0}{m}\right) - gt}$$ $$$$ After this derivation I would like to ask the following questions? 
We define the net external force on the system to be the graviational force , however , the rocket is expending fuel in the y direction which is pushing it up by newtons third law , why dont we include the reaction force $F_{reaction}$. I notice that we define the velocity of the expelled burnt fuel to be $(v-v_{e})$ I understand that this is because we want to describe the velocity of the gas relative to the observer on the ground since the velocity of the rocket is defined to be relative to the ground. But why do we define it to be $(v-v_{e})$ I understand why the velocities are subtracted since they are in opposite directions but I'm still left handing... What is a general way to define the velocity of an object relative to something? How realistic is this equation? , it does not include the force of fluid friction opposing the motion of the upward rocket. How would I redefine the equation to include for that? The impulse would change and therefore become $$d\boldsymbol{\vec{J}} = -mg\hspace{0.2em}dt-\frac{1}{2}C\rho Av^2\hspace{0.2em}dt$$ equating this to the change in momentum , $$d\boldsymbol{\vec{p}} = d\boldsymbol{\vec{J}}$$ $$m\hspace{0.2em}dv - v_{e}\hspace{0.2em}dm = -mg\hspace{0.2em}dt-\frac{1}{2}C\rho Av^2\hspace{0.2em}dt$$ here we would have to solve for dv and then integrate but it looks a bit complicated. Additionally to add even more realism , we know that the air denisty $\rho$ changes as a function of the distance above earths surface. But I dont think this is necessarily needed since it might not play a large role in the end. Answer: 1. Look at formula for $d\overrightarrow{p}$. In the final momentum you have considered momentum of both rocket (m-dm) and fuel (dm). This means that system includes rocket and fuel. Force between rocket and fuel is internal and will not be considered in $F_{net}$ 2 In general, relative velocity is given by $$ v_{AG} = v_{AF} + v_{FG} $$ Here $v_{AG}$ is velocity of "A" with respect to ground (G). 
$v_{AF}$ is velocity of A with respect to Frame (F). $v_{FG}$ is velocity of frame with respect to ground. (In reality A, F and G can be anything. However, it is easier to visualize it this way.) In this example: $v_{FG}$ is +v (velocity of rocket (frame) with respect to ground); $v_{AF}$ is $-v_e$ (velocity of fuel (A) with respect to rocket (frame)). This means $v_{AG} = -v_e + v$ (velocity of A with respect to ground) 3 You are right. This formula is approximate in many ways. Apart from air drag and variation in density, gravity also decreases as you move upwards. Variation in density and gravity will not matter for small toy rockets (used in firework displays). But air drag is unlikely to be negligible. So why are those factors ignored? I don't know for sure. But I have a theory. As soon as air drag is considered, obtaining a direct result for velocity becomes complicated (as you have noticed). So air drag is left out for pedagogical reasons.
{ "domain": "physics.stackexchange", "id": 87937, "tags": "newtonian-mechanics, rocket-science" }
Differences Between Two $ {L}_{1} $ Norm Minimization Schemes
Question: I was reading and working with L1 regularized least squares, where: $$ \arg \min_{\boldsymbol{x}} \frac{1}{2} {\left\| A \boldsymbol{x} - \boldsymbol{y} \right\|}_{2}^{2} + \lambda {\left\| \boldsymbol{x} \right\|}_{1} $$ is used to solve for sparse solutions in $\boldsymbol{x}$. However, I also stumbled on a different minimization for a similar case: $$ \arg \min_{\boldsymbol{x}} {\left\| \boldsymbol{x} \right\|}_{1} \mbox{ subject to } {\left\| A \boldsymbol{x} - \boldsymbol{y} \right\|}_{1}< \delta $$ so in the second case the expression is not unconstrained, but they switched both parts to L1 norm... What are the reasons to do so if both look for sparse solutions in $\boldsymbol{x}$ ? Answer: The first equation you have is often called the Quadratic Problem, which through the use of Duality can be shown to be equivalent to the Basis Pursuit De-Noising (BPDN) given as: $$ \arg \min_{\boldsymbol{x}} {\left\| \boldsymbol{x} \right\|}_{1} \mbox{ subject to } {\left\| A \boldsymbol{x} - \boldsymbol{y} \right\|}_{2}< \delta .$$ In your 2nd problem, the L2 norm is replaced with the L1 norm in the constraint. When you constrain the L2 error, you tend to end up with an error vector that is quite dense, i.e. a lot of non-zero elements, and each error is small with respect to $\delta$. By switching the error to the L1 norm, then they are trying to make the error vector sparse (as opposed to dense). So in this formulation: $$ \arg \min_{\boldsymbol{x}} {\left\| \boldsymbol{x} \right\|}_{1} \mbox{ subject to } {\left\| A \boldsymbol{x} - \boldsymbol{y} \right\|}_{1}< \delta $$ They are trying to find: An $\boldsymbol{x}$ that is sparse under the constraint that $\boldsymbol{e}=A \boldsymbol{x} - \boldsymbol{y}$ is also sparse. 
In this sense you might think that the problem they really want to solve is: $$ \arg \min_{\boldsymbol{x}} {\left\| \boldsymbol{x} \right\|}_{0} \mbox{ subject to } {\left\| A \boldsymbol{x} - \boldsymbol{y} \right\|}_{0}< \delta $$ but because that is a very difficult problem, they are using the convex formulation they have given.
{ "domain": "dsp.stackexchange", "id": 10170, "tags": "deconvolution, sparsity, sparse-model, norm" }
What is the difference between "derive" and "predict"?
Question: I'm working on attaining a better understanding of physics through independent study following S&Z 12E, and my book has asked me to derive a quantity from given laws and principles. I was able to succeed, but it was complicated, and something I'd like to be able to understand better. I'm wondering if this differs in usage in any meaningful way from the process of "making predictions with physics." Is the difference simply semantic because when making a derivation you won't have necessarily proposed any physical system to realize the assumed relationships, such as $Dv^2$ as a realized fluid resistance? The definition of "derive" that I've intuited is $$\text{A process by which physical laws and principles are taken}$$ $$\text{ with assumed quantities/relations, and traced logically}$$ $$\text{ to conclusions about the predicted relationships between quantities in the given situation.}$$ Is this correct? complete? incomplete? misleading? Is derivation simply synonym for "making predictions with physics" where the situation is taken to be partially unknown / underspecified? I'm sorry if these tags are hit-or-miss, I'm not sure how to classify this question. Update: Here's a mindmap detailing concepts I'm drawing from and their interrelationships. Reference: Young, Hugh D., et al. Sears and Zemansky's University Physics / Hugh D. Young, Roger A. Freedman ; Contributing Author, A. Lewis Ford. Pearson Addison-Wesley, 2008. Answer: As comment by John Doty: To derive means creating a mathematical line of thought, starting with some principles, assumptions etc. to arrive at a formula or theory. (The result need not be a "prediction", it may be an explanation for a previously known result. Although the derived theoretical result will then be available to predict outcomes in similar experimental setups, where so far no results have been obtained.) 
To predict literally means to make a prediction (a statement about something yet to be determined), which can be based on previous experimental evidence (the ball will fall down and not up because we have previously observed the same behavior every time we let it go), basic (qualitative) principles (the direction of the current will be ... because of Lenz's rule) or a derivation of some theoretical result (gravitational lensing, for example, derived from GR and then tested in experiment).
{ "domain": "physics.stackexchange", "id": 89545, "tags": "terminology, definition, soft-question, models" }
Simple JavaScript canvas game
Question: Here is a link to the code on JSFiddle. This is my first attempt at playing with canvas. Before I move on doing anything else, it would be nice to have insight from somebody who knows canvas and JavaScript better than me. Things I am looking for: Ways to optimize animation Ways to optimize the lazer drawing (I know I need to clear the lazers from the array every once in awhile when they are no longer within the drawing area, just haven't gotten around to it yet.) Ways to optimize the code in general and have good code re-use. HTML: <canvas id="world" style="height: 300px; width: 300px;" /> JavaScript: console.log("Game starting..."); var ship = new Object(); ship.name = "Enterprise"; ship.x = 0; ship.y = 0; ship.width = 50; ship.left = false; ship.right = false; ship.up = false; ship.down = false; ship.fire = false; ship.firerate = 5; ship.cfirerate = 0; var lazers = new Array(); var world = document.getElementById('world'); var cxt = world.getContext("2d"); $(document).bind('keydown', function(e) { if(e.keyCode==37){ ship.left = true; } if(e.keyCode==38){ ship.up = true; } if(e.keyCode==39){ ship.right = true; } if(e.keyCode==40){ ship.down = true; } if(e.keyCode==90){ //Z console.log("pew pew"); ship.fire = true; } }); $(document).bind('keyup', function(e) { if(e.keyCode==37){ ship.left = false; } if(e.keyCode==38){ ship.up = false; } if(e.keyCode==39){ ship.right = false; } if(e.keyCode==40){ ship.down = false; } if(e.keyCode==90){ //Z ship.fire = false; } }); function createLazer(type) { if (type == 1) {//LEFT LAZER cxt.beginPath(); cxt.moveTo(125+ship.x,140+ship.y); cxt.lineTo(125+ship.x,130+ship.y); var l = new Object(); l.type = type; l.x = ship.x; l.y = ship.y; return l; } else if (type == 2) {//RIGHT LAZER cxt.beginPath(); cxt.moveTo(125+ship.x+ship.width,140+ship.y); cxt.lineTo(125+ship.x+ship.width,130+ship.y); var l = new Object(); l.type = type; l.x = ship.x; l.y = ship.y; return l; } } function drawWorld() { cxt.fillStyle="#808080"; 
cxt.fillRect(0,0,300,300); } function drawLazers() { for (x = 0; x < lazers.length; x++) { cxt.beginPath(); cxt.strokeStyle="#FF0000"; if (lazers[x].type == 1) { cxt.moveTo(125+lazers[x].x,140+lazers[x].y); cxt.lineTo(125+lazers[x].x,120+lazers[x].y); } else if (lazers[x].type == 2) { cxt.moveTo(125+lazers[x].x+ship.width,140+lazers[x].y); cxt.lineTo(125+lazers[x].x+ship.width,120+lazers[x].y); } cxt.stroke(); lazers[x].y = lazers[x].y - 6; //console.log("drawing lazer" + lazers[x].x + lazers[x].y); } } function drawShip() { if (ship.left) { ship.x = ship.x -5; } if (ship.right) { ship.x = ship.x +5; } if (ship.up) { ship.y = ship.y -5; } if (ship.down) { ship.y = ship.y +5; } if (ship.fire) { if (ship.cfirerate == 0) { lazers.push(createLazer(1)); lazers.push(createLazer(2)); ship.cfirerate = ship.firerate; } } if (ship.cfirerate != 0) { ship.cfirerate = ship.cfirerate - 1; } cxt.beginPath(); cxt.strokeStyle="#000000"; cxt.moveTo(125+ship.x,140+ship.y); cxt.lineTo(150+ship.x,120+ship.y); cxt.lineTo(175+ship.x,140+ship.y); cxt.stroke(); } function clear() { cxt.clearRect(0, 0, 300, 300); } function gameLoop() { drawWorld(); drawShip(); drawLazers(); } setInterval(function() { clear(); gameLoop(); }, 30); Answer: Cool program! I have put my review of the code on JsFiddle. A basic synopsis of what I thought to improve: Everything constant about the map, ship, lasers, and keycodes is all in one place to improve scalability. I used object literals and array literals instead of new Object() and new Array() because using them is shorter and and makes things easier to manipulate. The keydown and keyup event handlers were refactored to eliminate duplicate code. The createLaser and drawLasers methods were refactored. I removed some drawing code from createLaser because it didn't seem to do anything, and I removed calculations in drawLasers that were redundant with createLaser. I added code in drawLasers to remove lasers from the array that are no longer on the map. 
I also removed or rearranged drawing code that didn't do anything or was being called too many times. I removed the clear() function because it didn't seem to do anything. I changed statements of form x = x + y, x = x - y and x = x + 1 to x += y, x -= y, and x++, respectively. I changed one instance of the form array.push(x); array.push(y); to array.push(x,y); I renamed lazer to laser because I kept typing laser and it caused bugs that here hard to find. You can rename it back, if you are accustomed to typing lazer. Here is a copy of the revised code: console.log("Game starting..."); var ship = { name: "Enterprise", x: 125, y: 120, width: 50, height: 40, left: false, right: false, up: false, down: false, fire: false, firerate: 5, cfirerate: 0, moveInterval: 5, color: "#000000" }, map = { width: 300, height: 300, color: "#808080", drawInterval: 30 }, laser = { height: 20, moveInterval: 6, color: "#FF0000" }, lasers = [], keys = { left: 37, up: 38, right: 39, down: 40, fire: 90 //Z }, getKey = function(key) { for (var i in keys) { if (keys.hasOwnProperty(i)) { if (keys[i] === key) { return i }; } } }, eventValues = { keyup: false, keydown: true }, types = { right: 1, left: 2 }; var world = document.getElementById('world'); var cxt = world.getContext("2d"); $(document).bind('keydown keyup', function(e) { var key = getKey(e.keyCode); ship[key] = eventValues[e.type]; }); function createLaser(type) { var x = ship.x; if (type === types.right) { x += ship.width; } var y = laser.height + ship.y; return { type: type, x: x, y: y, } } function drawWorld() { cxt.fillStyle = map.color; cxt.fillRect(0, 0, map.width, map.height); } function drawLasers() { cxt.beginPath(); cxt.strokeStyle = laser.color; for (var i = 0; i < lasers.length; i++) { var lsr = lasers[i]; if (lsr.y < -laser.height) { lasers.splice(i, 1); continue; } cxt.moveTo(lsr.x, lsr.y); cxt.lineTo(lsr.x, lsr.y - laser.height); cxt.stroke(); lsr.y -= laser.moveInterval; } } function drawShip() { if (ship.left) { 
ship.x -= ship.moveInterval; } if (ship.right) { ship.x += ship.moveInterval; } if (ship.up) { ship.y -= ship.moveInterval; } if (ship.down) { ship.y += ship.moveInterval; } if (ship.fire) { if (ship.cfirerate === 0) { lasers.push(createLaser(types.left), createLaser(types.right)); ship.cfirerate = ship.firerate; } } if (ship.cfirerate !== 0) { ship.cfirerate--; } cxt.beginPath(); cxt.strokeStyle = ship.color; cxt.moveTo(ship.x, ship.y + (ship.height / 2)); cxt.lineTo(ship.x + (ship.width / 2), ship.y); cxt.lineTo(ship.x + ship.width, ship.y + (ship.height / 2)); cxt.stroke(); } function gameLoop() { drawWorld(); drawShip(); drawLasers(); } setInterval(gameLoop, map.drawInterval) If you see anything that you think is weird, or have a question about what I did, just ask me about it.
{ "domain": "codereview.stackexchange", "id": 564, "tags": "javascript, object-oriented, canvas" }
Vector analysis query
Question: I cannot understand how and why those two expressions arise (the ones I have highlighted). Please explain. Answer: Assume the component of the velocity is a function of $x$, thus $v(x)$, then, you could write a Taylor expansion around the point P with $x_p$, thus $$ v(x)=v(x_p)+\left.\frac{dv}{dx}\right|_{x=x_p} x + O(x^2), \tag{1}$$ which linearizes the equations. As the quantities at point P in these equations are assumed to be known, you get $$v(x)\approx v_1+\frac{dv_1}{dx} x \tag{2} $$ Now, what do you get when taking $v(\Delta x/2)$ and $v(-\Delta x/2)$? The reasoning behind the linearization is that the $O(x^2)$ vanishes for $\Delta x\to 0$.
{ "domain": "physics.stackexchange", "id": 10078, "tags": "homework-and-exercises, fluid-dynamics, vectors" }
Gravitational lensing and cosmic strings
Question: Say we have a straight cosmic string lying along the $z$-axis, with energy-momentum tensor $$T_{\mu\nu}=\mu\delta(x)\delta(y)\operatorname{diag}(1,0,0,-1)\tag{1}\label{1}$$ for some small positive constant $\mu$. In the linearized Einstein theory, this will contribute a small perturbation to the flat metric, and we can write, in cylindrical polar coordinates $$ds^2=-dt^2+dz^2+dr^2+(1-8\mu)r^2d\phi^2\tag{2}\label{2}$$ We can further change the angular coordinate to get $$ds^2=-dt^2+dz^2+dr^2+r^2d\bar{\phi}^2\tag{3}\label{3}$$ Locally, the metric (\ref{3}) looks like Minkowski spacetime, but globally (\ref{2}) has an angular deficiency. The period of $\bar{\phi}$ is $(1-4\mu)2\pi<2\pi$, so that points at $\phi=0$ and $\phi=(1-4\mu)2\pi$ are identified and we're missing a ''wedge''. Mathematically, this is fine. But physically, what is this wedge, or the lack thereof, and how do we use it to explain gravitational lensing, i.e. the double image of a distant object as observed from behind the cosmic string? Answer: The missing wedge is telling you that the string (which has a mass!) is causing the spacetime to be curved. Now by using this "Minkowski" like chart, you are trying to cover a curved manifold with a flat chart. You can see an toy example of this by taking a piece of paper, cutting out a wedge and gluing the two edges; you obtain a cone, which is not flat. Physically, imagine the following setting: i.e. the string is in between a star and an observer. The metric (3) tells us that the spacetime is flat with a wedge missing, hence in this chart, the light rays move on straight lines. However, we need to identify the sides of the missing wedge, and hence we see that in fact the light rays bend
{ "domain": "physics.stackexchange", "id": 44432, "tags": "general-relativity, spacetime, metric-tensor, gravitational-lensing, cosmic-string" }
Fertilization of the human egg- where does our centrosome come from?
Question: Is there a centrosome in a human egg cell? Is the reason why the egg cell remains paused before meiosis 2 because there isn't a centrosome, and it only divides when the sperm fertilizes it thus it can have a centrosome? If this is so, then how did oogenesis happen? ? Answer: To answer the first part of your question. The sperm actually introduces two centrosomes. The centrosome then nucleates the new microtubule assembly to form the sperm aster — a step essential for successful fertilization. You can visit these sites Simerly, et al as well as Paweltz, et al
{ "domain": "biology.stackexchange", "id": 9341, "tags": "genetics, cell-biology, embryology, meiosis, gamete" }
frequency domain vs cosine wave amplitude
Question: Hey guys, what is the difference between b.) and c.)? Why do we need each of them? Excerpt from: http://www.dspguide.com/ch8/5.htm (scroll below) Answer: Why did you cut off the explanation on the figure? Equation 8-2:
{ "domain": "dsp.stackexchange", "id": 886, "tags": "frequency-spectrum, dft" }
Is there an easy way to make PVC from PVA?
Question: Say, I took Elmer's glue (or a similar Polyvinyl Acetate) and I inserted some Chlorine Tabs and warmed it up while stirring. Would the electronegative Chlorine turn some of it into Polyvinyl Chloride (PVC)? Would I get a Dioxin Compound too? Would I get any other toxic Side-Product? Answer: Chlorine tablets have positively charged chlorine atoms; you want negative ones, so chlorine tablets will not work. And no you won't get a dioxin compound unless you burn it, then you may get some. If you want to make PVC from PVA you can use Hydrochloric acid but you may not get a good yield.
{ "domain": "chemistry.stackexchange", "id": 4655, "tags": "home-experiment, safety, polymers" }
How to setup Intelisense on VS Code for ROS C++
Question: #include "ros/ros.h" is even underlined in green. I have this added to the c_cpp_properties.json file. I also installed the ROS extension (which I'm not sure does anything). { "configurations": [ { "browse": { "databaseFilename": "", "limitSymbolsToIncludedHeaders": true, "path": [ "/home/ben/workspaces/ROS/cir-kit/devel/include", "/opt/ros/kinetic/include", "/home/ben/workspaces/ROS/ackermann/src/twist_to_ackermann-1/include", "/usr/include" ] }, "includePath": [ "/home/ben/workspaces/ROS/cir-kit/devel/include", "/opt/ros/kinetic/include", "/home/ben/workspaces/ROS/ackermann/src/twist_to_ackermann-1/include", "/usr/include", "/usr/include/c++/5", "/usr/include/x86_64-linux-gnu/c++/5", "/usr/include/linux", "/opt/ros/kinetic/include/ros", "/usr/include/x86_64-linux-gnu" ], "name": "Linux", "intelliSenseMode": "clang-x64" } ], "version": 3 } When I hover over top of the include, it gives me an error: #include errors detected. Please update your includePath. IntelliSense features for this translation unit (/home/ben/workspaces/ROS/ackermann/src/twist_to_ackermann-1/src/twist_to_ackermann.cpp) will be provided by the Tag Parser. cannot open source file "x86intrin.h" (dependency of "ros/ros.h") Everything else ROS works... This in Ubuntu 16.04 and ROS Kinetic. Originally posted by horseatinweeds on ROS Answers with karma: 160 on 2018-02-09 Post score: 3 Original comments Comment by Amos on 2018-03-29: I meet the same problem. I find "x86intrin.h" in /usr/lib/gcc/x86_64-linux-gnu/5/include. Add this path to c_cpp_properties.json and #include "ros/ros.h" is OK. Comment by N.N.Huy on 2020-04-09: Have you solved this problem ? Comment by hansolo on 2020-10-02: There are suggested answers to this problem under another question. Answer: It's probably doable to set this up manually, but I would recommend to take a look at wiki/IDEs - Visual Studio Code (VSCode). It has a link to an extension that should take care of all of this for you. 
Originally posted by gvdhoorn with karma: 86574 on 2018-02-10 This answer was ACCEPTED on the original site Post score: 1 Original comments Comment by Mehdi. on 2021-05-11: "The extension does not require additional configuration" is the biggest lie ever! Comment by gvdhoorn on 2021-05-11: Thanks for the feedback. It would be more valuable if you could give your feedback to Microsoft, on their issue tracker. Your comment here will most likely not be read by many people. Comment by Mehdi. on 2021-05-11: As Samarth mentioned in his answer, this blog post really helps getting it setup correctly.
{ "domain": "robotics.stackexchange", "id": 38834, "tags": "ros-kinetic" }
Why atomic masses aren't integers?
Question: Most of the elements have isotopes, so the atomic masses are calculated depending on the percentage of the existing isotopes. That is clear. However, what about elements that have only one isotope (monoisotopic) - like fluorine? Shouldn't the atomic mass for it be a whole number and not 18.9984? Answer: The short answer is nuclear binding energy, which is the energy needed to disassemble an atom into its subatomic parts (or in some cases the energy released when this happens). The binding energy is a consequence of the strong and weak nuclear forces that hold atoms together. Where does this energy come from? It comes from the mass of the nucleons! What? It is true; most atoms have less mass than the sum of their parts, and that mass defect is converted into the energy that holds them together. If you do not believe me, let us look at one atom of carbon-12, which is used as the definition of the atomic mass unit: $$\mathrm{1\ u}=\dfrac{1}{12}\text{ of the mass of one }\ce{^12C}\text{ atom}$$ Thus, the atomic mass of an atom of $\ce{^12C}$ is by definition $\mathrm{12\ u}$. The atom is constructed from 6 protons, 6 neutrons, and 6 electrons, and the masses of those particles are: particle mass number total mass p 1.00727646681290 u 6 6.0436588008774 u n 1.0086649160043 u 6 6.0519894960258 u e 5.485799094622×10^−4 u 6 0.0032914794567732 u TOTAL: 12.0989397763600 u The sum of the parts of the $\ce{^12C}$ atom are more than the mass of the atom! This mass defect, $\mathrm{0.0989397763600\ u}$ in this case, is the value of the nuclear binding energy when converted to energy using $E=mc^2$. This handy graph from the Wikipedia article linked above shows that the binding energy per nucleon is not the same for all nuclei. If it were, then we would have integer values for monoisotopic nominal masses. However, the consequences of this change in reality could be as drastic as the heavier elements (like iron) not having enough energy to hold themselves together. 
An additional consequence is that there would be no productive nuclear reactions (fission, fusion, or radioactivity). All nuclear transmutations are driven by conversion of nuclei with lower binding energy per nucleon to nuclei with higher binding energy per nucleon, resulting in a net conversion of mass to energy. Without a difference in binding energy per nucleon, we would not have nuclear bombs (and probably we would be better off as a civilization), but we would also not have nuclear power and radioisotopes for compound labeling, cancer therapy, and medical diagnostics. We would also not have a ready supply of deuterium for NMR solvents, and we might not be able to use variations in isotopic distributions of $\ce{H}$, $\ce{C}$, and $\ce{O}$ to determine all sorts of things like did that "locally grown" food from that restaurant you like actually come for your geographic region?
{ "domain": "chemistry.stackexchange", "id": 3911, "tags": "isotope" }
General methods outlier detection
Question: What are general methods for outlier detection that do not assume any underlying distribution in the data? I have a dataset with the prizes of the rents in London, as well as their location, number of bedrooms, living rooms and bathrooms. I want to identify outliers in this data, where some of the variables are discrete and some of them are continuous. Any ideas on how to do this? Answer: Dbscan seems a great choice for you, look at scikit-learn implementation for further discovery. About being discrete or continuous, it actually doesn't matter, what you have to look at it is if the scale is the best suited for the algorithm in hand (and scikit-learn has algorithms to handle that). Another tip is to actually see if the attributes fit on a distribution, some of them might, and parametric methods of detecting outliers are better suited for the task.
{ "domain": "datascience.stackexchange", "id": 2962, "tags": "machine-learning, statistics, anomaly-detection, outlier" }
How to make a paste out of vinegar?
Question: I am trying to make a smooth vinegar cream for short term use of up to about 3 months (i.e. no preservatives are needed if stored in a cool place). Vinegars such as apple cider vinegar or pure acetic acid tend to be sold as liquids. These liquids are diluted like water and sometimes it's hard to administer them on a tissue; for example, on mushrooms or inside a candy, or as part of a cleaning material or cosmetic appliances. Starches gave a thick, hard paste and sank Pectin or Gum Arabic or Xanthan gum made clusters of solid material paste Cream of tartar worked nice but if I recall correctly the outcome still wasn't "creamy" Aloe Vera gel worked nice, but if I recall correctly it was often too dilute in my opinion How to make a paste out of vinegar? Perhaps Borax? Perhaps some vegan butter such as Shea butter, Cocoa butter, Mango butter, etc.? Answer: Thickeners are sometimes very eccentric. They just need to be coddled. For instance, xanthan will certainly work in your application, if you treat it nicely - or get the right xanthan gum. There are various grades of xanthan gum, even various grades of the food grade gum. One grade is called readily dispersible - but it is made by using formaldehyde, and restrictions on formaldehyde are currently making it difficult to find. The problem with xanthan gum is that it disperses readily in alkaline solutions (pH ~10), but begins swelling in lower pH solutions so fast that the particles become sticky on the surface and clump together so quickly that the inner portions of the particle are then removed from easy access to water which would swell the particle. So, one solution would be to get food grade (is that necessary?) readily dispersible xanthan gum. 
Another, slightly more complicated, would be to disperse the not-so-readily dispersible xanthan gum in water with a slightly alkaline pH (pH 8 will have 100 times more OH$^-$ ions than H$^+$ ions), until the mixture is smooth (and way too thick!), then dilute with your vinegar and mix thoroughly. I've done this on a large scale (1000 gallons). A little NaHCO3 or even Na2CO3 will get you the pH =8, then the vinegar will totally overcome the alkalinity, and the xanthan gum will be already swollen and thickened. Some thickeners may work better at the low pH of vinegar, like the suggested modified celluloses such as methyl cellulose or hydroxyethyl cellulose, but making a pre-thickened mix and then diluting with vinegar might work better in all cases. In a similar thickening situation, egg albumen coagulates in vinegar. "...by adding vinegar, we get ...increased acidity to help the egg white coagulate and form a solid white". (Ref) So don't bother trying egg white to thicken your vinegar! Ref: https://www.wgtn.ac.nz/science/ask-a-researcher/why-do-we-add-vinegar-when-we-are-poaching-an-egg
{ "domain": "chemistry.stackexchange", "id": 16589, "tags": "acid-base, food-chemistry, medicinal-chemistry, viscosity" }
Minimum computer specs for buying a used computer
Question: So I tried to setup my laptop for school work to dual boot windows and linux only to nearly fuck it up due to windows 8 complicating everything, so I've decided not to take the risk and instead buy a cheap used laptop. Maximum price is $80 (I'm from Denmark, so please do not send me offers) so of course the laptop will not be great, but what are the minimum requirements for running linux and ROS? Originally posted by Dynamitetalks on ROS Answers with karma: 61 on 2015-04-25 Post score: 0 Answer: The core ROS libraries will run on just about anything, particularly if it can run Ubuntu. Which packages you want to use and what you want to do with your robot will have a much more significant impact on the CPU and memory requirements. As suggested elsewhere (http://answers.ros.org/question/203712/to-buy-laptop-for-ros/ , http://answers.ros.org/question/61748/laptop-recommendations/ and http://answers.ros.org/question/52679/laptop-recommendation/ ), look for a laptop with an Nvidia graphics card. Given your extremely limited budget, just buy the most powerful laptop you can, and hope it's enough. Originally posted by ahendrix with karma: 47576 on 2015-04-25 This answer was ACCEPTED on the original site Post score: 0
{ "domain": "robotics.stackexchange", "id": 21537, "tags": "ros" }
Observer Transformation in general relativity
Question: Let's say there is a particle moving with 4-velocity $U^{\mu}_{PA}$ in spacetime with respect to observer A and there is another observer B moving with velocity $U^{\mu}_{BA}$. In special relativity, if we wanted to find the velocity of particle with respect to observer B. We would just do Lorentz Transformation. However, in general relativity that is not possible. Sean Carroll in his textbook on general relativity suggests that relative velocity is an ill defined concept. In what context does he mean that? Is it in general hard to define a 4 velocity of objects in curved spacetime Or, the general coordinate transformation between observers is not well defined and we can only transform between observers that are close to each other. If so, then aren't we basically trying to setup local Minkowski spacetime (locally inertial frame) to perform the transformation and assuming that to the direction we take. Does this not imply that the particle whose velocity we wanna transform to another observer's frame of reference needs to be close. What would happen if the observer A, observer B and particle are far away from each other so that we can't setup a locally inertial frame for all of them. I would like to know the mathematical reasons behind why relative velocity is not well defined in General Relativity. Is it because in curved spacetime velocity of particle and velocity of observer B live in different tangent space. Edit: I just found that Gullstrand–Painlevé coordinates are a particular set of coordinates for the Schwarzschild metric – a solution to the Einstein field equations which describes a black hole. The ingoing coordinates are such that the time coordinate follows the proper time of a free-falling observer who starts from far away at zero velocity, and the spatial slices are flat. But when we talk about Lorentz transformation we are talking about transformation that leaves the metric, as is. However these transformations change the metric tensor. 
So, what do we truly mean by relative velocity is ill defined in general relativity? Does it have something to do with the fact that charts are local and velocities defined using coordinate could become ill defined. Answer: You can always set up locally lorentz frames for any event. You can also define a 4 velocity for any particle trajectory, which takes values in the tangent space at that point. The issue, as you mention, is that they are in different points in space so the velocities live in different tangent spaces. In general, it only makes sense to compare two vectors at the same point. In order to compare vector spaces at different points, you need some invertible linear map between them to identify them. Which identification do you use? The answer is provided by parallel transport/connection. However! The resulting answer depends on the path you use to connect the two points, and curvature measures precisely the amount that these differ with the choice of two paths. The reason it works in special relativity is because the curvature is 0, so it doesn't matter what path we take, we get the same linear transformation. Technically speaking, even nearby points can't really be compared because there's still multiple paths. You can cheat a bit and make things work either when spacetime is almost flat in the vicinity (as we do here on earth), or you can impose canonical paths e.g. if you're at a point $p$, you can compare with points close enough to $p$ s.t. there is a unique geodesic between them. The issue in this second one is you have to change what "close" means point to point (it'll be smaller at places with more curvature). It's probably better to just abandon the notion of relative velocity at different points. I think for the last question, take a locally lorentz frame at each point along the free fall trajectory and use those coordinates? (However, these are not unique because of lorentz transformations)
{ "domain": "physics.stackexchange", "id": 99097, "tags": "general-relativity, differential-geometry, reference-frames, observers" }
Keras: Misunderstanding what Keras does when invoking a Model, providing an Input?
Question: In a class named Generator, I have defined a model with the below method define_model: def define_model(self): conv2d = Generator.__last_block() output = Activation('tanh')(conv2d) model = Model(self.input_layer, output) return model In another file, I am writing the entry-point of my Python program, which calls the generator's define_model method: generator = Generator() generator_model = generator.define_model() input_low_resolution = Input(shape=low_resolution_shape) generated_high_resolution_images = generator_model(input_low_resolution) As I'm reading a course, I however have a queston about this code: what does Keras do when it executes the line generated_high_resolution_images = generator_model(input_low_resolution)? As far I can understand, it doesn't define a model (my model is already defined thanks to generator.define_model()!). And since the Keras methods train, fit or other aren't called, I deduce that this line doesn't train the model. By the way, it's a really weird line for me, because it passes a parameter to a reference (the reference to the object is generator_model and the parameter is input_low_resolution). Normally we pass parameters to the reference's methods (constructor and other methods). Answer: Keras does a lot of stuff when you call a model. I guess that the most important is that it defines the trainable variables and the graph based on the inputs and outputs. For instance actually making any dense layers, and not just the representations. For instance, if you did mod1=generator_model(input_low_resolution) and mod2=generator_model(input_low_resolution), then if you train mod1 then mod2 would not be affected as they have different parameters. So the .define_model just makes a "handle" to your model, which when called actually "builds" it. This functionality is very useful when you want to mix-and-match multiple layer configurations, or working with more complex structures like for instance GANs or ADDA. 
I would argue that your implementation is not exactly canonical and actually I'd recommend looking at the tensorflow.keras.Model example. Here they introduce a very nice, canonical way of making your own model. Furthermore maybe Keras Model class API and Getting started with the Keras functional API can be of help.
{ "domain": "datascience.stackexchange", "id": 5711, "tags": "python, keras, machine-learning-model, data-science-model" }
Not clear on how to calculate sub-radar latitude
Question: How to find sub-radar latitude in reference to radar telescopes hitting near earth asteroids? Answer: In the common case asteroids will rotate around a nearly fixed axis, like the Earth. That axis of rotation defines a north pole and south pole for that asteroid. The north pole is the one where, when a viewer is seeing the asteroid from above it, the asteroid is rotating counter-clockwise. Once you have north and south poles, you can define an equator and, thus, a system of latitude. I have no idea what is done for asteroids that are undergoing chaotic tumbling. The "sub-radar latitude" is the asteroid's latitude that is directly facing the radar viewer. A good example from the literature that uses the term many times, and includes a short definition, is arXiv:1101.3794. A short excerpt: [...] implying significant change in the object’s sub-radar latitude (the angle between the asteroid-Earth line and the object’s equatorial plane).
{ "domain": "astronomy.stackexchange", "id": 1859, "tags": "telescope, astrophysics" }
Varying the action with respect to the metric in one dimension
Question: I am reading Witten's interesting article What every physicist should know about string theory. On page 39 he gives the general relativistic action in one spacetime dimension of a scalar fields $X_I$, with $I=1\ldots D$: $$ I=\int d t \sqrt{g}\left[\frac{1}{2} \sum_{I=1}^{D} g^{t t}\left(\frac{d X_{I}}{d t}\right)^{2}-\frac{1}{2} m^{2}\right]\tag{1} $$ where $m$ is a constant and $g_{tt}$ is a $1\times1$metric tensor. He then introduces the canonical momentum, $P_{I}=d X_{I} / d t$, and goes onto give the equation of motion which he says is obtained by varying the action $I$ with respect to $g$: $$g^{t t} \sum_{I=1}^{D} P_{I}^{2}+m^{2}=0.\tag{2}$$ I am having trouble deriving this equation. I am assuming that $g=g^{tt}$ and getting $$3g^{t t} \sum_{I=1}^{D} P_{I}^{2}-m^{2}=0.\tag{3}$$ I would be very grateful if someone could explain how to get the correct answer. Answer: Here are the key points, which I'll express for an arbitrary number of spacetime dimensions because that makes the pattern more clear: Inside $\sqrt{g}$, the thing denoted $g$ is the magnitude of the determinant of the metric tensor $g_{ab}$. $g^{ab}$ are the components of the inverse of the metric tensor, defined by the condition $\sum_b g^{ab}g_{bc}=\delta^a_c$. Specialized to one-dimensional spacetime, that general pattern reduces to this: $g^{tt}$ is the inverse of the quantity $g$ inside the square root. To make this explicit, we can write the metric tensor as $g$ (because it only has one component), and then $g^{tt}\equiv g^{-1} = 1/g$. Now that we've deciphered the notation, we can do the calculation. To reduce clutter, I'll write the action as $$ I \propto \int dt\ g^{1/2} (g^{-1} K - m^2) $$ with $K\equiv \sum_n (dX_n/dt)^2$. 
This gives \begin{align} \frac{\delta I}{\delta g} &\propto (g^{-1} K - m^2)\frac{\delta }{\delta g}g^{1/2} + g^{1/2}\frac{\delta}{\delta g} (g^{-1}K-m^2) \\ &= (g^{-1} K - m^2)\frac{g^{-1/2}}{2} - g^{1/2}g^{-2}K \\ &= -\frac{g^{-1/2}}{2}(g^{-1}K+m^2), \end{align} so setting $\delta I/\delta g=0$ gives the desired equation of motion.
{ "domain": "physics.stackexchange", "id": 75614, "tags": "homework-and-exercises, general-relativity, lagrangian-formalism, action, variational-calculus" }
What is Self-noise in TED algorithm
Question: I am working on a research project. The main task is a timing error correction algorithm. I started by studying existing algorithms. Currently I have read about the Gardner TED algorithm for PSK modulation signals. In the last publication (“A Modified Gardner Detector for Symbol Timing Recovery of M-PSK Signals” or “Symbol-Timing Recovery with Modified Gardner Detectors”) a self-noise of the TED algorithm was mentioned. It is written that because PSK signals are highly bandlimited, self-noise appears. One drawback of the Gardner detector is that it has significant pattern-dependent jitter or self-noise for bandlimited signals. the jitter floor caused by self noise, as it is typical for symbol timing recovery in general, is lowered by nearly two orders of magnitude. Honestly, I have never read about self-noise in the context of a TED algorithm. Why does self-noise appear? Is it the same self-noise that appears in a microphone? Sub-Question Which comes first: the TED or downsampling? Is it important which one comes first? Answer: In the Gardner Timing Error Detector (TED), "self-noise" is induced from the zero-crossing jitter caused by the inter-symbol interference (ISI) at these locations due to the pulse shaping filter. The "zero-ISI" pulse shaping filters reduce bandwidth with zero-ISI at the symbol sampling locations for data decision, but this is at the expense of increased ISI at the zero-crossing locations which are used by many timing detection algorithms, including the Gardner TED. The inter-symbol interference, meaning the tails of the impulse response of previous symbols changing the zero-crossing location in future symbols, which is due to the pulse shaping filter is specifically the source of "significant pattern-dependent jitter or self-noise for bandlimited signals." 
The trajectory through the zero-crossings with no other noise contributions (high SNR) depends entirely on the pattern of all possible prior symbol combinations within the memory of the pulse shaping filter that those symbols will pass through in the process of limiting their bandwidth. This is further detailed at this post where it is shown that the error curve (the "S-curve") of magnitude versus timing error in the TED is the waveform at the zero crossing locations (changed in sign properly if the data is going 0 to 1 or from 1 to 0). Thus we see all this effect directly from an eye diagram such as the one posted below where the zero-ISI at the symbol decision for data demodulation is clear, as well as in comparison the substantial jitter at the zero-crossings (which is all due to the pulse shaping filter alone in this plot). Notice specifically in these comparative eye-diagrams that there is timing jitter in the zero-crossings of the receiver waveform. In these plots, the waveform on the left has only gone through one pulse shaping filter: the Root-Raised-Cosine (RRC) filter in the transmitter. This shows that transmitted waveform as received prior to the second RRC in the receiver (for the complete Raised Cosine response with zero-ISI at the symbol sample locations for data demodulation as indicated by all the trajectories passing through the same point in the right hand plot). For this reason a Gardner TED works better when using the waveform prior to the second RRC filtering step in the receiver (the matched filter) unless further pre-filtering is used to eliminate this ISI at the zero-crossing locations. Loop bandwidth in the timing loop can also be considered a source of "self-noise" in the sense that if the loop BW is too wide the loop can remove information content from the modulation if phase modulation is used. 
This is really removing S from SNR rather than adding N, but the end result is the same that the SNR will be reduced due to the timing loop implementation. I explain this trade further at this post. I also found interesting the noise shaping property of this pattern noise, such that in proximity of the tracking position the noise is shaped such that more noise is filtered out by the tracking loop itself, whereas in acquisition conditions where a timing offset exists the noise is white as demonstrated in the graphic below showing the frequency spectrum of the Gardner TED pattern noise (self-noise) in both cases. The plot on the left shows all possible outputs of the TED versus timing offset, along with the average of all these which is the "S-curve" as the measured error for our timing loop--- as time offset increases to the right, the average error is positive and as time offset decreases to the left, the average error is negative. This error is what is integrated (averaged) in a timing loop which when locked will drive the waveform sampling to be at position A (and as typically done with 2 samples per symbol, the other sample will be at our optimum sample location for data demodulation; the decision sample). The plots on the right are the FFT of the overall noise from the two slices on the left, so represent the frequency spectrum of this pattern noise. Certainly from this plot we see when considering the error on any one sample that the self-noise is HUGE, but this should not be an issue in a properly designed timing loop where we are only concerned with the noise after the loop does it's longer term averaging of this noise (which is considered as part of the loop design together with the overall system requirements).
{ "domain": "dsp.stackexchange", "id": 10244, "tags": "digital-communications" }
Using AC voltage for electroplating
Question: As far as I know, in the electroplating method, the type of voltage used is DC voltage. My question is, does replacing the DC current supply with an AC one affect the functioning of the cell? And why? Answer: The difference is quite huge. Firstly, the deposition will occur on both electrodes (if possible). Secondly, if the frequency is high enough, there will be no deposition at all as only polarization and depolarization effects take place. The frequency affects mass transport and the electrical double layer. Depending on the electrolyte composition, the dissolution (if possible) will reduce the deposition. Usually pulse electroplating is used. The topic is a broad one; please see some articles for more info: An Overview of Pulse Plating - Norman M. Osero and Pulse and pulse reverse plating—Conceptual, advantages and applications, M.S. Chandrasekar, Malathy Pushpavanam
{ "domain": "chemistry.stackexchange", "id": 4028, "tags": "electrochemistry" }
Physics books covering classical mechanics
Question: I am going to be a high school freshman next year and I have acquired a strong interest in physics. I have a mathematical background up to, but not including, Calculus. I am looking for in-depth resources covering classical mechanics, enough to move on to more in-depth texts on relativity as well as quantum theory. Again, I have a strong math background covering all the work leading up to Calculus, and I will be taking Calculus next school year. Answer: When I was in school, I took great pleasure in the lectures by Prof. Walter Lewin, physics professor at MIT, but now retired. His style of lecturing is quite unique and got him rather famous on the internet. All of his undergraduate lectures have been video-taped and are available through MIT's OpenCourseWare program. As for the mathematical prerequisites, Calculus is sort of required. I'm afraid there's not so much you can do without it. But you may just ignore the bits you don't understand yet and review them later. Anyways, classical mechanics is a very useful thing to have in mind when learning about Calculus. It's what Newton developed calculus for! When you have mastered classical mechanics, you can just proceed with electrodynamics, which is a little tougher on your mathematics. But, again, you can just give it a try and -- having the physical application in mind -- you might find multi-variable calculus way more intuitive when learning it with the necessary mathematical rigor. Regarding quantum mechanics, I wouldn't touch that. Often, when people are discussing quantum mechanics without the proper mathematical tools, they end up talking about things like "wave-particle duality" or (even worse) Schrödinger's cat. This will leave you more confused than before and give you the impression that there's some spooky magic to QM. It's not. It just requires lots of math.
{ "domain": "physics.stackexchange", "id": 14424, "tags": "classical-mechanics, resource-recommendations, soft-question, education" }
Synthesis of trans alkene with organoborane
Question: Trans alkenes can be synthesized with organoboranes. The following sequence is represented in Smith's Organic Synthesis. What does the mechanism look like after the first step (hydroboration)? Answer: I think this might be an alkyl shift that results in an $\mathrm{S}_{N}2$ type substitution at an $sp^{2}$ center. Cool. I've only ever seen $\mathrm{S}_{N}2$ type substitution at an $sp^{2}$ center for vinyl iodides. Unfortunately, I don't have my teaching materials on me at work, so I can't find the reference for this for you. EDIT: Note the "inversion" at the $sp^{2}$ center. Greg Fu had this great example of a substitution on a vinyl iodide, like I mentioned above. I can't seem to find this reference, but I just emailed him, so I'll update when he writes me back.
{ "domain": "chemistry.stackexchange", "id": 7008, "tags": "organic-chemistry, reaction-mechanism, synthesis" }
Synchronisation with Subscribers
Question: I would like to have two subscribers in one file. Two subscribers are supposed to subscribe to two different topics, get their respective data and process them. In order to get this working, I need to solve the synchronization issue. I've already look here but it wasn't very helping, since his way of subscribing is not how I do. Here is my code: ros::init(argc, argv, "radarSubscriber"); ros::NodeHandle n("~"); // subscribe to the multibeam ros::Subscriber sub = n.subscribe("/multibeam", 500, multibeamCallback); // subscribe to image_processor ros::Subscriber sub = n.subscribe("/processor", 500, processorCallback); // part I took from the link ------------------------------------------------------------------ typedef sync_policies::ApproximateTime<Image, Image> MySyncPolicy; // ApproximateTime takes a queue size as its constructor argument, hence MySyncPolicy(10) Synchronizer<MySyncPolicy> sync(MySyncPolicy(10), image1_sub, image2_sub); sync.registerCallback(boost::bind(&callback, _1, _2)); // ------------------------------------------------------------------------------------------- What should come there instead of image, image is what I need to know. He was trying to synchronize two cameras, therefore image image works for him. What is it going to be there for me? Should I put the data types of mine (the ones that I subscribe to and get via callback) ? Thanks in advance. EDIT: So after figuring out that the regular subscribers won't work, I used the message filters. However the problem persists. 
Here is the code that I try to compile: // subscribe to two topics, one for radar beams, one for frame sectors message_filters::Subscriber<sensor_msgs::LaserScan> subMultibeam(n, "multibeam", 1); message_filters::Subscriber<std_msgs::Int32MultiArray> subProcessor(n, "imgProcessor", 1); // synchronize the callback through approximate time approach typedef sync_policies::ApproximateTime<sensor_msgs::LaserScan, std_msgs::Int32MultiArray> MySyncPolicy; Synchronizer<MySyncPolicy> sync(MySyncPolicy(10), subMultibeam, subProcessor); sync.registerCallback(boost::bind(&callback, _1, _2)); and here is a small piece of the error message that I am getting (the whole message is enormously big and I can't paste it here): /opt/ros/hydro/include/message_filters/sync_policies/approximate_time.h: In member function ‘void message_filters::sync_policies::ApproximateTime<M0, M1, M2, M3, M4, M5, M6, M7, M8>::checkInterMessageBound() [with int i = 1, M0 = sensor_msgs::LaserScan_<std::allocator<void> >, M1 = std_msgs::Int32MultiArray_<std::allocator<void> >, M2 = message_filters::NullType, M3 = message_filters::NullType, M4 = message_filters::NullType, M5 = message_filters::NullType, M6 = message_filters::NullType, M7 = message_filters::NullType, M8 = message_filters::NullType]’: /opt/ros/hydro/include/message_filters/sync_policies/approximate_time.h:218:7: instantiated from ‘void message_filters::sync_policies::ApproximateTime<M0, M1, M2, M3, M4, M5, M6, M7, M8>::add(const typename boost::mpl::at_c<typename message_filters::PolicyBase<M0, M1, M2, M3, M4, M5, M6, M7, M8>::Events, i>::type&) [with int i = 1, M0 = sensor_msgs::LaserScan_<std::allocator<void> >, M1 = std_msgs::Int32MultiArray_<std::allocator<void> >, M2 = message_filters::NullType, M3 = message_filters::NullType, M4 = message_filters::NullType, M5 = message_filters::NullType, M6 = message_filters::NullType, M7 = message_filters::NullType, M8 = message_filters::NullType, typename boost::mpl::at_c<typename 
message_filters::PolicyBase<M0, M1, M2, M3, M4, M5, M6, M7, M8>::Events, i>::type = ros::MessageEvent<const std_msgs::Int32MultiArray_<std::allocator<void> > >]’ /opt/ros/hydro/include/message_filters/synchronizer.h:160:5: instantiated from ‘message_filters::Synchronizer<Policy>::Synchronizer(const Policy&, F0&, F1&) [with F0 = message_filters::Subscriber<sensor_msgs::LaserScan_<std::allocator<void> > >, F1 = message_filters::Subscriber<std_msgs::Int32MultiArray_<std::allocator<void> > >, Policy = message_filters::sync_policies::ApproximateTime<sensor_msgs::LaserScan_<std::allocator<void> >, std_msgs::Int32MultiArray_<std::allocator<void> > >]’ /home/eren/uwsim_ws/src/radar_subscriber/src/RadarSubscriber.cpp:81:79: instantiated from here /opt/ros/hydro/include/message_filters/sync_policies/approximate_time.h:637:117: error: ‘value’ is not a member of ‘ros::message_traits::TimeStamp<std_msgs::Int32MultiArray_<std::allocator<void> >, void>’ /opt/ros/hydro/include/message_filters/sync_policies/approximate_time.h:645:119: error: ‘value’ is not a member of ‘ros::message_traits::TimeStamp<std_msgs::Int32MultiArray_<std::allocator<void> >, void>’ I suspect that the problem is related to the parameters. I looked at the relevant website but there is nothing specified. Originally posted by Jägermeister on ROS Answers with karma: 81 on 2016-03-01 Post score: 0 Answer: You need to add the types that you are receiving in your subscribers. Check what you have in your multibeamCallback and processorCallback. Also, you have two subscribers with the name sub. You need to pass the subscribers to the synchronizer, they should have different names. (As this is actually a redeclaration in your code, I'm guessing the way you have it will not compile anyways...) Edit Whoops. This will not work with regular Subscribers. According to the wiki, you need to add respective message_filters::Subscribers, which definitely makes more sense here... You synchonize, i.e. 
you only have one callback taking all messages, instead of multiple callbacks. So you need to follow the example you linked... Edit2 To synchronize, your message need to have a std_msgs/Header. This actually contains the TimeStamp. Without, those obviously cannot be synchronized. Thus, Int32MultiArray is actually of the wrong message type... (Didn't think of this before...) Originally posted by mgruhler with karma: 12390 on 2016-03-01 This answer was ACCEPTED on the original site Post score: 1 Original comments Comment by Jägermeister on 2016-03-01: You are right about the variable name sub, thanks, I fixed it. So, I have const sensor_msgs::LaserScan::ConstPtr& and const std_msgs::Int64MultiArray& in these callbacks, respectively. This means I should feed these two in ApproximateTime, I guess. Comment by mgruhler on 2016-03-01: not the ConstPtrs, but the type, i.e. sensor_msgs::Laserscan and std_msgs::Int64MultiArray, I guess... Comment by Jägermeister on 2016-03-01: There seems to be an error at typedef sync_policies::ApproximateTime<sensor_msgs::LaserScan, std_msgs::Int32MultiArray> MySyncPolicy; Synchronizer<MySyncPolicy> sync(MySyncPolicy(10), subMultibeam, subProcessor); as it does not compile when I include the second line. Comment by mgruhler on 2016-03-01: next time please edit your question with the error output... See my edit above... Comment by Jägermeister on 2016-03-01: @ edit2 In the example given in the website, there was no such a header so I didn't put it either. Also, what do you mean by Int32MultiArray was of "wrong message type"? Is there no way in ROS, of synchronizing such simple, primitive two topics? Comment by mgruhler on 2016-03-01: The messages need to contain a header and with this the timestamp, i.e. they need to be stamped. Otherwise, you'll only know when the message arrives, but not when it was sent. Int32MultiArray does not have a header, so no timestamp. 
So it is not possible to use this for time synchronization Comment by mgruhler on 2016-03-01: You need to use stamped message type, (maybe you need to create you own). This is why it is "wrong" here. As you don't have any guarantees in ROS, when a message is delivered, you need to rely on the timestamp in the header for this. For types without headers, this is thus not possible, afaik.
{ "domain": "robotics.stackexchange", "id": 23955, "tags": "ros, synchronization, callback" }
Navigation Stack Computation from External Computer
Question: Hello everyone! I am working on a ROS Indigo project where we are going to be reading all of our sensor information from an Ubuntu Trusty install on a BeagleBone Black Rev C and sending that information over the appropriate topics (odometry, laser scan topics, etc) to an external computer over the network (also Ubuntu Trusty running ROS Indigo), which will then send back the appropriate twist messages to the base controller node that exists within the workspace on the BeagleBone Black. So, the only nodes that should be running on the BeagleBone are the IMU Node, the SICK LMS wrapper node, and the tf_configuration nodes (We still haven't figured out how to get those set up exactly, so insight on that is also appreciated). I had been following the tutorial here: http://wiki.ros.org/navigation/Tutorials/RobotSetup My only concern is that the only way for me to get my_robot_name_2dnav package to compile correctly with the 'move_base' dependency that is apparently required was for me to install 'ros-indigo-navigation' on the BeagleBone. My concern with this is that we don't want the BeagleBone to be computing any of this information as it is at almost 80% CPU usage just to read and send the LaserScan message from the LIDAR. Am I doing this correctly? Or am I missing something to connect the navigation stack to the external computer that will be able to handle all of the processing necessary? Thanks for all input Originally posted by ronaldh12 on ROS Answers with karma: 23 on 2015-04-03 Post score: 1 Original comments Comment by l0g1x on 2015-04-03: Do you not want move_base installed on the beaglebone due to storage limitations on beaglebone? or do you just not want move_base running on the beaglebone? Comment by ronaldh12 on 2015-04-03: I just do not want move_base running on the beaglebone. It's going to require too much processing power, when we have an external computer to do that. 
Comment by l0g1x on 2015-04-03: so if you dont care about it being installed, then you can still install move_base on the beaglebone, but just dont run the move_base node on it. instead run it on your external computer. Did you properly setup your ros network ? Comment by ronaldh12 on 2015-04-03: So, what you're saying is that it won't matter that it's on there or not. So, I can technically take that dependency off? Or would that screw something up? And from there I should just run 'roslaunch move_base move_base_0.3_to_0.2.launch' from the external computer? Comment by ronaldh12 on 2015-04-03: Yes, we got that working last week! It's amazing how fast it sends LIDAR information over the network. It's only ~1 sec delay, which is way better than we expected. Comment by ronaldh12 on 2015-04-03: Ah, I just found where it shows to create the move_base.launch file. I think it's finally starting to make a little sense! Answer: Continuing off the comments, I dont know how your system/packaging is setup, so its hard to say if you want to take the dependency off or not. For example, if you have say 1 github repo that you clone to both the beaglebone and the external computer so that they both have the same files, then you probably wouldnt want to remove move_base as a dependency since it will also affect your external computer. Ideally, I think you may want to setup your system in a way similar to how Clearpath organizes there github repo for their jackal robot . You would basically divide the different parts related to the robot into separate repos. 
For example you could have say two repo's: robot_control (this repo could contain any driver/sensor related items) robot_navigation (this repo would have your move_base related items) That way you could only clone the robot_control repo to your beaglebone (and not even have to install/have move_base on the beaglebone) and clone the robot_navigation repo to your external computer (where you would have move_base listed as a dependency just like they tell you to in the setting up the navigation stack tutorials) If you setup your network correctly, then just run the sensors and robot driving related stuff on your beaglebone, and run move_base on your external computer. You can also look at the roslaunch machine examples to launch nodes on different machines, from a single computer (all on same network) EDIT 1: My understanding of it (may be wrong) is that "jackal" is a common package used amongst all the different types of jackal setups (simulation or real world). Things like the URDF of the robot, and custom messages the robot sends. Jackal_robot is specific to the physical robot itself (boot up/onstart procedures). If you spend some time looking through clearpath's repo's you will learn ALOT. It just takes time and curiosity :) Originally posted by l0g1x with karma: 1526 on 2015-04-03 This answer was ACCEPTED on the original site Post score: 2 Original comments Comment by ronaldh12 on 2015-04-04: Brilliant. Absolutely brilliant. Will update if I run into problems. Thank you so much! Comment by ronaldh12 on 2015-04-04: Could you help distinguish the difference the "jackal_robot" packages and the "jackal" packages? Comment by l0g1x on 2015-10-20: @ronaldh12 The jackal_robot package/repo contains things that are completely specific to the real world jackal robot (i.e. scripts for network setup, robot_upstart, etc..). The jackal package contains common info that can be used across any testing platform (simulation, or real world)
{ "domain": "robotics.stackexchange", "id": 21341, "tags": "ros, navigation, 2d-nav-goal, network" }
Gauge anomalies ruin the unitarity - the explanation involving ghosts
Question: An outline As is known, the presence of gauge anomalies leads to a breakdown of the unitarity of the gauge theory. One way to understand this is to invoke the BRST quantization of the gauge field theory. It goes as follows. The gauge invariance in fact describes the redundancy of the Hilbert space of gauge-variant rays relative to the space of gauge-invariant rays. As a result, the correct state in a gauge theory is defined to be invariant under the gauge transformation. In the path integral formulation of the gauge theory, this redundancy is nothing but the reduction of the integration over all gauge field configurations to the integration over the ones satisfying the gauge fixing condition; the latter defines a surface (gauge orbit) in the space of gauge field configurations. For particular choices of the gauge fixing condition this redundancy leads to generating the ghost action. The ghosts are unphysical states with indefinite norm in the Hilbert space. Although they mediate the physical processes, they can't be in in- or out- states, so their indefinite norm doesn't break the unitarity. Their ability to contribute to the physical state is forbidden by the Slavnov-Taylor identities; the latter are a direct consequence of the underlying gauge invariance. If, however, a gauge anomaly is present, then the Slavnov-Taylor identities are broken. Therefore the ghosts contribute to the Hilbert space of physical states, and the unitarity is broken. My question It is always possible to choose the gauge fixing in a way that ghosts aren't present. In abelian gauge theories an example is the Lorentz gauge. In non-abelian gauge theories, an example is the so-called auxiliary gauge. With this choice of gauge fixing conditions, there are no intermediate states with indefinite norm whose presence leads to the violation of the unitarity in the case of a gauge anomaly. 
So where exactly is the unitarity breakdown hidden in the case of fixing the gauge condition in a way such that the ghosts are absent? In fact, the gauge invariance tells us that all gauge fixing conditions are equivalent, and one might say that the unitarity has to be preserved for all possible choices. However, I may say that the gauge anomaly requires quantization using the ghost-free choices of fixing condition, so that the unitarity is preserved (as long as I don't see where the unitarity breakdown is hidden). Answer: In the classical theory, the gauge symmetry is necessary for removing the unphysical degrees of freedom of a given gauge field (sometimes also called ghosts). A massless vector field should only have 2 physical degrees of freedom. Yet, if you think of QED, without the gauge fixing condition the photon still has 3 d.o.f., one of these with zero norm, therefore spoiling unitarity. The decoupling of these unphysical d.o.f. in the quantized theory is ensured by the Ward identities (or the Slavnov-Taylor identities in the non-Abelian case). In QED imposing a gauge condition does not break the conservation of the electromagnetic current, which is a source of the gauge field, and the Ward identities follow directly from the conservation of this current. An anomalous symmetry spoils the conservation of the associated current and the Ward identities break down. Therefore, in an anomalous QED the photon would couple with 3 degrees of freedom to physical processes, which is of course inconsistent with reality. The Faddeev–Popov ghosts you are referring to arise in the non-Abelian case because there the gauge fixing condition automatically breaks the conservation of the associated current and therefore, no Ward identity can be constructed there. In order to remove the unphysical degree of freedom from our gauge field, we need to modify our Lagrangian appropriately, which introduces the Faddeev-Popov determinant and therefore the ghosts. 
These ghost states ensure the decoupling of the unphysical d.o.f., but of course they are themselves not physical as they are only introduced by a choice of gauge fixing. Therefore the Slavnov-Taylor identities guarantee their decoupling as external states. These can also be derived from the conservation of a specific supersymmetric current in the BRST system, by the way, and an anomaly spoils the conservation of this BRST current. Of course you could also choose a gauge in which the ghosts decouple from the gauge field completely and therefore can just be put in the normalization of the PI. But the price to pay for that is the rather complicated form of the gauge boson propagator, which itself now has to guarantee the decoupling of the unphysical d.o.f. For an explicit calculation see my links below. Moreover, there can be some topological obstacles to choosing this auxiliary gauge globally over a non-trivial base manifold, since this is equivalent to a globally defined section in the gauge bundle (which does not need to exist for non-trivial bundles). Summary: Unitarity breaks down in the presence of an anomaly because the unphysical (zero-norm) degrees of freedom of the gauge field don't decouple anymore. The introduction of Faddeev-Popov ghosts only "shifts" the unphysical d.o.f. to these new states; therefore they have to decouple in an anomaly-free theory. Sources: The problem of the existence of the globally defined section, known as the Gribov ambiguity A book in which the calculation of the propagator for the axial gauge and the decoupling of unphysical d.o.f. of the gauge boson is shown A paper which I wasn't able to find on the internet, by Kummer, W. (1976) Acta Phys. Austriaca Suppl. XV, 423.
{ "domain": "physics.stackexchange", "id": 36396, "tags": "quantum-field-theory, gauge-invariance, quantum-anomalies, unitarity, ghosts" }
How to add a tool from a STL file
Question: Hello, I'm new at ROS (kinetic 1.12.17, Ubuntu 16.04 LTS) and I want to know how can I add a tool to a robot, having the tool in STL format. I'm working with the Robotnik's RB-Kairos robot with the UR10 arm and I am lost with all the URDF files that are in the packages (you can see my workspace at https://github.com/DavidRedrejo/test_ws/tree/main) I want to add the tool.stl (found in my main branch) to the robot xacro (access here) but I don't know where and how I have to add the tool in the robot's xacro file. The tool is suposed to be a "static holder" for a real tool, but I think that it is not necesary to represent that tool, just the "holder piece". Hope somebody could help me. Many thanks for your time! EDIT: I have made an URDF file for the tool, it looks like this: <?xml version="1.0"?> <robot name="spray_tool" xmlns:xacro="http://www.ros.org/wiki/xacro"> <xacro:macro name="spray_tool" params="prefix parent *origin "> <joint name="${prefix}_base_joint" type="fixed"> <origin xyz="0 0 0" rpy="0 0 0"/> <parent link="${parent}"/> <child link="${prefix}_base_link"/> </joint> <!-- BASE LINK --> <link name="${prefix}_base_link"> <inertial> <mass value="0.25" /> <origin xyz="0 0 0" /> <inertia ixx="1.0" ixy="0.0" ixz="0.0" iyy="1.0" iyz="0.0" izz="1.0" /> </inertial> <visual> <origin xyz="0.0 0.0 0.0" rpy="0.0 0.0 0.0" /> <geometry> <mesh filename="package://spray_tool_description/meshes/tool.stl" scale="10 10 10"/> </geometry> <!-- <material name="grey"> <color rgba="0.5 0.5 0.5 1"/> </material> --> </visual> <collision> <origin xyz="0.0 0 0" rpy="0 0 0" /> <geometry> <!--box size="0.146 0.05 0.0735"/--> <mesh filename="package://spray_tool_description/meshes/tool.stl" scale="10 10 10"/> </geometry> </collision> </link> </xacro:macro> </robot> And in the rbkairos.urdf.xacro I also added the include line at the top and this other line. 
When I run the simulation, there is no tool, but the link rbkairos_spray_tool_base_link is created, as seen in this frame. (Sorry, I don't have points to post pics here!) So, the frame is correct but there is no visual display? Or any file is not properly done? Originally posted by DavidRedrejo on ROS Answers with karma: 3 on 2021-07-21 Post score: 0 Answer: I'd highly recommend reading through the URDF tutorials to understand better what is inside a URDF or xacro file. In this case, as the robot has been mostly defined already, and you just need to add the tool, you will want to add a link. You can either do this directly in the rbkairos.urdf.xacro, as a child of the <robot> tag, or you can define the tool in a separate xacro file (e.g. my_tool.xacro) and include it, as is done here. The latter is probably the cleaner approach but they are functionally equivelent. includeing an xacro file just inserts it into the final parsed file. Your '' will need the following <visual> and <collision> tags. These can have identical contents if you want to use the same .stl file for both. Sometimes it makes more sense to use simplified collision models, but this depends on your application/models. The end of this tutorial shows you what your visual tag will need to import a mesh model a <joint> definition; the joint defines just how and where your .stl model fits onto the robot? is it a fixed connection, can it rotate, if so how? if you are simulating your robot, you will also need an <inertial> block for the sake of the physics engine You can quickly to test to make sure your .xacro file compiles properly using the commandline interface (e.g. xacro myfile.xacro). 
Feel free to update your question with your attempt if you have issues, and I can try to help Originally posted by shonigmann with karma: 1567 on 2021-07-21 This answer was ACCEPTED on the original site Post score: 0 Original comments Comment by DavidRedrejo on 2021-07-29: Hi @shonigmann , thanks for your reply and your help! I updated the question with my attempt and there might be something I'm missing but I don't see it. If I need to post more info let me know! Many thaks! Comment by shonigmann on 2021-07-29: are there any hints in the console output? if you look in the gazebo log files (on ubuntu, they are in ~/.gazebo/server-XXXXX/default.log, ~/.gazebo/client-XXXXX/default.log, and ~/.gazebo/ogre.log), are there any hints? My best guess is that the CAD model you specified cannot be found be Gazebo, indicating either that the models cannot be found on the Gazebo model path or that the path you provided cannot be resolved. If you replace the <mesh> with a <box> can you at least see the box when you load the model? Comment by DavidRedrejo on 2021-07-29: In the log files there is nothing related to xacro files or stl. Now is fixed and working fine. The problem was the scale param in the tag. I thought that the scale was to specify a factor to multiply all dimensions, but scale is the relation between the dimension of the object and the dimension in the simulation (in my case, the .stl was defined in mm and the simulation is 1 m, so my scale param is -> scale="0.001 0.001 0.001") I will set your answer as correct because the main question was solved with that. Thanks!
{ "domain": "robotics.stackexchange", "id": 36737, "tags": "urdf, ros-kinetic, xacro, ur10, stl" }