anchor stringlengths 0 150 | positive stringlengths 0 96k | source dict |
|---|---|---|
Dynamic array type using void pointers C | Question: I made my own dynamic array type using void pointers. I'd like to know what you think of my implementation. void pointers are nice but I fear they may be inefficient. The compiler cannot tell what you are doing because it has no size information. Either way, I like the flexibility and the fact that I do not have to use macros as much. But what are your thoughts on this? I am still working on it and am still finding some minor bugs here and there. It has been a fun project to work on.
main.c
#include "array.h"
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
void log_int(void *p);
int main(int argc, char const *argv[])
{
/* Demonstration driver for the void-pointer dynamic array API in array.h. */
/* declare an integer array. */
int *a = array_alloc(sizeof(*a), log_int);
/* Pre-grow capacity to 32 elements to avoid reallocation during the appends below. */
array_reserve(a, 32);
/*
append a list 1 - 9
fixed functions usually take in local arrays
or variadic macro arguments.
*/
array_give_fixed(a, 1, 2, 3, 4, 5, 6, 7, 8, 9);
/*
array_take usually stores the values taken
from an array into a buffer.
Passing NULL causes each element taken to have
its destructor called.
NOTE(review): array_take does not check freeElem for NULL before
calling it, so passing NULL here requires a non-NULL destructor.
*/
array_take(a, NULL, 3);
int i[2];
/* array_take returns the number of elements taken. */
/* Drain the remaining elements two at a time (array_take_fixed derives
the count 2 from sizeof(i) / sizeof(*i)). Elements come off the tail. */
for(; array_take_fixed(a, i); )
{
fprintf(stdout, "%d ", i[0]);
fprintf(stdout, "%d\n", i[1]);
}
/*
array_free calls each element's destructor
and frees the array itself.
*/
array_free(a);
return 0;
}
/*
 * Element destructor used by the demo array: logs the popped integer
 * to stderr. Matches the void (*)(void *) destructor signature.
 */
void log_int(void *p)
{
    const int *value = p;
    fprintf(stderr, "integer popped %d\n", *value);
}
array.h
#ifndef ARRAY_H
#define ARRAY_H
/*
 * Generic dynamic array stored as a hidden Array_Header immediately
 * before the user-visible element pointer ("fat pointer" layout).
 * NOTE(review): assert.h / stdlib.h / string.h are implementation
 * details of array.c and could be moved out of this public header.
 */
#include <assert.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
/* Element count of a true array.
NOTE(review): `a` is not parenthesized in `sizeof *a`; COUNT(p + 1)
would expand incorrectly. Prefer (sizeof(a) / sizeof *(a)). */
#define COUNT(a) (sizeof(a) / sizeof *a)
typedef struct {
void (*freeElem)(void *);
size_t szElem;
size_t ctElem;
size_t cpElem;
} Array_Header;
/* NOTE(review): the array data lives at (header + 1); this is aligned for
any element type only if sizeof(Array_Header) is a multiple of the
strictest alignment — consider a trailing max_align_t flexible member. */
/* Allocate an empty array of szElem-sized elements; freeElem (may be NULL)
is invoked on each element when it is destroyed. */
void *array_alloc(size_t szElem, void (*freeElem)(void *));
void array_free_impl(void *a);
void *array_reserve_impl(void *a, size_t capacity);
void *array_push_impl(void *a, const void *elem);
void array_pop(void *a);
void *array_give_impl(void *a, const void *elems, size_t n);
/* Remove up to n elements from the tail; copy them into elems, or destroy
them via freeElem when elems is NULL. Returns the number removed. */
size_t array_take(void *a, void *elems, size_t n);
size_t array_szElem(const void *a);
size_t array_ctElem(const void *a);
size_t array_cpElem(const void *a);
/* The _impl functions may reallocate, so these wrappers reassign `a`.
NOTE(review): macro arguments are unparenthesized in the expansions. */
#define array_reserve(a, capacity) do { a = array_reserve_impl(a, capacity); } while(0)
#define array_push(a, elem) do { a = array_push_impl(a, elem); } while(0)
#define array_give(a, elems, n) do { a = array_give_impl(a, elems, n); } while(0)
/* Append a brace list of literals via a compound literal (GNU __typeof__). */
#define array_give_fixed(p, ...) \
do { \
p = array_give_impl( \
p, &(__typeof__(*p)[]){__VA_ARGS__}, \
sizeof((__typeof__(*p)[]){__VA_ARGS__}) \
/ sizeof(__typeof__(*p))); \
} while(0)
/* Take COUNT(a) elements into the true array `a`. */
#define array_take_fixed(p, a) \
array_take(p, a, sizeof(a) / sizeof(*a))
#define array_free(a) do { array_free_impl(a); a = NULL; } while(0)
#endif /* ARRAY_H */
array.c
#include "array.h"
void *array_alloc(size_t szElem, void (*freeElem)(void *))
{
void *a = malloc(sizeof(Array_Header));
Array_Header *header = a;
header->freeElem = freeElem;
header->szElem = szElem;
header->ctElem = 0;
header->cpElem = 0;
return header + 1;
}
/*
 * Destroy the array: run the element destructor (if any) over every
 * stored element, then release the single backing allocation.
 */
void array_free_impl(void *a)
{
    assert(a);
    Array_Header *header = (Array_Header *)a - 1;
    if(header->freeElem)
    {
        unsigned char *elem = a;
        size_t i;
        for(i = 0; i < header->ctElem; ++i)
        {
            header->freeElem(elem);
            elem += header->szElem;
        }
    }
    /* The header is the start of the allocation, so freeing it frees
       the element storage as well. */
    free(header);
}
void *array_reserve_impl(void *a, size_t capacity)
{
assert(a);
Array_Header *header = (Array_Header *)a - 1;
if(capacity > header->cpElem)
{
header->cpElem = capacity;
header = realloc(header, sizeof(*header) + header->cpElem * header->szElem);
a = header + 1;
assert(header);
}
return header + 1;
}
void *array_push_impl(void *a, const void *elem)
{
assert(a);
assert(elem);
Array_Header *header = (Array_Header *)a - 1;
if(header->ctElem + 1 > header->cpElem)
{
header->cpElem = (header->cpElem + 1) * 2;
header = realloc(header, sizeof(*header) + header->cpElem * header->szElem);
a = header + 1;
assert(header);
}
memcpy((unsigned char *)a + header->ctElem * header->szElem, elem, header->szElem);
++header->ctElem;
return header + 1;
}
/*
 * Remove the last element, invoking the element destructor on it when
 * one was registered. No-op on an empty array. Never reallocates, so
 * the caller's pointer stays valid.
 */
void array_pop(void *a)
{
    assert(a);
    Array_Header *header = (Array_Header *)a - 1;
    if(header->ctElem == 0)
    {
        return;
    }
    --header->ctElem;
    if(header->freeElem)
    {
        /* ctElem now indexes the slot that was just popped. */
        header->freeElem((unsigned char *)a + header->ctElem * header->szElem);
    }
}
void *array_give_impl(void *a, const void *elems, size_t n)
{
assert(a);
assert(elems);
Array_Header *header = (Array_Header *)a - 1;
if(header->ctElem + n > header->cpElem)
{
header->cpElem = (header->cpElem + n) * 2;
header = realloc(header, sizeof *header + header->cpElem * header->szElem);
a = header + 1;
assert(header);
}
memcpy((unsigned char *)a + header->ctElem * header->szElem, elems, n * header->szElem);
header->ctElem += n;
return header + 1;
}
/*
 * Remove up to n elements from the tail. If elems is non-NULL the
 * removed elements are copied there (in storage order); otherwise each
 * removed element has the destructor called on it. Returns the number
 * of elements actually removed (may be less than n).
 *
 * Fixes vs. original:
 *  - freeElem was called without a NULL check, crashing for arrays
 *    created without a destructor when elems == NULL.
 *  - removed the leftover commented-out condition and stray braces.
 */
size_t array_take(void *a, void *elems, size_t n)
{
    assert(a);
    Array_Header *header = (Array_Header *)a - 1;
    if(n > header->ctElem)
    {
        n = header->ctElem; /* clamp to what is actually stored */
    }
    header->ctElem -= n;
    /* First byte of the region being removed (new logical end). */
    unsigned char *begin = (unsigned char *)a + header->ctElem * header->szElem;
    if(elems)
    {
        memcpy(elems, begin, n * header->szElem);
    }
    else if(header->freeElem)
    {
        unsigned char *end = begin + n * header->szElem;
        for(; begin != end; begin += header->szElem)
        {
            header->freeElem(begin);
        }
    }
    return n;
}
/* Number of elements currently stored. */
size_t array_ctElem(const void *a)
{
    assert(a);
    /* Const-correctness fix: the original cast silently dropped const. */
    const Array_Header *header = (const Array_Header *)a - 1;
    return header->ctElem;
}
/* Current capacity in elements (allocated, not necessarily used). */
size_t array_cpElem(const void *a)
{
    assert(a);
    const Array_Header *header = (const Array_Header *)a - 1;
    return header->cpElem;
}
/* Size of one element in bytes, as given to array_alloc. */
size_t array_szElem(const void *a)
{
    assert(a);
    const Array_Header *header = (const Array_Header *)a - 1;
    return header->szElem;
}
```
Answer: Much feedback on the recent Array List C implementation applies here too.
() around macro parameters
It is good practice to enclose macro parameters with (), yet `a` still remains problematic. Example:
// #define array_push(a, elem) do { a = array_push_impl(a, elem); } while(0)
#define array_push(a, elem) do { a = array_push_impl((a), (elem)); } while(0)
Check allocation success
void *a = malloc(sizeof(Array_Header));
if (a == NULL) return NULL; // add
.h file: only code necessary #include for the headers.
#include <assert.h> and perhaps others not needed in the .h file. Remove them.
Unnecessary struct
typedef struct { ... } Array_Header; not needed in the .h file. Move to the .c file.
Even better, re-define functions to use a pointer to Array_Header.
Array alignment
As OP wants to use header + 1 as the start of an array of any type, additional padding may be needed in Array_Header. Since *alloc() returns a pointer good for all alignments, to make certain header + 1 is also aligned for any type use a FAM of type max_align_t (which is an object type whose alignment is the greatest fundamental alignment).
typedef struct {
void (*freeElem)(void *);
size_t szElem;
size_t ctElem;
size_t cpElem;
max_align_t a[]; // Add
} Array_Header; | {
"domain": "codereview.stackexchange",
"id": 44487,
"tags": "c, array, pointers"
} |
What do we know about properties of individual stars in the Andromeda Galaxy? | Question: According to the answers to Visible Stars in Andromeda Galaxy, it is possible to distinguish between different stars in the Andromeda galaxy. What I am curious about is how much information we are able to extract about the physical details of these objects. What can we tell about such distant stars? Can we resolve their individual masses and types?
Do we know, for example, what is the biggest star within M31? Or perhaps can we tell something only about a couple of special cases (such as variable stars, binary systems or massive giants) and information about individual "standard" stars is lost in the blur?
Answer: Basically, we can determine the same information that we are able to extract from the spectrum of a nearby star, as long as the star in M31 can be spatially resolved.
You can find that information yourself, by using SIMBAD:
http://simbad.cds.unistra.fr/simbad/sim-basic?Ident=M31&submit=SIMBAD+search
Then scrolling down a bit, following the hierarchy link to "Children" (Warning: It'll take a while). In the list stars are denoted by "*" | {
"domain": "astronomy.stackexchange",
"id": 7012,
"tags": "star, galaxy, m31"
} |
Violation of the law of energy conservation between photosynthesis and respiration? | Question: For the production of one glucose molecule in the Calvin cycle a plant uses 18 molecules ATP, but when the same glucose molecule is oxidised — first in the cytoplasm and then in the mitochondrion — it can obtain approximately 36-38 molecules ATP. How is the theory of conservation of energy maintained here?
Answer: The Fallacies in the argument
The question contains two main fallacies (some would say sleights of hand) in the energetic comparison of glucose synthesis from CO2 in the Calvin cycle and glucose oxidation via glycolysis, the tricarboxylic acid cycle and the electron transport chain:
The descriptions of the two reactions are incomplete — important co-substrates are ignored.
The formulation of the question seems to assume that the energetics of a biochemical reaction series is reflected solely in the
interconversion of ATP and ADP, rather than the free energy changes
that occur in all associated reactions.
Detailed explanation
To make a valid comparison of the thermodynamics of glucose oxidation to CO2 with its synthesis form CO2 we have to consider the single reversible reaction:
(The ‘6H’ may appear rather odd, but it is accounted for in the reduction of cofactors etc.. We cannot include oxygen in the equation because it is not involved chemically in the synthesis of glucose. Therefore for this treatment the electron transport chain is not included, although it is discussed later.)
The reaction from left to right is associated with a certain decrease in Gibbs free energy (ΔG) and that from right to left in a corresponding increase of the same value. In a non-biological context this might involve the evolution and use of heat energy, but in the cell it involves the chemical energy of bonds between atoms. We therefore have to consider all the chemical reactions to which the reaction above is coupled, i.e. which receive or transfer free energy. The additional reactions and the free energy changes they involve can be found in the chapters on glycolysis, the tricarboxylic acid cycle and the Calvin cycle in Berg et al. and are (omitting water, hydrogen ions and inorganic phosphate):
From which it can be seen that the chemical energy input required for synthesis is greater than that obtained, contrary to what is asserted in the question
What about the ATP?
Yes, the cell oxidizes NADH and FADH2 and uses the free energy change to build an electrochemical gradient, the dissipation of which generates ATP (30 molecules per glucose molecule is the current estimate). However that is separate process, with no counterpart in the Calvin Cycle, where the NADPH is generated from photosynthetic reduction and not from ATP. However, if you perform a naive ‘currency conversion’ at the rate of 3ATP per NAD(P)H and 2ATP per FADH2, the balance is: input for synthesis 54 ATP, output from oxidation 38 ATP, i.e. the same general result as above.
Popularizing science is not easy, and those who make the effort can be excused for equating energy with ATP (often as clip-art lightening flashes). However if you wish to study metabolism you need to think scientifically about chemical thermodynamics and free energy. The fact that the hydrolysis of ATP to ADP is accompanied by a decrease in Gibbs free energy is not particularly chemically remarkable in the context of the free energy changes of other chemical conversions in the cell (including NAD(P) oxido-reductions). What is remarkable is that the cell has evolved enzyme that catalyse reactions in which the free energy change from this conversion is not lost as heat, but can be used to offset the +ve ΔG of a reaction to which it is coupled. Reactions involving NAD(P)H do the same thing, but they are limited to reductions, and the free energy change is inconveniently large.
Why the accepted answer to this question is a red herring
The accepted answer addresses a different question to that posed. It cites a calculation of the photon energy required for converting carbon dioxide to glucose. This is irrelevant to the thermodynamics of the Calvin Cycle, which can occur in the dark. It is relevant only to the efficiency of the use of photon energy in phosphorylating ADP and reducing NADP+ (incidentally in an open system) — a distinct reaction. The thermodynamics of the reactions the question refers to — glucose / carbon dioxide interconversion processes — do not involve the generation of their co-substrates. | {
"domain": "biology.stackexchange",
"id": 6702,
"tags": "biochemistry, metabolism, photosynthesis, cellular-respiration"
} |
What is the most stable conformation (Newman Projection) of CF3CH3? | Question: Is a totally eclipsed Newman projection the most stable conformation for a 2 carbon compound, with one carbon bonded to more electronegative atoms (making them partially negative), and the other carbon with something less electronegative (making it partially positive)?
For example, let's imagine $\ce{CF3CH3}$. If they are eclipsed, the fluorine side is partially negative and the hydrogen side is partially positive, so wouldn't they attract?
Or do the electron clouds always repel each other, no matter what atom is used (making a staggered conformation the most stable, not eclipsed)?
Answer: For $\ce{H3C-CF3}$, the stable conformation is staggered. I would expect this to be case for any similar, freely rotating molecule (the exception being conformations imposed by rings etc.), with the reason being the repulsion between the $\ce{C-F}$ and $\ce{C-H}$ bonds.
To confirm, I have optimized the molecular geometry on different levels of quantum chemistry, starting from a conformer halfway between staggered and eclipsed. In all cases, I have observed convergence to the staggered conformer. The levels are
HF/def2-TZVP
PBE-D3/def2-TZVP
B3LYP-D3/def2-TZVP
PW6B95-D3/def2-TZVP
PW6B95-D3/def2-QZVP
In response to comment: I have performed PW6B95-D3/def2-TZVP optimizations for propene, acetaldehyde, and propanaldehyde (the latter starting from two different conformers). The result is that in all cases, the double bond is eclipsed by a single bond (and two non-equivalent conformers exist for propanaldehyde). | {
"domain": "chemistry.stackexchange",
"id": 13051,
"tags": "organic-chemistry, stereochemistry, conformers"
} |
Maxwell's equation for non inertial observer | Question: Applying Maxwell's equation we can prove that light will move at the speed of light for every inertial frame, is it true as well for non-inertial frames?
How light moves slowly near a black hole??
Answer: These are two separate questions.
1) Does light move at c in non-inertial frames?
First to get a bit pedantic, I assume you mean the coordinate velocity. As light does not have a well defined proper velocity, this seems to be the most reasonable way to interpret your question here.
While rewriting Maxwell's equations in a non-inertial frame, and then solving this potential mess can be difficult, we can easily take a solution in an inertial frame and just do the coordinate transformation to get the solution in some non-inertial frame. In particular, if we have the position of some light wave packet vs time in an inertial frame, we can just apply the coordinate transformation to find the new coordinate labels for the points on its path.
Consider the inertial frame with coordinates $t,x,y,z$. A simple example of a non-inertial coordinate system $t',x',y',z'$ is be given by the transformation:
$$t'=t,\ x'=x+vt,\ y'=y,\ z'=z$$
This Galilean transformation gives us a non-inertial frame, and the coordinate velocities will just change by $v$ in the $x$ direction as is usual with Galilean transformations. So if in the inertial frame the velocity of the light packet was $c$ in the $x$ direction, the velocity in this new frame would be $c+v$. This is probably the easiest counter example to see.
2) How light moves slowly near a black hole?
Now you are talking about curved spacetime as well. In curved spacetime, there is no choice of coordinate system in which the coordinate speed of light will everywhere be c.
However, locally we can still choose a coordinate system in which that is the case. A free falling frame is such an example, and so if you fall into a black hole nothing strange will be seen with your local measurements of the speed of light. When people say light "slows" while approaching a blackhole, they are again talking about the coordinate speed of light, and they are implicitly assuming a coordinate system (usually the
Schwarzschild coordinates). Light slowing to a stop in the limit of approaching the event horizon is just an artifact of a coordinate singularity in this particular choice of coordinate system. This singularity is only an issue with the coordinate choice and can be completely removed if another coordinate system is used. A very similar coordinate singularity occurs even in flat space-time in a common choice of coordinate system with the spatial origin having a constant proper acceleration (Rindler coordinates). | {
"domain": "physics.stackexchange",
"id": 16498,
"tags": "electromagnetism, special-relativity, black-holes, reference-frames"
} |
Shortest and most efficient way for an endless image/content rotation in Vanilla JS and jQuery | Question: I wanted to create the shortest JavaScript code for an endless rotation of elements, without any controls. The elements can be images or SVG elements (only the Vanilla JS version will work here) or text containers. It doesn't matter. I came up with this:
HTML
<div id="container" class="container">
<img src="http://placehold.it/300&text=1" alt="">
<img src="http://placehold.it/300&text=2" alt="">
<img class="active" src="http://placehold.it/300&text=3" alt="">
<img src="http://placehold.it/300&text=4" alt="">
</div>
CSS
.container > img {
opacity: 0;
position: absolute;
-webkit-transition: all 1s linear;
transition: all 1.5s linear;
}
.container > img.active {
opacity: 1;
}
Vanilla JS
// Endless rotator: every 2s, move the `active` class to the next image
// (wrapping to the first child at the end); CSS transitions the opacity.
//
// Fix: the original stored the element in `e` but then referenced
// `container`, which was never declared — it only worked because
// browsers expose id="container" as a named global. Use one variable.
var container = document.getElementById('container');
if (container) {
    setInterval(function() {
        var current = container.getElementsByClassName('active')[0];
        current.classList.remove('active');
        // Next sibling, or wrap around to the first image.
        var next = current.nextElementSibling || container.firstElementChild;
        next.classList.add('active');
    }, 2000);
}
Try it here
jQuery
// jQuery version of the endless rotator: shift the `active` class to the
// next image every 2s, wrapping to the first image at the end.
//
// Fix: the original evaluated current.next() twice per tick (once for
// the length test, once for the value); compute it once and reuse it.
var images = $('.container > img');
if (images.length) {
    setInterval(function() {
        var current = images.filter('.active');
        var next = current.next();
        if (!next.length) {
            next = images.eq(0); // wrap around
        }
        current.removeClass('active');
        next.addClass('active');
    }, 2000);
}
Try it here
Is there anything that can be improved? I'm especially curious about the first line within each interval which involve a request (getElementsByClassName, Vanilla JS) or a loop (.filter, jQuery).
Answer: Eliminating functional differences between Vanilla JS and jQuery versions
Your vanilla script and your jQuery script are not functionally equivalent. Your vanilla is selecting the container by its ID, while the jQuery is selecting the container by its class and then selecting the images. Your vanilla script would more closely resemble the jQuery by using document.querySelector() and/or document.querySelectorAll().
e.g.: var images = document.querySelectorAll('.container > img');
Making it work for multiple containers
Your code will function strangely if more than one image rotator container is on the page: the jQuery will grab image elements from all the containers and only set one to active at a time, while the vanilla javascript will only change elements in the first container it grabs.
(EDIT: As you noted in the comments, you can work around this by selecting the different containers by ID before passing them to your function, but that approach requires manually specifying IDs both in the HTML and in the JavaScript. Ideally, you'd be able to just add more elements with the appropriate classes in the HTML to take advantage of the existing image rotator functionality.)
With a little tweaking, you can ensure your code processes each container separately (allowing each container to have its own active image).
// Start an independent rotation for every element with class "container",
// so each container keeps its own active image.
var containers = document.querySelectorAll('.container');
for (var i = 0; i < containers.length; i++) {
beginRotation(containers[i]);
}
// Rotate the "active" class through this container's children every 2s.
// Assumes the container already has exactly one ".active" child —
// TODO confirm; a container without one would throw on classList.
function beginRotation(container){
setInterval(function() {
var current = container.querySelector('.active');
current.classList.remove('active');
// Advance to the next sibling, wrapping to the first child.
var next = current.nextElementSibling || container.firstElementChild;
next.classList.add('active');
}, 2000);
}
The above code first grabs all the elements with the container class, then sets the interval to run on each of them.
Minor nitpick(s)
You don't need that semicolon at the end of your if(e){ ... } statement in your vanilla js example.
Supporting older browsers (if you have to)
IE9 and below do not support classList, so if you want to support them you'll have to use the className property.
To remove a class, something like this would work:
current.className = current.className.replace("active", "").trim();
and its complement to add a class:
next.className = (next.className + " active").trim();
Note that this only really works because we know what to expect in the class names. This would break, for example, if you had an element with a class my-active-class because it would replace the substring active. A better (but more tedious) approach would be to split className into an array (using element.className.split(" ")) and check each array element for the desired class.
You'd also want to be aware that although IE9 supports CSS opacity, it doesn't support CSS transitions. The end result is that the images will transition promptly instead of fading in and out for IE9 users.
Working Example
// Same multi-container rotator, using className string manipulation
// instead of classList for IE9 compatibility (classList is unsupported
// there, as discussed above this snippet).
var containers = document.querySelectorAll('.container');
for (var i = 0; i < containers.length; i++) {
beginRotation(containers[i]);
}
function beginRotation(container) {
setInterval(function() {
var current = container.querySelector('.active');
// NOTE: replace("active", "") is a substring replace — it would also
// mangle a class like "my-active-class"; safe only for known classes.
current.className = current.className.replace("active", "").trim();
var next = current.nextElementSibling || container.firstElementChild;
next.className = (next.className + " active").trim();
}, 2000);
}
.container > img {
opacity: 0;
position: absolute;
-webkit-transition: all 1s linear;
transition: all 1.5s linear;
}
.container > img.active {
opacity: 1;
}
<div class="container">
<img src="http://placehold.it/300x100&text=1" alt="" />
<img src="http://placehold.it/300x100&text=2" alt="" />
<img class="active" src="http://placehold.it/300x100&text=3" alt="" />
<img src="http://placehold.it/300x100&text=4" alt="" />
</div>
<div style="height:100px"> </div>
<div class="container">
<img class="active" src="http://placehold.it/300x100&text=1" alt="" />
<img src="http://placehold.it/300x100&text=2" alt="" />
<img src="http://placehold.it/300x100&text=3" alt="" />
<img src="http://placehold.it/300x100&text=4" alt="" />
</div>
EDIT: Efficiency Suggestion(s)
Caching DOM Queries
You can make the code in setInterval more efficient by getting all the necessary DOM operations out of the way beforehand.
If you want to reduce the number of DOM queries (which are typically expensive operations) you can cache an array of references to all the relevant <img> elements and then reference that array in the setInterval function.
I know you're hoping for the "shortest and most efficient way", but this would be for efficiency at the expense of brevity, since you'd need to set up an extra variable to track the index of the active image. Using this index would save your code from having to read the current classLists of the images, requery the container element to get the active image, and traverse the DOM for the next sibling or first child.
// Efficiency-focused variant: the <img> elements are queried once up
// front and the active index is tracked in a closure, so the interval
// callback performs no DOM queries or traversal.
var containers = document.querySelectorAll('.container');
for (var i = 0; i < containers.length; i++) {
beginRotation(containers[i], containers[i].querySelectorAll('img'));
}
function beginRotation(container, imgs) {
// Index of the initially-active image.
// NOTE(review): if the container has no ".active" child, querySelector
// returns null and getIndexOf yields undefined, breaking the interval.
var i = getIndexOf(container.querySelector('.active'), imgs);
setInterval(function () {
imgs[i++].classList.remove('active');
// Wrap the index when it runs past the last image.
imgs[(i == imgs.length ? i = 0 : i)].classList.add('active');
}, 2000);
}
// Linear search for target in the array-like arr; returns its index.
// NOTE(review): returns undefined (not -1) when target is absent.
function getIndexOf(target, arr) {
var len = arr.length;
while (len--) {
if (target == arr[len]) {
return len;
}
}
} | {
"domain": "codereview.stackexchange",
"id": 13344,
"tags": "javascript, jquery"
} |
Care to review my immutable singly linked list? | Question: public sealed class SinglyLinkedList<T> : IEnumerable<T>
{
// Single shared sentinel for the empty list; all lists terminate in it.
readonly static SinglyLinkedList<T> _empty = new SinglyLinkedList<T>();
// True only for the _empty sentinel.
readonly bool _isEmpty;
// Value stored at this node (default(T) for the sentinel).
readonly T _head;
// Rest of the list; null only for the sentinel and the one-arg ctor below.
readonly SinglyLinkedList<T> _tail;
// Constructs the empty sentinel.
private SinglyLinkedList()
{
_isEmpty = true;
}
// NOTE(review): this one-argument constructor is never used anywhere in
// the class (Prepend/FromArrayInternal always supply a tail) and creates
// a node with a null _tail — consider removing it.
private SinglyLinkedList(T head)
{
_isEmpty = false;
_head = head;
}
// Standard cons cell: head value plus existing (shared, immutable) tail.
private SinglyLinkedList(T head, SinglyLinkedList<T> tail)
{
_isEmpty = false;
_head = head;
_tail = tail;
}
// The canonical empty list (shared sentinel instance).
public static SinglyLinkedList<T> Empty
{
get { return _empty; }
}
// Number of elements. NOTE(review): O(n) on every access — callers in
// this class should cache it rather than read it repeatedly.
public int Count
{
get
{
var list = this;
var count = 0;
while (!list._isEmpty)
{
count++;
list = list._tail;
}
return count;
}
}
// True for the empty sentinel only.
public bool IsEmpty
{
get { return _isEmpty; }
}
// First element; throws InvalidOperationException on the empty list.
public T Head
{
get
{
if (_isEmpty)
throw new InvalidOperationException("The list is empty.");
return _head;
}
}
// Everything after the head. _tail is null only for the sentinel
// (and the unused one-arg constructor), so this throws on Empty.
public SinglyLinkedList<T> Tail
{
get
{
if (_tail == null)
throw new InvalidOperationException("This list has no tail.");
return _tail;
}
}
// Builds a list preserving enumeration order; materializes the sequence
// once so it is enumerated a single time.
public static SinglyLinkedList<T> FromEnumerable(IEnumerable<T> e)
{
if (e == null)
throw new ArgumentNullException("e");
return FromArrayInternal(e.ToArray());
}
// Builds a list from the given values, in order.
public static SinglyLinkedList<T> FromArray(params T[] a)
{
if (a == null)
throw new ArgumentNullException("a");
return FromArrayInternal(a);
}
// Returns a new list with `value` added at the end. O(n): an immutable
// singly linked list cannot share any nodes when appending, so the whole
// list is copied (via an intermediate array).
// NOTE(review): walks the list twice — once for Count, once to copy.
public SinglyLinkedList<T> Append(T value)
{
var array = new T[Count + 1];
var list = this;
var index = 0;
while (!list._isEmpty)
{
array[index++] = list._head;
list = list._tail;
}
array[index] = value;
return FromArrayInternal(array);
}
// Returns a new list with `value` in front; O(1) and shares this entire
// list as the tail — the cheap operation on this data structure.
public SinglyLinkedList<T> Prepend(T value)
{
return new SinglyLinkedList<T>(value, this);
}
// Returns a new list with `value` inserted before position `index`.
// Throws ArgumentOutOfRangeException for index < 0 or index >= Count
// (inserting at the very end is not supported, matching the original).
//
// Fix: the original walked the whole list three times (Count twice plus
// a full copy), making Insert O(n) regardless of index. This version is
// O(index): only the first `index` nodes are copied and the remainder of
// the list is shared structurally with the new list.
public SinglyLinkedList<T> Insert(int index, T value)
{
    if (index < 0)
        throw new ArgumentOutOfRangeException("index", "Cannot be less than zero.");
    // Validate by walking at most `index` nodes instead of computing Count.
    var node = this;
    for (var i = 0; i < index; i++)
    {
        if (node._isEmpty)
            throw new ArgumentOutOfRangeException("index", "Cannot be greater than count.");
        node = node._tail;
    }
    if (node._isEmpty)
        throw new ArgumentOutOfRangeException("index", "Cannot be greater than count.");
    // Copy the prefix that cannot be shared.
    var prefix = new T[index];
    node = this;
    for (var i = 0; i < index; i++)
    {
        prefix[i] = node._head;
        node = node._tail;
    }
    // Share the suffix, push the new value, then rebuild the prefix on top.
    var result = node.Prepend(value);
    for (var i = index - 1; i >= 0; i--)
    {
        result = result.Prepend(prefix[i]);
    }
    return result;
}
// Lazily yields the elements from head to tail.
public IEnumerator<T> GetEnumerator()
{
var list = this;
while (!list._isEmpty)
{
yield return list._head;
list = list._tail;
}
}
// Debug-friendly representation: "[]" for empty, otherwise the head
// followed by "..." (the full contents are not rendered).
public override string ToString()
{
if (_isEmpty)
return "[]";
return string.Format("[{0}...]", _head);
}
// Non-generic enumeration delegates to the generic enumerator.
IEnumerator IEnumerable.GetEnumerator()
{
return GetEnumerator();
}
// Builds a list from an array by prepending back-to-front, so the
// resulting list preserves the array's order. O(n).
static SinglyLinkedList<T> FromArrayInternal(T[] a)
{
var result = Empty;
for (var i = a.Length - 1; i >= 0; i--)
{
result = result.Prepend(a[i]);
}
return result;
}
}
Answer:
The constructor with one argument is never used and doesn't make much sense (list with a head but no tail?).
You shouldn't use Count in your own methods unless absolutely necessary, because it's O(n). Or you should cache the result in a field.
Because you use Count so often, your Insert() walks through the whole list three times! The second time it's because of completely unnecessary Count. You should completely rewrite it, Insert() can be done in O(index), which can be much better than your O(n). | {
"domain": "codereview.stackexchange",
"id": 1484,
"tags": "c#, linked-list, immutability"
} |
Reinterpreting a string, treating < as a backspace character | Question: I've come up with a solution to this coding challenge using recursion. In summary, the input is a string where the < symbol represents a backspace (up to 1 million characters). The output should be a string where all characters that come before a backspace or series of backspaces are removed, as well as the backspaces themselves. E.g.:
"a<bc<" --> "b"
"foss<<rritun" --> "forritun"
Here is my solution to the problem, in Java:
import java.util.*;
public class Main {
    public static void main(String[] args) {
        Scanner scanner = new Scanner(System.in);
        String broken_str = scanner.nextLine();
        System.out.println(fix(broken_str));
    }

    /**
     * Reinterprets the input, treating '<' as a backspace: each '<'
     * deletes the character before it (a backspace with nothing before
     * it is ignored).
     *
     * Fixes vs. the recursive original:
     * - Iterative single pass with a StringBuilder, so inputs up to the
     *   stated 1 million characters cannot overflow the call stack
     *   (the cause of the failing fourth test case).
     * - A leading '<' no longer produces a negative substring index
     *   (the original threw StringIndexOutOfBoundsException on "<a").
     *
     * @param broken_str input possibly containing '<' backspaces
     * @return the string with backspaces applied
     */
    public static String fix(String broken_str) {
        StringBuilder result = new StringBuilder(broken_str.length());
        for (int i = 0; i < broken_str.length(); i++) {
            char c = broken_str.charAt(i);
            if (c == '<') {
                // Delete the previous kept character, if any.
                if (result.length() > 0) {
                    result.setLength(result.length() - 1);
                }
            } else {
                result.append(c);
            }
        }
        return result.toString();
    }
}
This algorithm passes for the first three test cases, but the fourth one results in a runtime error. I've tried to create test cases of my own, and when the string is really long, I get a stack overflow. I was wondering if the algorithm can be optimized.
By the way, I know that this can be done very easily iteratively, but that's kind of boring. If this can only be made efficient iteratively then let me know.
Answer: Recursion is a great tool to have available when dealing with some problems, but each recursive call carries a cost. The cost depends on the language being used, and perhaps the OS architecture. As a consequence, the usefulness of recursion is limited by the circumstances in which the recursion is applied.
In Java each function call typically costs a number of kilobytes of memory as that's the size of the stack frame (and each call requires a new stack frame). Also, in Java, the stack size (memory) is pre-allocated as part of the VM settings (see -Xss commandline option) Other languages (for example, Go) have a much cheaper mechanism for stack management, and thus the cost of recursion is reduced. Java typically cannot go further than a few thousand calls deep in the stack, Go can go millions of calls deep. In your use case, the depth of the stack is proportional to the length of the input string, and strings longer than a few thousand characters will cause an out-of-memory problem.
Bottom line, is that recursion is not the solution to use for your problem. Even in other more stack-friendly languages I would still avoid recursion.
Solve it iteratively, using a StringBuilder, a Reader, and an if-statement...
public static String fix(Reader reader) {
int ch;
StringBuilder result = new StringBuilder();
while ((ch = reader.read()) >= 0) {
if (ch == '<') {
result.setLength(result.length() - 1);
} else {
result.append((char)ch);
}
}
return result.toString();
} | {
"domain": "codereview.stackexchange",
"id": 28997,
"tags": "java, algorithm, strings, programming-challenge, recursion"
} |
Teb Local Planner : Increasing x linear velocity | Question:
Hi,
I'm currently using the teb local planner for a tricycle robot and it gives good results, but I would like to increase the robot's linear velocity. Increasing the max_vel_x parameter doesn't seem to change the speed, and I am having a hard time with all the optimization parameters. Increasing the maximum x linear velocity doesn't change anything and cmd_vel remains around 0.25/0.3 m/s maximum.
I looked a bit at the teb questions and it seems, for example, that setting weight_acc_lim* to 0.0 or changing the footprint model would reduce computation time. When setting weight_acc_lim* to 0.0, the robot is not able to move anymore and the error "trajectory not feasible" appears on every goal I am trying to send.
Here is my current configuration for teb local planner :
TebLocalPlannerROS:
odom_topic: odom
map_frame: /odom
# Trajectory
teb_autosize: True
dt_ref: 0.3
dt_hysteresis: 0.1
global_plan_overwrite_orientation: True
allow_init_with_backwards_motion: False
max_global_plan_lookahead_dist: 1.5
feasibility_check_no_poses: 5
# Robot
max_vel_x: 0.8
max_vel_x_backwards: 0.2
max_vel_y: 0.0
max_vel_theta: 0.4
acc_lim_x: 0.15
acc_lim_y: 0.0
acc_lim_theta: 0.05
# Carlike robot parameters
min_turning_radius: 0.15 # Min turning radius of the carlike robot (compute value using a model or adjust with rqt_reconfigure manually)
wheelbase: 0.864 # Wheelbase of our robot
cmd_angle_instead_rotvel: True # stage simulator takes the angle instead of the rotvel as input (twist message)
footprint_model: # types: "point", "circular", "two_circles", "line", "polygon"
type: "polygon"
radius: 0.2 # for type "circular"
line_start: [0.0, 0.0] # for type "line"
line_end: [0.4, 0.0] # for type "line"
front_offset: 0.2 # for type "two_circles"
front_radius: 0.2 # for type "two_circles"
rear_offset: 0.2 # for type "two_circles"
rear_radius: 0.2 # for type "two_circles"
vertices: [[1.1,0.4],[-0.3,0.4],[-0.3,-0.4],[1.1,-0.4]]
# GoalTolerance
xy_goal_tolerance: 0.2
yaw_goal_tolerance: 0.2
free_goal_vel: False
# Obstacles
min_obstacle_dist: 0.5
include_costmap_obstacles: True
costmap_obstacles_behind_robot_dist: 1
obstacle_poses_affected: 30
costmap_converter_plugin: ""
costmap_converter_spin_thread: True
costmap_converter_rate: 5
# Optimization
no_inner_iterations: 5
no_outer_iterations: 4
optimization_activate: True
optimization_verbose: False
penalty_epsilon: 0.05
weight_max_vel_x: 590
weight_max_vel_theta: 120
weight_acc_lim_x: 370
weight_acc_lim_theta: 200
weight_kinematics_nh: 10000
weight_kinematics_forward_drive: 1000
weight_kinematics_turning_radius: 300
weight_optimaltime: 860
weight_obstacle: 50
weight_dynamic_obstacle: 10 # not in use yet
# Homotopy Class Planner
enable_homotopy_class_planning: True
enable_multithreading: True
simple_exploration: False
max_number_classes: 4
selection_cost_hysteresis: 1.0
selection_obst_cost_scale: 1.0
selection_alternative_time_cost: False
roadmap_graph_no_samples: 15
roadmap_graph_area_width: 5
h_signature_prescaler: 0.5
h_signature_threshold: 0.1
obstacle_keypoint_offset: 0.1
obstacle_heading_threshold: 0.45
visualize_hc_graph: False
I tried lots of combinations with rqt_reconfigure and this one gives good results, except that it seems to block the linear velocity to small values. I would like to prevent the robot from going backwards and have it avoid obstacles coming in its way. Are there any parameter changes that could help keep these characteristics while increasing the velocity of the robot?
Thanks !
Originally posted by TeddyBear on ROS Answers with karma: 21 on 2017-10-20
Post score: 2
Answer:
acc_lim_x: 0.15
acc_lim_theta: 0.05
I think your acceleration limits, especially the theta acceleration, are too low. I have found that if i set the theta acceleration limit very low, TEB will try to reverse more often than just rotate in place. This makes sense intuitively because the system will have to spend more time rotating then reversing. Also try upping your max_vel_theta limit.
By allowing the robot to rotate more freely you should find that it will stop trying to back up as much. You should also be able to relax the weight_kinematics_forward_drive to allow for reverse movements that are sometimes necessary.
Also, you have a whole bunch of footprint types listed. You should only have two lines, the footprint_model type and its corresponding option. For example if you want to use a polygon, have:
footprint_model:
type: "polygon"
vertices: [[1.1,0.4],[-0.3,0.4],[-0.3,-0.4],[1.1,-0.4]]
Here is my base_local_planner_params.yaml file for TEB configuration for a 3 wheeled differential robot that works ok:
TebLocalPlannerROS:
odom_topic: odom
map_frame: /odom
# Trajectory
teb_autosize: True
dt_ref: 0.4
dt_hysteresis: 0.1
global_plan_overwrite_orientation: True
# This is so we stick to the global plan and don't make s's
max_global_plan_lookahead_dist: 2.0
feasibility_check_no_poses: 5
# not sure what this does
global_plan_viapoint_sep: .5
# Robot
max_vel_x: 0.35
max_vel_x_backwards: 0.2
max_vel_theta: 0.7
acc_lim_x: 0.5
# If this is low, we overshoot on a rotate
acc_lim_theta: 0.5
min_turning_radius: 0.0
footprint_model:
type: "polygon"
vertices: [
[ 0.45, 0.375],
[ -0.45, 0.375],
[ -0.45, -0.375],
[ 0.45, -0.375],
]
# GoalTolerance
xy_goal_tolerance: 0.3
yaw_goal_tolerance: 0.2
free_goal_vel: False
# Obstacles
min_obstacle_dist: 0.01
inflation_dist: 0.2
include_costmap_obstacles: True
costmap_obstacles_behind_robot_dist: 1.0
obstacle_poses_affected: 30
costmap_converter_plugin: ""
costmap_converter_spin_thread: True
costmap_converter_rate: 5
# Optimization
no_inner_iterations: 5
no_outer_iterations: 4
optimization_activate: True
optimization_verbose: False
penalty_epsilon: 0.04
weight_max_vel_x: 2
weight_max_vel_theta: 1
weight_acc_lim_x: 1
weight_acc_lim_theta: 1
weight_kinematics_nh: 1000
weight_kinematics_forward_drive: 150
weight_kinematics_turning_radius: 1
weight_optimaltime: 1
weight_obstacle: 50
weight_dynamic_obstacle: 10
selection_alternative_time_cost: False
weight_viapoint: 100
# Homotopy Class Planner
enable_homotopy_class_planning: True
enable_multithreading: True
simple_exploration: False
max_number_classes: 4
roadmap_graph_no_samples: 15
roadmap_graph_area_width: 5
h_signature_prescaler: 0.5
h_signature_threshold: 0.1
obstacle_keypoint_offset: 0.1
obstacle_heading_threshold: 0.45
visualize_hc_graph: False
Originally posted by psammut with karma: 258 on 2017-10-20
This answer was ACCEPTED on the original site
Post score: 4 | {
"domain": "robotics.stackexchange",
"id": 29152,
"tags": "navigation, teb-local-planner, velocity, planner, move-base"
} |
Efficient algorithm for simple constraint satisfaction problem | Question: There are
$k$ Boolean variables $x_1, x_2, \dots, x_k$.
$m$ arbitrary subsets of these variables such that sum of each set equals to $1$ (i.e., only one variable is $1$, the others are $0$). E.g., one of $m$ constraints may be $x_1 + x_3 + x_5 + x_6 = 1$.
$n$ arbitrary subsets of these variables such that sum of each set is at least $1$ (i.e., at least one of the variables should be $1$). E.g., one of $n$ constraints may be $x_1 + x_6 + x_8 \geq 1$.
The solutions are not necessary, but I want to know how many solutions exist.
What is an efficient way to calculate the number of solutions?
Answer: There is unlikely to be any efficient algorithm.
Your first class of constraints are monotone exactly-1 CNF clauses. Your second class of constraints are monotone CNF clauses. The monotone part indicates that negated literals aren't allowed (you can't have $x_1 - x_3 = 1$ or $x_1 - x_4 \ge 1$).
Thus, in the special case where you have only type-2 constraints, the problem becomes #monotone-SAT. Unfortunately, this problem is known to be hard. #SAT is #P-complete, and monotone #SAT is #P-complete as well: it is #P-complete even for monotone 2CNF clauses (i.e., type-2 constraints with only two variables). It is also known that it is NP-hard to approximate the number of solutions. As a result, there is unlikely to be any efficient solution unless the number of variables and constraints is fairly small. Of course, your problem (with a mixture of type-1 and type-2 constraints) is potentially even harder.
So what can you do, to make the best of the situation?
One approach is to code this as an instance of #SAT, and try applying some off-the-shelf #SAT solver. You can encode type-1 constraints in SAT using the methods described at Encoding 1-out-of-n constraint for SAT solvers.
Or, you could express the constraints as a BDD and then apply model-counting methods for BDDs. I expect this to perform worse than a #SAT solver, but you could try it.
Another approach is to use an approximation algorithm. There are existing algorithms for approximate-#SAT, though they too will hit a limit if you have too many variables and/or clauses. | {
"domain": "cs.stackexchange",
"id": 8956,
"tags": "algorithms, data-structures, satisfiability, integer-programming, constraint-satisfaction"
} |
Variations in solubility of total dissolved salts in water with temperature | Question: I want to know the solubility of "total dissolved salts" or mainly Calcium Chloride and Magnesium Chloride causing hardness of water.
Is this data published anywhere at different temperatures?
Answer: Google is your friend. Using "Calcium chloride solubility", you find plenty of references where this solubility is given at different temperatures. Expressed in grams $\ce{CaCl2}$ in $100$ mL water, it is $59.5$ at $0°$C, $64.7$ at $10°$C, $100$ at $20°$C, $128$ at $30°$C, $137$ at $50°$C, $147$ at $70°$C, and $159$ at $90°$C. Do the same with Magnesium chloride.
"domain": "chemistry.stackexchange",
"id": 13842,
"tags": "solubility, water-treatment"
} |
How can the atmospheric pressure be different in distinct points at the same altitude? | Question: From an hydrostatic point of view, the pressure in a fluid should be the same at the same depth/altitude.
Obviously, in our atmosphere that does not happen. I am guessing that the main reason is the fact that the atmosphere cannot be regarded as hydrostatic.
Is this the reason? How exactly can we explain these pressure differences?
I understand that a higher pressure region must have a higher density, and therefore it would take time for reducing such density gradient. But how fast is this? In the order of the speed of sound? Or it has nothing to do with it?
Answer: The air moves in great swirls.
In places where the air is being warmed from below it moves up.
That causes air to be sucked in from below, and spread out at the top.
What it sees as the reason to be sucked in is a lower pressure pulling it.
When any fluid is pulled in to a center, its angular momentum is conserved (and it has plenty of that because it is spinning with the earth), so it spins faster.
(Coriolis force is another way to describe this.)
So, you have meteorological low pressure areas, where the air is spinning the same direction as the earth, only faster, and high pressure areas, which are the opposite.
So that's why you can see different pressures at sea level or any other altitude.
(By the way, a low atmospheric pressure at sea level causes the water itself to be pulled up, resulting in "storm surge".) | {
"domain": "physics.stackexchange",
"id": 40007,
"tags": "fluid-dynamics, pressure, atmospheric-science, fluid-statics"
} |
Is ROS (Robot Operating System) mandatory? | Question: Do we have to build ROS for robotic research/application? What is the main advantage? When or in which situations ROS is mandatory?
Answer: I'm back to a computer!
Like I said in this comment, ROS is generally not mandatory. ROS is one platform among many, famous mostly due to Willow Garage giving away free robots at some point in time to whoever wrote the most ROS modules. That said, it's not the best platform possible, and is certainly nothing overly special. Particularly, the said contest resulted in a lot of low-quality modules just to get the numbers higher.
Over time, the quality of the ROS modules have got better and there are a lot of them as well. Using ROS therefore, you have the benefit of reusing a lot of what's already done. You can read here some reasons why you may want to use ROS.
With that in mind, you should look out for the side effects as well.
Distributed Control
With ROS, you have many nodes that talk with each other through the network. This is sometimes good and easy, but generally results in a wildly varying delay in reception of messages. As a result, you would have to have a large control delay to make sure all messages arrive, which means you cannot react fast to events, which in turn means you have to move your robot at slower speeds so as not to miss those events.
Believe it or not, people actually do robot control through ROS (MoveIt! is the name of the relevant set of components). Slow. Unsafe. But easy!
Non-Real-time
Even when not distributed, ROS is not a real-time platform. That means you are at complete discretion of the Linux kernel to schedule your tasks at any time it sees fit. This is ok for some applications, but not ok for others. So you need to look at your own requirements. Do you need to have a guarantee that your task would finish within a known time frame? If so, you need a real-time system.
Hosted Runtime Environment
Another point to consider is that, while ROS is a general protocol of communication, it's essentially only supported for hosted environments. Hosted means the code runs on top of a kernel, as opposed to freestanding which means the code directly controls the hardware (e.g., on a microcontroller).
If your robotics application is run close to hardware, and therefore you would require a program that runs on a microcontroller, ROS is of no help to you.
Platform Lock-in
Last but not least, developing for ROS results in a platform lock-in. This means that if in the future, for one reason or another, you decide to base your work on another robotic platform, such as OROCOS, YARP, etc, that would be excruciating.
You would also be somewhat locked to Linux. Linux is the best, no doubt, but one day you may end up having to support another system, such as QNX, VxWorks etc, and you would have problems there as well.
If you are writing for microcontrollers, then forget about ROS. If you are writing high-level modules, I highly recommend writing portable code. For example, say you have developed a new sensor, and you want to write a module that acquires data from this sensor, which is connected to your computer via the CAN bus.
What you could do in this situation is to write an independent library, with functions that are able to work with your sensor and acquire data. You could even think of spawning a thread in the library that acquires and enqueues data periodically.
Once you have this helper library, you are free to write a CLI, GUI, ROS module, OROCOS module, YARP module, connect to Matlab, or whatever else you want to use to interact with your sensor.
Final note: what I've said here is generally applicable to all robotics platforms and not just ROS. | {
"domain": "robotics.stackexchange",
"id": 1248,
"tags": "ros, simulator"
} |
how to compensate gravity in gazebo? | Question:
Hi
I have set PID controllers in gazebo, now I want to add the gravity compensation. what should I do?
actually i want to add offset to the output of PID controllers.
is there any topic that i can publish to it ?
i'm working on a industrial robot that have 5 joints + gantry ( visual servo ) .
Originally posted by babaksn on ROS Answers with karma: 15 on 2015-02-28
Post score: 0
Answer:
I believe there is no ready-made controller providing PID control with feedforward gravity compensation. You could look into modifying the PID approach you are already using (custom gazebo plugin? gazebo_ros_control?) and adding gravity compensation.
Originally posted by Stefan Kohlbrecher with karma: 24361 on 2015-02-28
This answer was ACCEPTED on the original site
Post score: 1
Original comments
Comment by Adolfo Rodriguez T on 2015-03-02:
I would add it either to your 'robot driver', or as a separate controller that runs in parallel to the PID. I'd avoid trying to extend the PID implementation, as gravity compensation is model-based, whereas a plain PID is not. More details on your approach would help, as Stefan points out. | {
"domain": "robotics.stackexchange",
"id": 21016,
"tags": "ros, pid, microcontroller, gazebo, joint"
} |
UR5 Jogging using Jacobian | Question:
I am not sure if this is the right place for this question, but here it is :
Based on the suggestions I found in the forums I am trying to get UR5 to jog.
I have set the type of
arm_controller :
type: velocity_controllers/JointTrajectoryController
So I am guessing, it needs only velocity commands.
Here is the code till now :
const std::vector<std::string> joint_names = jmg->getActiveJointModelNames();
ros::Rate loop_rate(5);
while(ros::ok()) {
Eigen::MatrixXd jacobian = group->getCurrentState()->getJacobian(jmg);
std::cout << "jacobian : " << jacobian << std::endl;
Eigen::VectorXd ee_vel(6);
ee_vel << -1.0, 0.0, 0.0, 0.0, 0.0, 0.0;
std::cout << "ee vel : " << ee_vel << std::endl;
Eigen::VectorXd joint_vel = jacobian.inverse() * ee_vel;
std::cout << "joint vel : " << joint_vel << std::endl;
control_msgs::FollowJointTrajectoryGoal goal;
goal.trajectory.joint_names = joint_names;
goal.trajectory.points.resize(1);
goal.trajectory.points[0].positions.resize(6);
std::vector<double> joint_vals = group->getCurrentJointValues();
for (int i = 0; i < 6; ++i) {
goal.trajectory.points[0].positions[i] = joint_vals[i];
}
goal.trajectory.points[0].velocities.resize(6);
for (size_t j = 0; j < 6; ++j) {
goal.trajectory.points[0].velocities[j] = joint_vel[j];
}
// To be reached 1 second after starting along the trajectory
goal.trajectory.points[0].time_from_start = ros::Duration(0.0);
goal.trajectory.header.stamp = ros::Time::now();
traj_client->sendGoal(goal);
loop_rate.sleep();
}
I get a message saying :
[ INFO] [1475846670.761351237]: Received new trajectory execution service request...
[ INFO] [1475846670.761755780]: on_goal
[ INFO] [1475846670.981847762]: Execution completed: SUCCEEDED
[ INFO] [1475846672.982605141]: on_goal
But then the ur_driver crashes during the first loop.
Even though UR5 driver is capable of streaming joint data,
In ur_driver.cpp
URDriver::setSpeed(...)
since there's no controller to support that, I am sending a joint trajectory with only one point in it.
The first few messages on ´/follow_joint_trajectory/goal´ are :
header:
seq: 0
stamp:
secs: 1476096642
nsecs: 351799456
frame_id: ''
goal_id:
stamp:
secs: 1476096642
nsecs: 351800506
id:
goal:
trajectory:
header:
seq: 0
stamp:
secs: 1476096642
nsecs: 351783927
frame_id: ''
joint_names: ['shoulder_pan_joint', 'shoulder_lift_joint', 'elbow_joint', 'wrist_1_joint', 'wrist_2_joint', 'wrist_3_joint']
points:
-
positions: [-0.33116084734071904, -1.6651738325702112, -1.6492660681353968, 3.313994884490967, -1.9009583632098597, 1.5703166723251343]
velocities: [-0.7625009696124342, 2.0051739782259315, -2.213010444549463, 0.20772017660027928, -0.7625008941085881, -0.00035870155518285494]
accelerations: []
effort: []
time_from_start:
secs: 0
nsecs: 0
path_tolerance: []
goal_tolerance: []
goal_time_tolerance:
secs: 0
nsecs: 0
---
header:
seq: 1
stamp:
secs: 1476096642
nsecs: 551965302
frame_id: ''
goal_id:
stamp:
secs: 1476096642
nsecs: 551965571
id: /cont_replan-3-1476096642.551965571
goal:
trajectory:
header:
seq: 0
stamp:
secs: 1476096642
nsecs: 551939357
frame_id: ''
joint_names: ['shoulder_pan_joint', 'shoulder_lift_joint', 'elbow_joint', 'wrist_1_joint', 'wrist_2_joint', 'wrist_3_joint']
points:
-
positions: [-0.3311851660357874, -1.6651976744281214, -1.6492541472064417, 3.314030647277832, -1.9010065237628382, 1.5701848268508911]
velocities: [-0.7625342648492489, 2.0051424575369525, -2.2130284353503162, 0.2077758962011466, -0.7625341972159444, -0.0003395046112384696]
accelerations: []
effort: []
time_from_start:
secs: 0
nsecs: 0
path_tolerance: []
goal_tolerance: []
goal_time_tolerance:
secs: 0
nsecs: 0
---
Is my approach correct or is there a better way to do it? And why does the driver crash?
Originally posted by ipa-hsd on ROS Answers with karma: 150 on 2016-10-10
Post score: 1
Original comments
Comment by ipa-hsd on 2016-10-10:
Is it possible that since the velocities are not starting from 0's, the driver is not able to handle these velocities? In that case, how to generate a trapezoidal profile for jog mode?
Comment by power93 on 2018-03-05:
Hi,
Is true that i can send sample by sample my trajectory using speedj? Is there an example in c++ of a path following sending sample each 125 Hz?
Thanks in advance.
Comment by Dreamer on 2022-11-11:
Hi,I'm researching on admittance control of UR5, but I am confused about how to realize the velocity control of UR5? Your code shown in this page is of great helpful for me, but can you show more details about the header file which should be included?and what is the variable jmg?Thanks a lot.
Answer:
Update: a new package that handles Jacobian jogging: jog_arm
You can use UR's python interface, like this (bypassing ros_control). The downside is, there won't be any collision checking. We used a 50Hz loop rate.
// Compose a string that contains URScript velocity command
sprintf(cmd_, "speedl([%1.5f, %1.5f, %1.5f, %1.5f, %1.5f, %1.5f], 0.2, 0.1)\n", delta.getX(), delta.getY(), delta.getZ(), 0., 0., 0.);
ROS_INFO_STREAM(cmd_);
// Copy command string to ROS message string
urscriptString_.data = cmd_;
// Send the compliant motion command to the UR5
velPub_.publish(urscriptString_);
Our lab has also used the Jacobian method like you were trying to do, but only on a Motoman arm (never tried it with a UR).
Originally posted by AndyZe with karma: 2331 on 2016-10-10
This answer was ACCEPTED on the original site
Post score: 0
Original comments
Comment by AndyZe on 2016-10-10:
Here's the publisher definition:
// Publisher for the velocity commands
velPub_ = n_.advertise<std_msgs::String>("/right_ur5_controller/right_ur5_URScript", 1);
Comment by ipa-hsd on 2016-10-10:
the UR modern driver does exactly this. In ur_realtime_communication.cpp, the setSpeed function calls
`sprintf(cmd, "speedj([%1.5f, %1.5f, %1.5f, %1.5f, %1.5f, %1.5f], %f)\n",q0, q1, q2, q3, q4, q5, acc);`
Comment by ipa-hsd on 2016-10-10:
Does the trajectory has to have at least 2 points? Which actually makes sense..
Comment by AndyZe on 2016-10-10:
We're just sending one velocity command at a time, which works fine. But if you are sending position commands, I'm not sure.
Comment by ipa-hsd on 2016-10-11:
No, as I mentioned, I am sending velocity commands
Comment by ipa-hsd on 2016-10-12:
The solution works for me right now. Thanks!
Comment by toluwajosh on 2018-01-25:
Hi, could you please recommend a resource or provide a link to how I can obtain the Jacobian for a Motoman robot arm. I have tried some other methods and they don't seem to work.
Comment by AndyZe on 2018-01-25:
@intelharsh 's approach should work:
Eigen::MatrixXd jacobian = group->getCurrentState()->getJacobian(jmg);
Is MoveIt! set up correctly? Like, can you plan a move in RViz?
There's also a new pkg for this: jog_arm
Comment by toluwajosh on 2018-01-25:
So, I'm trying to include this function in a node which works well with Rviz. After adding the code snippet and trying to make with catkin_make I get the error; 'undefined reference to robot_model_loader, etc. So I'm wondering what could I be doing wrong.
Comment by toluwajosh on 2018-01-25:
I'll check out the jog_arm. Thanks | {
"domain": "robotics.stackexchange",
"id": 25946,
"tags": "ros, moveit, ros-control, ur-driver"
} |
Is PPO a policy-based method or an actor-critique-based method? | Question: as far as i understand there are 3 categories of Reinforcement algorithms:
Value-based methods (like DQN or Sarsa)
Policy-based methods (like REINFORCE)
Actor-critic-based methods (like A2C)
To which of those categories does PPO (Proximal Policy Optimization) belong to? As the name suggest it is based on policy optimization but I think it also uses an actor-critic-based structure?
Answer: PPO belongs to both category 2 and 3:
It has an actor-critic structure, meaning that it learns the optimal policy via the actor network, and the value function via the critic network.
Moreover, differently from SAC for example, the actor is learned via policy gradient (not Q-learning as in SAC). Therefore it is also policy-based.
Update: defining value-based, policy-based, and actor-critic agents - I'll provide an intuitive rather than technical definition.
In general, a reinforcement learning agent can be equipped (let's say) with one or more components such as a (figure from David Silver's lecture 1):
Policy, representing the agent's behavior function typically mapping states to actions;
Value function, i.e., a function or model that predicts the expected future reward;
Model of the environment that predicts its next state.
Now, not all these components are required (e.g., the env's model) and some of them can be implicit (like the policy, which can be extracted from the value function.) According to which is the "main" component, we can categorize agents into:
Value-based, which need only to learn a value function (either the state-value or state-action one). For such agents learning the value function is enough since the policy can be "extracted" from it. For example, from a Q-function is very easy to derive a policy that predicts the action associated with the higher state-action value. You can also have a policy from the value function itself, but a one-step model of the environment is needed since the future is involved. TD-learning and Q-learning are both examples of value-based agents. Moreover, we consider value functions the state-value, $V(s)$, action-value (or state-action) function, $Q(s,a)$, and the advantage function, $A(s,a)$.
Policy-based methods that learn directly a policy instead of first approximating a value function. The policy can be deterministic (i.e., a mapping) or stochastic (a probability distribution.) In both cases, a popular approach is to learn the policy's parameters by following the (deterministic) policy gradient, which induces a further specialization of this class of agents. Another way is to use evolutionary strategies (ES), or optimization algorithms. Classical REINFORCE and DPG are example policy-based agents.
Actor-critic agents, which merge both value- and policy-based methods by learning two models: the actor (policy) and critic (value function). Now the critic can be found having various roles, from variance reduction serving as a baseline (e.g., in A2C, PPO) but also as a way to learn a policy (e.g., DDPG, SAC, TD3). The important thing is that we don't use the critic to derive an implicit policy. | {
"domain": "ai.stackexchange",
"id": 4100,
"tags": "reinforcement-learning, actor-critic-methods, proximal-policy-optimization"
} |
Using Array to store calculations in VBA | Question: I have the following VBA code, which works perfectly well to calculate "q"
However, the code is very slow and that is due to the large number of q's being calculated (roughly 7.2m q's are being calculated).
So I thought the best way to proceed is to try store the calculated q values in an array and then dump them into the spreadsheet once all of them are calculated.
The q's will vary for each i and j combination. I have tried to add the following to try and store results in an array to the to the main code given below:
Dim results() as variant
Redim results(I,j)
Results (I,j)= q
Range("G5").value=results
This did not work and I know it does not even look half right, but if you could help me spot where I am going wrong it would be really appreciated.
Sub mort()
Dim age As Integer
Dim month As Integer
For i = 0 To ActiveSheet.Range("F5", Range("F5").End(xlDown)).Count
For j = 0 To ActiveSheet.Range("G3", Range("G3").End(xlToRight)).Count
gender = Range("C5").Offset(i, 0)
If gender = "F" Then
mortable = Worksheets("Female Tabs").Range("A3:C122")
Else
mortable = Worksheets("Male Tabs").Range("A3:C122")
End If
month = Range("G3").Offset(0, j)
age = WorksheetFunction.RoundDown(Range("F5").Offset(i, 0) + (month - 3) / 12, 0)
If age < 119 Then
a = (12 - ((month - 3) Mod 12)) / 12
a1 = Application.VLookup(age, mortable, 3, False)
b = ((month - 3) Mod 12) / 12
b1 = Application.VLookup(age + 1, mortable, 3, False)
Else
a1 = 0
b1 = 0
End If
q = (1 / 12) * (a * a1 + b * b1)
Worksheets("Policy Mortality Qx").Range("G5").Offset(i, j).Value = q
Next j
Next i
End Sub
Answer:
it is an actuarial model that I am working on. So, I have one list of
Males and Females, with their respective dates of birth/ages on one
tab. On another tab, I have mortality rates for males, and on another
tab mortality rates for females. So my code is supposed to circulate
through each person, identify gender and age and apply mortality rates
at each future time period. Sorry if that is a little unclear. I guess
it would be useful for me to understand in general how a calculation
carried out in vba can be stored in an array and then dumped into a
worksheet.
All right, to address that, what I would do is make a dictionary out of the two actuarial tables on the male worksheet and female worksheet.
Then create an array of the data you want to populate, and look it up in whatever dictionary is the correct one.
Without understanding exactly how your table is set up, I can only offer this example -
Option Explicit
Public Sub ArrayLookupAndPopulate()
Dim firstTable As Object
Set firstTable = CreateObject("Scripting.Dictionary")
Dim secondTable As Object
Set secondTable = CreateObject("Scripting.Dictionary")
Dim rowNumber As Long
Dim myKey As String
Dim lookupArray As Variant
Dim myIndex As Long
For rowNumber = 1 To 10
firstTable.Add CStr(Sheet1.Cells(rowNumber, 1)), Sheet1.Cells(rowNumber, 3)
secondTable.Add CStr(Sheet2.Cells(rowNumber, 1)), Sheet2.Cells(rowNumber, 3)
Next
Dim lastRow As Long
lastRow = Sheet3.Cells(Rows.Count, "A").End(xlUp).Row
Dim lastColumn As Long
lastColumn = Sheet3.Cells(1, Columns.Count).End(xlToLeft).Column + 1
ReDim lookupArray(1 To lastRow, 1 To lastColumn)
lookupArray = Sheet3.Range(Cells(1, 1), Cells(lastRow, lastColumn))
For myIndex = 1 To 9
myKey = lookupArray(myIndex, 2)
If lookupArray(myIndex, 1) = "First" Then lookupArray(myIndex, 3) = firstTable.Item(myKey)
If lookupArray(myIndex, 1) = "Second" Then lookupArray(myIndex, 3) = secondTable.Item(myKey)
Next
Sheet3.Range("F1:H9") = lookupArray
End Sub
You just need to adjust the names and the ranges because I was working with something static.
I posted this for review Creating two dictionaries to lookup values into an array | {
"domain": "codereview.stackexchange",
"id": 19977,
"tags": "array, vba, excel"
} |
Determining the moment of force | Question: A force $F = 3i + 2j$ passes through through a point $P$ with respect to an origin $O$. How do I determine the moment of the force at the origin.
Answer: The torque, or moment of force, is simply $\vec{\tau} = \vec{r} \times \vec{F}$, where $\vec{r}$ is the vector OP, and $\vec{F}$ is your force. Of course changing the origin of your SRS will change the torque.
By the way, this is not the place for homework related questions! | {
"domain": "physics.stackexchange",
"id": 15755,
"tags": "homework-and-exercises, forces, vectors"
} |
Why don't helicopters use reaction wheels to counter the main rotor? | Question: As the main title says. I'm finding myself wondering about helicopters. The tail rotor is a vulnerable and key piece of equipment, especially on military helicopters. I know some helicopters instead use two main rotors (for example the KA-50).
Why not use a reaction wheel? The main engine could power the wheel, and it could be placed in an armored area and less vulnerable to fragmentation munition. Is it because any reaction wheel would be prohibitively large?
Answer: You're talking about a device (in helicopters the tail fan imparting horizontal thrust) that counteracts the torque imparted on the main rotor (and therefore on the helicopter) by the surrounding air as the main rotor is dragged through the air.
You propose instead to impart an opposite torque through a reaction wheel. That would indeed impart an opposite torque for short lengths of time. However, you don't get a torque from spinning a reaction wheel at constant angular velocity but by changing and accelerating that angular velocity.
Now the torque imparted on the helicopter by the air through the main rotor is steady - or at least it's of roughly constant direction. Therefore, to counter that torque, the reaction wheel would have to be accelerated uniformly and indefinitely. Clearly this is impossible from an engineering standpoint.
You can also think of this from a conservation of angular momentum, without thinking about the origin of the torques. The air imparts a steady angular impulse to the helicopter. Therefore, the helicopter system's angular momentum must increase steadily (unless there's a countering torque from the tailfan). So either that angular momentum is the spinning of the helicopter's body (which is what we're trying to avoid) or that of the reaction wheel, whose angular momentum must be steadily increasing under the action of the angular impulse to the system. | {
"domain": "physics.stackexchange",
"id": 33129,
"tags": "angular-momentum, rotational-dynamics, conservation-laws, aerodynamics, rigid-body-dynamics"
} |
Incredible electron drift velocity in atomic thin layer of graphene? | Question: Free electrons in atomic thin layers of graphene behave more like photons (Bosons) than fermions reaching incredible drift velocities and mobility which reach speeds as reported by this article in the order of $700Km/s$ or more (see Abstract figure in above referenced article). These are incredible drift velocities relative to the sub-millimeter or less per second velocities in normal conductors.
This reminds me however of the drift velocity of electron Cooper Pairs in Type I superconductors where we have very similar values.
Are these two apparent different phenomena, quantum mechanisms, somehow correlated or similar leading to this increased electron mobility?
Or if not what exactly are the mechanics of atomic thin layer of graphene that results to this incredible mobility despite the fact that graphite (bulk form of graphene) is not the best conductor of electricity?
How is it possible graphene to achieve such speeds similar to superconductors but at room temperature!?
Would it not be more practical and feasible to consider graphene nanotube wires for power transmission instead of superconductors? (Current superconductors demand very low temperatures which are impractical for power transmission).
Additional Reference: article (speeds reported not for atomic thin layer but Ultra thin layer graphene semiconductors. Speed up to $200Km/s$ or more at room temperature, see fig.3).
Answer: I would say that superconductivity and the high mobility of graphene are completely unrelated.
From my understanding, the high mobility of graphene comes from two things. One is the intrinsic band structure. Graphene has a symmetry protected band crossing, which means that the low energy Hamiltonian is that of massless Dirac fermions, in contrast to a normal metal where you have a quadratic band dispersion. This leads to a very high Fermi velocity, which is essentially the slope of the Dirac cone in momentum space.
The second thing is that graphene is naturally a very clean system, with few extrinsic defects. This is related to the fact that it is only one atom thick and has very strong in plane bonds. This leads to a very long mean free path where the electrons can travel a long distance without scattering.
Interestingly enough, graphene does not have the highest mobility of two dimensional materials or heterostructures. Some of the highest mobilities are found in GaAs/GaAlAs heterostructures which can have mobility > 10^7 cm^2/Vs and are achieved through highly optimized and clean growth via molecular beam epitaxy. These high mobilities are what allowed us to observe interacting quantum phenomenon such as the fractional quantum hall effect in these systems. | {
"domain": "physics.stackexchange",
"id": 88347,
"tags": "quantum-mechanics, condensed-matter, electrons, superconductivity, graphene"
} |
A question on deriving a formula for a rotational object | Question: I have this question assigned, but I really am stuck on how to do it:
A bullet is shot through two cardboard disks attached a distance $D$ apart to a shaft turning with a rotational period $T$, as shown.
Derive a formula for the bullet speed $v$ in terms of $D$, $T$, and a measured angle $\theta$ between the position of the hole in the first disk and that of the hole in the second. If required, use $\pi$, not its numeric equivalent. Both of the holes lie at the same radial distance from the shaft.$\theta$ measures the angular displacement between the two holes; for instance, $\theta=0$ means that the holes are in a line and $\theta=\pi$ means that when one hole is up, the other is down. Assume that the bullet must travel through the set of disks within a single revolution.
So far I've compiled some information that I thought might help me, but I don't know how to apply any of it:
$$\theta=2\pi$$
$$\text{rotational period}=T$$
so the angular velocity is:
$$w={2\pi\over T}$$
I know that I need $D$ on its own, and the only way I could think of that was to do the distance formula
$$v={D\over t}$$
$$D={vt}$$
I'm not sure if this information is enough to derive an equation, if it is I just don't know how to apply it all. Is it valid to use the $t$ in this formula in place of the $T$ from the angular velocity one? What other information do I need?
Answer:
Is it valid to use the $t$ in this formula in place of the $T$ from the angular velocity one?
No, $T$ is the time for one revolution. The time $t$ that you calculate is the time between the bullet hitting the first and the second plate. Two very different things.
$\theta=2\pi$
This is not correct. $\theta$ is defined as the angle between the holes, not a whole round $2\pi$.
What other information do I need?
Let's consider the angle $\theta$ in the same way as you considered $D$. $D$ is the distance passed in time $t$ at the speed $v$. Similarly, $\theta$ is the angular distance passed in time $t$ at the angular speed $\omega$. You can therefore set up a similar expression for the angular motion:
$$\theta=\omega t$$
Use this with the other expressions you already have,
$$\omega=\frac{2\pi}{T}\quad\text{and}\quad D=vt\:,$$
and you are done. | {
"domain": "physics.stackexchange",
"id": 24629,
"tags": "angular-velocity"
} |
How will the Milky Way / Andromeda combined galaxy appear in 4 billion years? | Question: In 4 billion years, do we we have an idea of what the combined Milky Way and Andromeda galaxy pairing will look like?
In particular, will the presumed black hole at the centre of our galaxy, (and I guess there is one at the centre of the Andromeda galaxy also), have an effect on the morphology of the combined galaxies, before eventually absorbing them over the much longer timescale?
I assume that the Large and Small Magallanic Clouds, and other satellite systems of both galaxies, may be drawn in to the region.
I also wonder will the shape of both galaxies have changed, before they merge in 4 billion years time?
Will it be a collision (or I should say, in reality, a slow merger rather than anything dramatic) of two barred or elliptical galaxies, rather than the spiral armed galaxies we see now?
Although I have searched for duplicates on both PSE and Astronomy SE, (and on Google) I can't immediately find an answer to these particular questions, but my apologies if it has already been answered and I will delete this post if an answer exists.
Answer: In $4\times10^9$yr, M31 and MW (Milky Way) will have merged to form an elliptical galaxy. The internal spiral structures of either progenitor and their bars will be destroyed in the process, leaving a smooth ellipsoidal distribution of stars.
The supermassive black holes (SMBHs; note that the one in M31 is $\sim100$ times more massive than that in MW) will form a close binary which (most likely -- the precise astrophysical mechanism are not yet fully understood) will have merged to form a single SMBH, emitting gravitational radiation in the last stages of coalescing.
Some of the gas (ISM) of the progenitors will assemble in the core of the new galaxy where it may form new stars (via a 'starburst'). Some of this gas may accrete onto the SMBH, forming an active galactic nucleus (AGN). The momentum and energy feedback from the AGN will eventually clear the new galaxy of most of its gas, leaving it 'red and dead' (= no longer star forming, hence only having red stars) -- like most massive ellipticals.
This answer is based on the generally accepted astrophysical wisdom, which in turn is based on observations, analytic arguments, and computer ($N$-body) simulations. | {
"domain": "physics.stackexchange",
"id": 23882,
"tags": "black-holes, galaxies"
} |
Local charge conservation in quantum mechanics | Question: The description of charged particles in electrodynamics obeys the continuity equation,
$$ \nabla \cdot \textbf{J} = -\frac{\partial \rho}{\partial t} $$
With the common physical understanding of this equation being that it describes local charge conservation.
My question is whether the quantum mechanical description of charged particles (like an electron) is consistent with this. Since in quantum mechanics we can't associate a well defined trajectory with the charged particle I can't see how local charge conservation is incorporated in quantum mechanics.
Answer: Particles in quantum mechanics obey a similar continuity equation for probability. This is necessary for probability conservation. Whenever the probability for a particle being in a particular region increases, the probability for finding the particle in the rest of space must decrease. The total probability of finding the particle must always be 1 (or 100% if you prefer). If the particle has a charge associated with it, we consider the probability distribution of charge. This is found by just multiplying the spacial probability distribution by the charge of the particle. This charge probability distribution does indeed obey the continuity equation.
Too much information:
In quantum field theory, local charge conservation plays a very important role. Noether's theorem associates every conservation law with a symmetry. The symmetry associated with charge conservation is gauge invariance. (Gauge invariance means we can use multiple scalar and vector potential functions for the same physical situation. For example, you can add any constant to the electric potential function $V(\mathbf{r})$ without changing $\mathbf{E}=-\nabla V$.) Generalizations of the gauge invariance of electromagnetism allow us to construct particles with more interesting conserved charges, like the color charge associated with the strong force. | {
"domain": "physics.stackexchange",
"id": 70812,
"tags": "quantum-mechanics, electromagnetism, conservation-laws"
} |
32-bit ALU design implementation and testbench | Question: This is 32bit ALU with a zero flag,
F2:0 Function
000 A AND B
001 A OR B
010 A + B
011 not used
100 A AND B
101 A OR B
110 A − B
111 SLT
SLT is set-less-than: it sets the least significant bit of the ALU output to 1 if A < B (and the remaining bits to 0)
This is the ALU module
// 32-bit ALU. `f` selects the operation (see the table in the question text);
// `zero` is asserted whenever the 32-bit result `out` is all zeroes.
module alu(input logic [31:0] a, input logic [31:0] b, input logic [2:0] f, output logic [31:0] out, output logic zero);
// Scratch word holding a - b for the SLT path.
// NOTE(review): `tmp` is only assigned when f == 3'b111, so some synthesizers
// will infer a latch for it as well.
logic [31:0]tmp;
always @(a, b,f)
begin
if (f == 3'b000) // And
out = a & b;
else if( f == 3'b001) // Or
out = a | b;
else if( f == 3'b010) // Add
out = a + b;
else if( f == 3'b100) // New and
out = a & ~b;
else if( f == 3'b101) // New or
out = a | ~b;
else if( f == 3'b110) // SUB
out = a - b;
else if( f == 3'b111) // SLT
begin
tmp = a - b;
out[31:1] = 31'h0;
// A < B when the sign bit of a - b is set.
// NOTE(review): correct signed compare only while a - b does not overflow;
// a full SLT would also fold in the overflow condition.
out[0] = (tmp[31] == 1'b1);
end
// NOTE(review): there is no else/default branch (f == 3'b011 is unused), so
// `out` holds its previous value on that code -- an inferred latch. Assigning
// a default to `out` at the top of the block would make this purely combinational.
if (out == 32'h00000000)
zero = 1;
else
zero = 0;
end
endmodule
This is the test bench I built for the code
// Self-checking testbench for the 32-bit ALU.
// Each stanza drives f/a/b, waits 10 time units, then compares {out, zero}
// against the expected values; mismatches print in red via ANSI escapes.
// Fix over previous revision: the f == 3'b100 (A & ~B) and f == 3'b101
// (A | ~B) opcodes were never exercised -- stanzas added at the end.
module alu_tb;
reg[31:0] a;
reg [31:0] b;
reg[2:0] f;
wire [31:0] out;
wire zero;
alu DUT (a,b,f,out,zero);
initial begin
$dumpfile("alu.vcd");
$dumpvars(0, DUT);
$monitor("A = 0x%x, B = 0x%x, f=0b%b\n\tOut = 0x%x, z = %b", a, b,f, out, zero);
// ---- ADD (f = 010) ----
f = 3'b010; // 0 + 0
a = 32'h0000_0000;
b = 32'h0000_0000;
#10
if ( out !== 32'h0 | zero !== 1'b1)
$display("\t%s0 + 0 failed.\tExpected out = 0x%0x, z = %b%s","\033[0;31m", 32'h0, 1'b1, "\033[0m");
f = 3'b010; // 0 + (-1)
a = 32'h0000_0000;
b = 32'hFFFF_FFFF;
#10
if ( out !== 32'hFFFF_FFFF | zero !== 1'b0)
$display("\t%s0 + (-1) failed.\tExpected out = 0x%0x, z = %b%s","\033[0;31m", 32'hFFFF_FFFF, 1'b0, "\033[0m");
f = 3'b010; // 1 + (-1)
a = 32'h0000_0001;
b = 32'hFFFF_FFFF;
#10
if ( out !== 32'h0 | zero !== 1'b1)
$display("\t%s1 + (-1) failed.\tExpected out = 0x%0x, z = %b%s","\033[0;31m", 32'h0, 1'b1, "\033[0m");
f = 3'b010; // FF + 1
a = 32'h0000_00FF;
b = 32'h0000_0001;
#10;
if ( out !== 32'h100 | zero !== 1'b0)
$display("\t%s0xFF + 1 failed.\tExpected out = 0x%0x, z = %b%s","\033[0;31m", 32'h100, 1'b0, "\033[0m");
// ---- SUB (f = 110) ----
f = 3'b110; // 0 - 0
a = 32'h0000_0000;
b = 32'h0000_0000;
#10;
if ( out !== 32'h0 | zero !== 1'b1)
$display("\t%s0 - 0 failed.\tExpected out = 0x%0x, z = %b%s","\033[0;31m", 32'h0, 1'b1, "\033[0m");
f = 3'b110; // 0 - (-1)
a = 32'h0000_0000;
b = 32'hFFFF_FFFF;
#10;
if ( out !== 32'h1 | zero !== 1'b0)
$display("\t%s0 - (-1) failed.\tExpected out = 0x%0x, z = %b%s","\033[0;31m", 32'h1, 1'b0, "\033[0m");
f = 3'b110; // 1 - 1
a = 32'h0000_0001;
b = 32'h0000_0001;
#10;
if ( out !== 32'h0 | zero !== 1'b1)
$display("\t%s1 - 1 failed.\tExpected out = 0x%0x, z = %b%s","\033[0;31m", 32'h0, 1'b1, "\033[0m");
f = 3'b110; // 100 - 1
a = 32'h0000_0100;
b = 32'h0000_0001;
#10;
if ( out !== 32'hFF | zero !== 1'b0)
$display("\t%s100 - 1 failed.\tExpected out = 0x%0x, z = %b%s","\033[0;31m", 32'hFF, 1'b0, "\033[0m");
// ---- SLT (f = 111) ----
f = 3'b111; // SLT 0, 0
a = 32'h0000_0000;
b = 32'h0000_0000;
#10;
if ( out !== 32'h0 | zero !== 1'b1)
$display("\t%sSLT 0, 0 failed.\tExpected out = 0x%0x, z = %b%s","\033[0;31m", 32'h0, 1'b1, "\033[0m");
f = 3'b111; // SLT 0, 1
a = 32'h0000_0000;
b = 32'h0000_0001;
#10;
if ( out !== 32'h1 | zero !== 1'b0)
$display("\t%sSLT 0, 1 failed.\tExpected out = 0x%0x, z = %b%s","\033[0;31m", 32'h1, 1'b0, "\033[0m");
f = 3'b111; // SLT 0, -1
a = 32'h0000_0000;
b = 32'hFFFF_FFFF;
#10;
if ( out !== 32'h0 | zero !== 1'b1)
$display("\t%sSLT 0, -1 failed.\tExpected out = 0x%0x, z = %b%s","\033[0;31m", 32'h0, 1'b1, "\033[0m");
f = 3'b111; // SLT 1, 0
a = 32'h0000_0001;
b = 32'h0000_0000;
#10;
if ( out !== 32'h0 | zero !== 1'b1)
$display("\t%sSLT 1, 0 failed.\tExpected out = 0x%0x, z = %b%s","\033[0;31m", 32'h0, 1'b1, "\033[0m");
f = 3'b111; // SLT -1, 0
a = 32'hFFFF_FFFF;
b = 32'h0000_0000;
#10;
if ( out !== 32'h1 | zero !== 1'b0)
$display("\t%sSLT -1, 0 failed.\tExpected out = 0x%0x, z = %b%s","\033[0;31m", 32'h1, 1'b0, "\033[0m");
// ---- AND (f = 000) ----
f = 3'b000; // -1 & -1
a = 32'hFFFF_FFFF;
b = 32'hFFFF_FFFF;
#10;
if ( out !== 32'hFFFF_FFFF | zero !== 1'b0)
$display("\t%s 0xFFFFFFFF & 0xFFFFFFFF failed.\tExpected out = 0x%0x, z = %b%s","\033[0;31m", 32'hFFFF_FFFF, 1'b0, "\033[0m");
f = 3'b000; // -1 & 12345678
a = 32'hFFFF_FFFF;
b = 32'h1234_5678;
#10;
if ( out !== 32'h1234_5678 | zero !== 1'b0)
$display("\t%s0xFFFFFFFF & 0x12345678 failed.\tExpected out = 0x%0x, z = %b%s","\033[0;31m", 32'h12345678, 1'b0, "\033[0m");
f = 3'b000; // 12345678 & 87654321
a = 32'h1234_5678;
b = 32'h8765_4321;
#10;
if ( out !== 32'h02244220 | zero !== 1'b0)
$display("\t%s0x12345678 & 0x87654321 failed.\tExpected out = 0x%0x, z = %b%s","\033[0;31m", 32'h02244220, 1'b0, "\033[0m");
f = 3'b000; // -1 & 0
a = 32'hFFFF_FFFF;
b = 32'h0000_0000;
#10;
if ( out !== 32'h0 | zero !== 1'b1)
$display("\t%s0xFFFFFFFF & 0x0 failed.\tExpected out = 0x%0x, z = %b%s","\033[0;31m", 32'h0, 1'b1, "\033[0m");
// ---- OR (f = 001) ----
f = 3'b001; // -1 | -1
a = 32'hFFFF_FFFF;
b = 32'hFFFF_FFFF;
#10;
if ( out !== 32'hFFFF_FFFF | zero !== 1'b0)
$display("\t%s0xFFFFFFFF | 0xFFFFFFFF failed.\tExpected out = 0x%0x, z = %b%s","\033[0;31m", 32'hFFFFFFFF, 1'b0, "\033[0m");
f = 3'b001; // 12345678 | 87654321
a = 32'h1234_5678;
b = 32'h8765_4321;
#10;
if ( out !== 32'h9775_5779 | zero !== 1'b0)
$display("\t%s0x12345678 | 0x87654321 failed.\tExpected out = 0x%0x, z = %b%s","\033[0;31m", 32'h97755779, 1'b0, "\033[0m");
f = 3'b001; // 0 | -1
a = 32'h0000_0000;
b = 32'hFFFF_FFFF;
#10;
if ( out !== 32'hFFFF_FFFF | zero !== 1'b0)
$display("\t%s0x0 | 0xFFFFFFFF failed.\tExpected out = 0x%0x, z = %b%s","\033[0;31m", 32'hFFFFFFFF, 1'b0, "\033[0m");
f = 3'b001; // 0 | 0
a = 32'h0000_0000;
b = 32'h0000_0000;
#10;
if ( out !== 32'h0 | zero !== 1'b1)
$display("\t%s0 | 0 failed.\tExpected out = 0x%0x, z = %b%s","\033[0;31m", 32'h0, 1'b1, "\033[0m");
// ---- A & ~B (f = 100) and A | ~B (f = 101): previously untested opcodes ----
f = 3'b100; // -1 & ~12345678
a = 32'hFFFF_FFFF;
b = 32'h1234_5678;
#10;
if ( out !== 32'hEDCB_A987 | zero !== 1'b0)
$display("\t%s0xFFFFFFFF & ~0x12345678 failed.\tExpected out = 0x%0x, z = %b%s","\033[0;31m", 32'hEDCBA987, 1'b0, "\033[0m");
f = 3'b101; // 0 | ~12345678
a = 32'h0000_0000;
b = 32'h1234_5678;
#10;
if ( out !== 32'hEDCB_A987 | zero !== 1'b0)
$display("\t%s0x0 | ~0x12345678 failed.\tExpected out = 0x%0x, z = %b%s","\033[0;31m", 32'hEDCBA987, 1'b0, "\033[0m");
$finish;
end
endmodule
Improvement to the ALU code or the test bench will be appreciated.
Answer: For the design
Issues
The biggest issue with your code is that out is an inferred latch, because the case f == 3'b011 is left unhandled. Such latches are not ideal, as they can cause area and timing issues. To remove the latch, simply assign out a determinate default value: out = a, out = b, out = 0, or another constant (not out = out, which is still a latch).
Depending on your synthesizer, tmp may be treated as a latch. If it is, you should move the tmp = a - b above the if/case statement. Or code SLT differently so that tmp is not needed.
Improvements
Other than using declaring the port data types as logic, you have not used any SystemVerilog constructs.
always @(a, b,f) is one Verilog 2001 way of declaring a combinational block and its sensitivity list. Verilog 1995 required or instead of ,. Verilog 2001 also introduced auto-sensitivity (@* or the synonymous @(*)) which is preferred over manually managed sensitivity lists; especially when the list is long. Your @(a, b,f) is not wrong; it could be better. For more refer to IEEE1800-2012 § 9.4.2.2 Implicit event_expression list.
SystemVerilog went one step further and introduced always_comb as an improvement over always @*. always_comb throws a compiling error when one of the variables it is assigning is also assigned by any other block (if not caught here it would have been an error in synthesis). It also allows the designer to specify the intention of the block to lint, lec, synthesis, and other tools that the block is combinational logic. This allows those tools to flag a warning/error if they detect a latch in the logic. (There is always_latch for when you want a latch). For more refer to IEEE1800-2012 § 9.2.2.2.2 always_comb compared to always @*.
You may consider changing chained else-if statements into a case statement. It can be easier for the synthesizer to detect full-case logic with a case statement than with else-if statements. Since you are using SystemVerilog, you may want to consider the unique or priority prefix depending on your target area/performance/design-constraints.
For the test bench
The test bench is very brute-force, testing only about 21 conditions and not testing any conditions where f == 3'b100 or f == 3'b101. There are trillions of legal combinations. It is unreasonable to check all of them, but you need to check the major corner cases: all legal values of f, overflow, underflow, and random values.
I suggest adding a clock to your test bench to synchronizes randomization and checking. This way you can randomize your values and use assertions to check them. Read about assertions in IEEE1800-2012 § 16. Example of an assertion:
a_SLT : assert property(@(posedge clk) f==3'b111 |-> out == (a<b))
else $error("%0t : STL failed out == %0h expected %0h with a = %h, b = %h, was out = %h", $time, out, (a<b), a, b);
You might also want to look into functional coverage (IEEE1800-2012 § 19), to get an idea that your test bench has covered and spot possible coverage holes.
There are plenty of advance test bench practices and strategies; such as UVM and formal analysis, that I will not cover. For your assignment automated checking and constrained randomization should get you what you need.
Reminder: a and b are unsigned, therefore 32'hFFFF_FFFF is 4294967295, not -1 | {
"domain": "codereview.stackexchange",
"id": 24675,
"tags": "beginner, verilog, hdl"
} |
Synthesis Golf VI: Muscarine | Question: October's synthesis golf is rather late - apologies. I've chosen a slightly different-looking target this round, with the aim of inspiring a wider variety of routes.
Muscarine is a natural product found in some mushrooms. Most notably, it is a good agonist of one class of acetylcholine receptors - these receptors are therefore called muscarinic acetylcholine receptors.
The only conditions for this round are that:
the synthesis must be enantioselective; and
no more than one chiral centre may be purchased in the form of a building block. (Chiral catalysts, ligands, auxiliaries, etc. do not fall under this rule.)
Otherwise, any commercially available starting material (as usual, in the Sigma–Aldrich catalogue) is fair game! There is also no restriction on the counterion.
Answer: 1. Retrosynthesis
This proposed route relies on being able to make the tetrahydrofuran via some type of ring closing etherification.
Through disconnection of the C-O bond, a linear diol is revealed which may be accessed in several ways.
Initially, I'd thought about doing a Sharpless dihydroxylation of (Z)-hex-1,4-diene, however the starting material is surprisingly expensive and there are few literature reports of doing this dihydroxylation (and in those instances where it has been carried out, the e.e. and regioselectivity has been moderate to poor).
A different disconnection, using the chiral pool (less elegant, perhaps, but the starting material is cheap and ensures good selectivity) allows the diol to be disconnected across the C-C bond, revealing an aldehyde which is derived from a lactate ester.
2. Forward synthesis
Reagents and conditions: (a) TIPSOTF, 2,6-lutidine (b) DIBAL; allyl-MgBr (c) BnTCA, PPTS (d) Shi epoxidation (e) HF-Py, Py (f) TsCl, Py (g) NaN3 (h) H2, Pd/C (i) MeI
The synthesis starts with protection of methyl lactate with a TIPS group - a silyl group is chosen here as upon deprotection under fluoride conditions later in the synthesis, the THF should spontaneously close.
The TIPS protected methyl lactate may then be treated with DIBAL to afford the lactaldehyde. This must be used immediately to prevent epimerisation. In all likelihood, this could be achieved by adding a solution of allyl magnesium bromide to the same pot as the DIBAL reduction but failing this, there is good precedence for a reagent controlled boron mediated allylation (Tet. Lett. 2003, 44, 1737).
The terminal olefin installed by allylation is now setup to do a Shi epoxidation (Sharpless is terrible at terminal olefins) providing the final stereocentre. Treatment of the substrate with HF-Py should then deprotect the TIPS ether with concomitant formation of the THF ring system.
Finally, functional group transformations provide the desired natural product as shown in the scheme. Some issues may be observed when attempting to methylate the amine in the presence of the alcohol however from experience of similar substrates, the amine is likely to quaternise faster than the alcohol methylates and as such careful control of stoichiometry and conditions should allow the desired product to be isolated as the major component.
Summary
A synthesis of muscarine is proposed in 9 steps (linear) starting from methyl lactate, with the remaining stereocentres being installed using a substrate controlled allylation and a reagent controlled asymmetric epoxidation.
Addition steps could likely be cut out by (1) not protecting the alcohol that is currently protected with a benzyl group and (2) making use of an asymmetric aziridination in place of the Shi epoxidation. | {
"domain": "chemistry.stackexchange",
"id": 9043,
"tags": "synthesis-golf"
} |
For Djikstra's algorithm, why are we surely done if we update all edges $|V|-1$ times? | Question: Apparently, if we use Djikstra's algorithm to find the shortest path between the root node and all other nodes in a weighted graph with no negative cycles, we are done after updating the distance of each node $|V| - 1$ times.
This puzzles me because I think that a single round of breadth first search is enough. Why must we do $|V| - 1$ of these searches?
Answer: Either you mixed up two algorithms or you misinterpreted an upper bound. One level of a BFS with priority queue is enough, but during this one round a neighbor of the root may be updated $|V|-1$ times, therefore the update operation needs to be efficient. | {
"domain": "cs.stackexchange",
"id": 1485,
"tags": "algorithms, graphs, shortest-path, correctness-proof"
} |
Reverse engineering Darkstone game archives | Question: Reverse engineering old games is something I do every now and then. This time, I took a shot at this old RPG called Darkstone.
The bulk of the game's data is stored in archive files with the .MTF extension (not related to the Microsoft Tape Format). A partial description for this file format can be found here. The format uses a custom compression algorithm (I don't know much about compression, so maybe it has a standard name?), which is fairly simple. The previous link describes it, but as @JS1 noted in a comment, the description seems to get the order of bits wrong, the top six bits of a word are the count, the other 10 are the offset.
So I went on and wrote the following code to unpack an MTF archive into normal files. I used plain C this time. Let me know if this can be improved in any way. Performance was not the main concern, but It wouldn't be bad to make it faster if possible, though I don't want to sacrifice readability for that. I also tried to make it more or less like a library, in case someone wants to incorporate the code into another project.
mtf.h:
#ifndef DARKSTONE_MTF_H
#define DARKSTONE_MTF_H
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
/* ========================================================
* DarkStone MTF game archive structures:
* ======================================================== */
// 12-byte header that precedes the payload of each compressed entry.
// It is fread() directly from disk, so field order and packing must match
// the on-disk layout (sizeof is asserted to be 12 elsewhere in mtf.c).
typedef struct mtf_compressed_header {
uint8_t magic1; // (Apparently) always 0xAE (174) or 0xAF (175) for a compressed file.
uint8_t magic2; // (Apparently) always 0xBE (190) for a compressed file.
uint16_t unknown; // Unknown data. Seems to repeat a lot. Decompression works without it anyway.
uint32_t compressedSize; // Advertised compressed size in bytes of the entry.
uint32_t decompressedSize; // Decompressed size from `mtf_file_entry_t` is repeated here.
} mtf_compressed_header_t;
// One entry of the archive's table of contents.
typedef struct mtf_file_entry {
char * filename; // Allocated in the heap; Read from file. Null terminated.
uint32_t filenameLength; // Filename length, including null terminator.
uint32_t dataOffset; // Absolute MTF archive offset to this file entry. NOTE(review): presumably the offset of the entry's data/compressed header -- confirm against the reader code.
uint32_t decompressedSize; // Decompressed size in bytes of the file.
} mtf_file_entry_t;
// An open MTF archive: the OS file handle plus its parsed table of contents.
typedef struct mtf_file {
FILE * osFileHandle; // MTF file handle returned by fopen().
mtf_file_entry_t * fileEntries; // Sorted alphabetically by filename.
uint32_t fileEntryCount; // Size of fileEntries[].
} mtf_file_t;
enum {
MTF_EXTRACT_ALL = -1, // Pass as `maxFileToExtract` to extract every entry in the archive.
MTF_MAX_PATH_LEN = 1024 // Longest file path (including the '\0') the extractor handles.
};
/* ========================================================
* Decompression functions:
* ======================================================== */
/*
* Opens a DarkStone MTF archive for reading.
* It is safe to call mtf_file_close() even if this function fails.
*/
bool mtf_file_open(mtf_file_t * mtf, const char * filename);
/*
* Closes an MTF archive previously opened by mtf_file_open().
*/
void mtf_file_close(mtf_file_t * mtf);
/*
* Extract the contents of an MTF archive to normal files
* in the local file system. Overwrites existing files.
* The internal directory structure of the MTF is preserved.
*
* You may specify a maximum number of files to extract or
* pass MTF_EXTRACT_ALL to `maxFileToExtract` and allow the
* extraction of all files in the archive. `filesExtracted`
* is optional and may be null. If provided, it will output
* the number of files successfully extracted.
*/
bool mtf_file_extract_batch(const char * srcMtfFile, const char * destPath,
int maxFileToExtract, int * filesExtracted);
/*
* All the above functions will set a global string with
* an error description if something goes wrong. You can
* recover the error description by calling this function
* after a failure happens.
*
* Calling this function will clear the internal error string.
*/
const char * mtf_get_last_error(void);
#endif // DARKSTONE_MTF_H
mtf.c:
#include "mtf.h"
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
/* ========================================================
* mtf_get_last_error()/mtf_error():
* ======================================================== */
// Last-error description shared by this translation unit. Not thread safe;
// compilers with Thread Local Storage support could qualify it so that
// parallel file processing wouldn't step on each other's toes when
// reporting errors.
static const char * mtfLastErrorStr = "";

/*
 * Records `message` as the current error description (NULL clears it).
 * Always returns false, so bool-returning call sites can simply write
 * "return mtf_error(...);".
 */
static inline bool mtf_error(const char * message) {
    mtfLastErrorStr = (message == NULL) ? "" : message;
    return false;
}

/*
 * Returns the description of the most recent failure ("" when none)
 * and clears the stored error string as a side effect.
 */
const char * mtf_get_last_error(void) {
    const char * description = mtfLastErrorStr;
    mtfLastErrorStr = "";
    return description;
}
/* ========================================================
* Path/directory helpers:
* ======================================================== */
/* True when `ch` lies in the 7-bit ASCII range [0, 127]. */
static inline bool mtf_is_ascii(int ch) {
    // Single unsigned compare: negatives wrap to huge values and fail it.
    return (unsigned int)ch < 128u;
}
/*
 * Sanitizes an archive path for the local file system, in place:
 *  - DarkStone stores Windows-style paths, so every '\\' becomes '/';
 *  - bytes outside 7-bit ASCII (a few archive entries carry accented
 *    characters) become '?', since they don't translate reliably
 *    across host file systems (e.g. on the Mac).
 */
static void mtf_fix_filepath(char * pathInOut) {
    assert(pathInOut != NULL);
    for (char * cursor = pathInOut; *cursor != '\0'; ++cursor) {
        if (*cursor == '\\') {
            *cursor = '/';
        } else if ((unsigned char)*cursor > 127) { // same test as mtf_is_ascii()
            *cursor = '?';
        }
    }
}
/*
 * Ensures `dirPath` exists as a directory: creates it (mode 0777) when
 * missing, fails when the path already names something that is not a
 * directory. Returns false with the error string set on failure.
 * NOTE: stat/mkdir are defined differently on Windows, so this will
 * need a fix when porting to Win/VS.
 */
static bool mtf_make_directory(const char * dirPath) {
    assert(dirPath != NULL);

    struct stat dirStat;
    const bool pathExists = (stat(dirPath, &dirStat) == 0);

    if (!pathExists) {
        if (mkdir(dirPath, 0777) != 0) {
            return mtf_error("Impossible to create directory! mkdir(0777) failed.");
        }
        return true;
    }

    // Path is already there: only OK if it really is a directory.
    if (!S_ISDIR(dirStat.st_mode)) {
        return mtf_error("Can't mkdir()! Path points to a file.");
    }
    return true;
}
/*
 * Creates every directory along `pathEndedWithSeparatorOrFilename`,
 * invoking mtf_make_directory() for each component up to (and including)
 * the last separator. A trailing filename component is left untouched.
 * Accepts both '/' and '\\' as separators.
 * Returns false (with the error string set) on failure.
 */
static bool mtf_make_path(const char * pathEndedWithSeparatorOrFilename) {
    assert(pathEndedWithSeparatorOrFilename != NULL);

    // Explicit length check: the assert() disappears under NDEBUG, and the
    // previous strncpy() would then leave `dirPath` unterminated for paths
    // of MTF_MAX_PATH_LEN characters or more (undefined behavior below).
    const size_t pathLen = strlen(pathEndedWithSeparatorOrFilename);
    if (pathLen >= MTF_MAX_PATH_LEN) {
        return mtf_error("mtf_make_path() failed! Path is too long.");
    }

    char dirPath[MTF_MAX_PATH_LEN];
    memcpy(dirPath, pathEndedWithSeparatorOrFilename, pathLen + 1); // +1 copies the '\0' too.

    for (char * pPath = dirPath; *pPath != '\0'; ++pPath) {
        if (*pPath == '/' || *pPath == '\\') {
            *pPath = '\0'; // Temporarily terminate here to mkdir the prefix.
            if (!mtf_make_directory(dirPath)) {
                return false;
            }
            *pPath = '/';
        }
    }
    return true;
}
/* ========================================================
* mtf_readX():
* ======================================================== */
/* Reads one little-endian 32-bit word from the stream into `dword`.
 * NOTE(review): bytes are taken in the file's native byte order —
 * this assumes a little-endian host; confirm before porting. */
static inline bool mtf_read32(FILE * fileIn, uint32_t * dword) {
    const size_t itemsRead = fread(dword, sizeof(*dword), 1, fileIn);
    return (itemsRead == 1) ? true : mtf_error("mtf_read32() failed!");
}
/* Reads one 16-bit word from the stream into `word`. */
static inline bool mtf_read16(FILE * fileIn, uint16_t * word) {
    const size_t itemsRead = fread(word, sizeof(*word), 1, fileIn);
    return (itemsRead == 1) ? true : mtf_error("mtf_read16() failed!");
}
/* Reads a single byte from the stream into `byte`. */
static inline bool mtf_read8(FILE * fileIn, uint8_t * byte) {
    const int ch = fgetc(fileIn);
    if (ch != EOF) {
        *byte = (uint8_t)ch;
        return true;
    }
    return mtf_error("mtf_read8() failed!");
}
/* ========================================================
* mtf_read_compressed_header():
* ======================================================== */
/* Seeks to `offset` and reads the 12-byte compression info header. */
static inline bool mtf_read_compressed_header(FILE * fileIn, uint32_t offset,
                                              mtf_compressed_header_t * header) {
    assert(fileIn != NULL);
    assert(header != NULL);
    // BUGFIX: fseek() can fail (e.g. a bogus offset) and was unchecked.
    if (fseek(fileIn, (long)offset, SEEK_SET) != 0) {
        return false;
    }
    // BUGFIX: a short read on a truncated file sets the EOF indicator,
    // not the error indicator, so the old ferror() test would report
    // success while leaving `header` full of garbage. Check the item
    // count returned by fread() instead.
    return fread(header, sizeof(*header), 1, fileIn) == 1;
}
/* ========================================================
* mtf_is_compressed():
* ======================================================== */
/*
 * Tells whether an entry's data block is compressed.
 * These magic numbers are from the Xentax Wiki:
 * http://wiki.xentax.com/index.php?title=Darkstone
 */
static inline bool mtf_is_compressed(const mtf_compressed_header_t * header) {
    const bool magic1Matches = (header->magic1 == 0xAE) || (header->magic1 == 0xAF);
    const bool magic2Matches = (header->magic2 == 0xBE);
    return magic1Matches && magic2Matches;
}
/* ========================================================
* mtf_decompress_write_file():
* ======================================================== */
/*
 * Decompresses one archive entry into a temporary heap buffer and then
 * writes the whole buffer to `fileOut`. Returns true on success.
 *
 * NOTE: `fileIn` must point past the compressed header!
 */
static bool mtf_decompress_write_file(FILE * fileIn, FILE * fileOut, uint32_t decompressedSize,
                                      const mtf_compressed_header_t * compressedHeader) {
    assert(fileIn != NULL);
    assert(fileOut != NULL);
    assert(compressedHeader != NULL);
    assert(decompressedSize != 0);
    // Would be better as a compile-time assert (_Static_assert on C11).
    assert(sizeof(mtf_compressed_header_t) == 12 && "Unexpected size for this struct!");
    uint8_t * decompressBuffer = malloc(decompressedSize);
    if (decompressBuffer == NULL) {
        return mtf_error("Failed to malloc decompression buffer!");
    }
    uint8_t * decompressedPtr = decompressBuffer;
    bool hadError = false;
    uint32_t bytesLeft = decompressedSize;
    // Process one compressed chunk at a time until we have produced the
    // advertised decompressed size in bytes.
    while (bytesLeft > 0) {
        // Each compressed block/chunk is prefixed by a one byte header.
        // Each bit in it tells us how to handle the next read from the file.
        uint8_t chunkBits;
        if (!mtf_read8(fileIn, &chunkBits)) {
            hadError = true;
            goto BAIL;
        }
        // For each bit in the chunk header, starting from the
        // lower/right-hand bit (little endian). Stop as soon as the
        // output buffer is full; remaining bits are padding.
        for (int b = 0; b < 8 && bytesLeft > 0; ++b) {
            if (chunkBits & (1u << b)) {
                // Flag bit set: the next byte is copied through unchanged.
                uint8_t byte;
                if (!mtf_read8(fileIn, &byte)) {
                    hadError = true;
                    goto BAIL;
                }
                *decompressedPtr++ = byte;
                --bytesLeft;
            } else {
                // Flag bit clear: the next two bytes encode an offset and
                // a byte count to replicate from data already produced
                // (an LZ77-style back-reference).
                uint16_t word;
                if (!mtf_read16(fileIn, &word)) {
                    hadError = true;
                    goto BAIL;
                }
                if (word == 0) {
                    // A few entries seem to carry zero padding; benign.
                    break;
                }
                const uint32_t count  = (uint32_t)(word >> 10) + 3; // Top 6 bits, plus implicit 3
                const uint32_t offset = (word & 0x03FF);            // Lower 10 bits
                // BUGFIX: validate the back-reference *before* copying.
                // The old code copied first and only checked bytesLeft
                // afterwards, letting a malformed entry write past the
                // end of the heap buffer — and an offset larger than the
                // bytes produced so far read before its start.
                if (offset == 0 || offset > (uint32_t)(decompressedPtr - decompressBuffer)) {
                    mtf_error("Back-reference points outside the decompression buffer!");
                    hadError = true;
                    goto BAIL;
                }
                if (count > bytesLeft) {
                    mtf_error("Compressed/decompressed size mismatch!");
                    hadError = true;
                    goto BAIL;
                }
                // Copy count bytes starting `offset` bytes back, as explained
                // here: http://wiki.xentax.com/index.php?title=Darkstone
                for (uint32_t n = 0; n < count; ++n) {
                    *decompressedPtr = *(decompressedPtr - offset);
                    ++decompressedPtr;
                }
                bytesLeft -= count;
            }
        }
    }
BAIL:
    if (!hadError) {
        if (fwrite(decompressBuffer, 1, decompressedSize, fileOut) != decompressedSize) {
            mtf_error("Failed to write decompressed file data!");
            hadError = true;
        }
    }
    free(decompressBuffer);
    return !hadError;
}
/* ========================================================
* mtf_write_file():
* ======================================================== */
/*
 * Copies an uncompressed entry of `sizeInBytes` bytes, starting at
 * `readOffset` in the archive, straight into `fileOut`.
 */
static bool mtf_write_file(FILE * fileIn, FILE * fileOut,
                           uint32_t sizeInBytes, uint32_t readOffset) {
    assert(fileIn != NULL);
    assert(fileOut != NULL);
    if (sizeInBytes == 0) {
        // Nothing to copy. Also avoids malloc(0), which may legally
        // return NULL and would have been reported as an error before.
        return true;
    }
    void * readBuffer = malloc(sizeInBytes);
    if (readBuffer == NULL) {
        return mtf_error("mtf_write_file(): Failed to malloc buffer!");
    }
    // Single cleanup path instead of the previous four copies of free().
    bool success = false;
    if (fseek(fileIn, (long)readOffset, SEEK_SET) != 0) {
        mtf_error("mtf_write_file(): Can't fseek() entry offset!");
        goto cleanup;
    }
    if (fread(readBuffer, 1, sizeInBytes, fileIn) != sizeInBytes) {
        mtf_error("mtf_write_file(): Can't read source file entry!");
        goto cleanup;
    }
    if (fwrite(readBuffer, 1, sizeInBytes, fileOut) != sizeInBytes) {
        mtf_error("mtf_write_file(): Can't write dest file!");
        goto cleanup;
    }
    success = true;
cleanup:
    free(readBuffer);
    return success;
}
/* ========================================================
* mtf_sort_by_filename() => qsort() predicate:
* ======================================================== */
static int mtf_sort_by_filename(const void * a, const void * b) {
return strcmp(((const mtf_file_entry_t *)a)->filename,
((const mtf_file_entry_t *)b)->filename);
}
/* ========================================================
* mtf_file_open():
* ======================================================== */
/*
 * Opens an MTF archive and reads its file entry list into `mtf`.
 * On failure the partially initialized `mtf` is closed and false is
 * returned (the error text is available via mtf_get_last_error()).
 */
bool mtf_file_open(mtf_file_t * mtf, const char * filename) {
    assert(mtf != NULL);
    assert(filename != NULL && *filename != '\0');
    mtf->osFileHandle = fopen(filename, "rb");
    mtf->fileEntries = NULL;
    mtf->fileEntryCount = 0;
    if (mtf->osFileHandle == NULL) {
        return mtf_error("Can't open input MTF file!");
    }
    // First 4 bytes are the number of files in the MTF archive.
    if (!mtf_read32(mtf->osFileHandle, &mtf->fileEntryCount)) {
        mtf_file_close(mtf);
        return mtf_error("Failed to read file entry count.");
    }
    if (mtf->fileEntryCount == 0) {
        mtf_file_close(mtf);
        return mtf_error("MTF appears to have no file! fileEntryCount == 0.");
    }
    // calloc zero-fills, so mtf_file_close() can safely free() a
    // partially filled entry list at any point below.
    mtf->fileEntries = calloc(mtf->fileEntryCount, sizeof(mtf->fileEntries[0]));
    if (mtf->fileEntries == NULL) {
        mtf_file_close(mtf);
        return mtf_error("Failed to malloc MTF file entries!");
    }
    // Read in the file entry list:
    for (uint32_t e = 0; e < mtf->fileEntryCount; ++e) {
        mtf_file_entry_t * entry = &mtf->fileEntries[e];
        if (!mtf_read32(mtf->osFileHandle, &entry->filenameLength)) {
            mtf_file_close(mtf);
            // BUGFIX: message used to read "file to read a filename length."
            return mtf_error("Failed to read a filename length.");
        }
        // Strings stored in the file are supposedly already null terminated,
        // but it is better not to rely on that: alloc an extra byte and set it to \0.
        // The size_t cast avoids uint32_t wraparound if filenameLength == UINT32_MAX.
        entry->filename = malloc((size_t)entry->filenameLength + 1);
        if (entry->filename == NULL) {
            mtf_file_close(mtf);
            return mtf_error("Failed to malloc filename string!");
        }
        // BUGFIX: this read was previously unchecked ("reading a string
        // or not, we continue"); a truncated archive silently produced
        // empty/garbage filenames.
        if (fread(entry->filename, 1, entry->filenameLength, mtf->osFileHandle) != entry->filenameLength) {
            mtf_file_close(mtf);
            return mtf_error("Failed to read a filename string!");
        }
        entry->filename[entry->filenameLength] = '\0';
        // Data start offset and decompressed size in bytes (for this file entry):
        if (!mtf_read32(mtf->osFileHandle, &entry->dataOffset) ||
            !mtf_read32(mtf->osFileHandle, &entry->decompressedSize)) {
            mtf_file_close(mtf);
            return mtf_error("Failed to read data offset or size.");
        }
    }
    // Entries are probably already in sorted order, but since
    // we don't have a formal specification to ensure that,
    // sort them by filename now:
    qsort(mtf->fileEntries, mtf->fileEntryCount, sizeof(mtf->fileEntries[0]), &mtf_sort_by_filename);
    return true;
}
/* ========================================================
* mtf_file_close():
* ======================================================== */
/* Releases everything owned by `mtf`. Safe to call with NULL or with a
 * file that failed to open / was already closed. */
void mtf_file_close(mtf_file_t * mtf) {
    if (mtf == NULL) {
        return;
    }
    if (mtf->osFileHandle != NULL) {
        fclose(mtf->osFileHandle);
        mtf->osFileHandle = NULL;
    }
    if (mtf->fileEntries == NULL) {
        return;
    }
    // Each entry owns its heap-allocated filename string.
    for (uint32_t e = 0; e < mtf->fileEntryCount; ++e) {
        free(mtf->fileEntries[e].filename);
    }
    free(mtf->fileEntries);
    mtf->fileEntries = NULL;
    mtf->fileEntryCount = 0;
}
/* ========================================================
* mtf_file_extract_batch():
* ======================================================== */
/*
 * Extracts up to `maxFileToExtract` entries from `srcMtfFile` under
 * `destPath`, creating directories as needed.
 *
 * `maxFileToExtract` can be zero, negative or MTF_EXTRACT_ALL to extract
 * everything. `filesExtracted` is optional and may be null; on return it
 * holds the number of entries successfully written.
 */
bool mtf_file_extract_batch(const char * srcMtfFile, const char * destPath,
                            int maxFileToExtract, int * filesExtracted) {
    assert(srcMtfFile != NULL && *srcMtfFile != '\0');
    assert(destPath != NULL && *destPath != '\0');
    // Attempt to open and read the headers and file entry list:
    mtf_file_t mtf;
    if (!mtf_file_open(&mtf, srcMtfFile)) {
        return false;
    }
    // Data for the individual files follow.
    // Now read each entry, decompress and write the output files.
    char extractionPath[MTF_MAX_PATH_LEN];
    int successCount = 0;
    for (uint32_t e = 0; e < mtf.fileEntryCount; ++e) {
        const mtf_file_entry_t * entry = &mtf.fileEntries[e];
        // A compressed file is prefixed by a 12 bytes compression info
        // header. If uncompressed, then there is no header; Problem
        // is, we can only tell if the file is compressed after reading in
        // the 12 bytes of a header, so if it is not compressed, we have
        // to seek back 12 bytes and then read the whole uncompressed block.
        mtf_compressed_header_t compressedHeader;
        if (!mtf_read_compressed_header(mtf.osFileHandle, entry->dataOffset, &compressedHeader)) {
            mtf_file_close(&mtf);
            return mtf_error("Failed to read a compression info header!");
        }
        // Set up the output file path, replacing Windows backslashes by forward slashes.
        // BUGFIX: detect snprintf truncation instead of silently writing to a clipped path.
        const int pathLen = snprintf(extractionPath, MTF_MAX_PATH_LEN, "%s/%s", destPath, entry->filename);
        if (pathLen < 0 || pathLen >= MTF_MAX_PATH_LEN) {
            mtf_file_close(&mtf);
            return mtf_error("Extraction path is too long!");
        }
        mtf_fix_filepath(extractionPath);
        // Output path might not exist yet. This has no side effects if it does.
        // BUGFIX: the return value of mtf_make_path() was previously ignored.
        if (!mtf_make_path(extractionPath)) {
            mtf_file_close(&mtf);
            return mtf_error("Can't create directory tree for extraction path!");
        }
        FILE * fileOut = fopen(extractionPath, "wb");
        if (fileOut == NULL) {
            mtf_file_close(&mtf);
            return mtf_error("Can't create output file on extraction path!");
        }
        bool success;
        if (mtf_is_compressed(&compressedHeader)) {
            // Pointing to the correct offset thanks to mtf_read_compressed_header().
            success = mtf_decompress_write_file(mtf.osFileHandle,
                                                fileOut, entry->decompressedSize, &compressedHeader);
        } else {
            success = mtf_write_file(mtf.osFileHandle,
                                     fileOut, entry->decompressedSize, entry->dataOffset);
        }
        fclose(fileOut);
        if (success) {
            ++successCount;
            if (maxFileToExtract > 0 && successCount == maxFileToExtract) {
                break;
            }
        }
    }
    if (filesExtracted != NULL) {
        *filesExtracted = successCount;
    }
    mtf_file_close(&mtf);
    return true;
}
And here's a simple command line driver to decompress a whole archive:
#include "mtf.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
/* Writes the command-line usage text to stdout. */
static void print_usage(const char * progName) {
    static const char * const usageFormat =
        "\n"
        "Usage:\n"
        "$ %s <input_mtf> <output_dir>\n"
        " Decompresses each file in the given MTF archive to the provided path.\n"
        " Creates directories as needed. Existing files are overwritten.\n"
        "\n"
        "Usage:\n"
        "$ %s --help | -h\n"
        " Prints this help text.\n"
        "\n";
    printf(usageFormat, progName, progName);
}
int main(int argc, const char * argv[]) {
    if (argc < 2) {
        print_usage(argv[0]);
        return EXIT_FAILURE;
    }
    // Printing help is not treated as an error.
    const bool wantsHelp = (strcmp(argv[1], "-h") == 0) ||
                           (strcmp(argv[1], "--help") == 0);
    if (wantsHelp) {
        print_usage(argv[0]);
        return EXIT_SUCCESS;
    }
    // From here on we need an input filename and an output path.
    if (argc < 3) {
        print_usage(argv[0]);
        return EXIT_FAILURE;
    }
    const char * mtfFilename = argv[1];
    const char * outputDir = argv[2];
    int filesExtracted = 0;
    if (mtf_file_extract_batch(mtfFilename, outputDir, MTF_EXTRACT_ALL, &filesExtracted)) {
        printf("Successfully extracted %d files from MTF archive \"%s\".\n", filesExtracted, mtfFilename);
        return EXIT_SUCCESS;
    }
    fprintf(stderr, "Error while extracting \"%s\": %s\n", mtfFilename, mtf_get_last_error());
    fprintf(stderr, "Managed to extract %d files.\n", filesExtracted);
    return EXIT_FAILURE;
}
Link to the project.
Answer: How large are these files?
My suggestion would be to load the entire compressed file into memory instead of using multiple fread calls.
If you are storing the decompressed contents in memory, surely you can also store the compressed file in memory as well. Most of your run time is spent fetching bytes with fread, so being able to replace those calls with direct memory accesses should improve performance quite a bit.
Another suggestion I have is to use memcpy instead of this loop:
for (int n = 0; n < count + 3; ++n) {
*decompressedPtr = *(decompressedPtr - offset);
++decompressedPtr;
--bytesLeft;
}
memcpy is a standard way to copy blocks of memory, and most C libraries implement it with special processor instructions to speed it up.
Another coding style issue... instead of:
while (bytesLeft) {
...
bytesLeft -= count;
if (bytesLeft < 0) { ...error... }
}
I prefer:
while (bytesLeft > 0) {
...
bytesLeft -= count;
}
if (bytesLeft < 0) { ...error... }
It's a safer way to implement the loop as the check bytesLeft > 0 is always performed. The way you've written it the check will get missed if someone uses continue somewhere in the loop body. | {
"domain": "codereview.stackexchange",
"id": 15475,
"tags": "c, file-system, compression, c99"
} |
Applying Viterbi algorithm to compensate for ISI in PSK31 | Question: I am implementing a receiver for the amateur radio mode PSK31. It is effectively a BPSK modulation with a pulse shape of a raised cosine twice the width of one symbol. Here are four consecutive pulses, with 4 samples per symbol, as they would be transmitted:
The trouble is matched filtering introduces ISI. 4 pulses again, after a matched filter in the receiver:
If my understanding is correct, the issue here is one pulse is non-zero at the ideal decision point of the adjacent pulses.
However, my thinking is this is predictable, and the interference never extends beyond the adjacent symbols. So if the previous and next symbols are known the ISI can be cancelled by subtraction of the interfering adjacent pulses.
The trouble is the previous symbol isn't known with certainty. I think the Viterbi algorithm is a possible solution. And I've watched some videos on the algorithm and think I understand how the algorithm works, given a particular trellis.
What I don't understand is how to apply the Viterbi algorithm to this particular problem. The material I've found is either theoretical with math that blows my head off, or some other application of the algorithm. Could someone please provide an intuitive, practical explanation in this context?
Answer: (After some deliberation, I have a guess at answering my own question. Someone tell me if I'm wrong.)
The first step is to look at the contribution of each pulse at the decision point. The filter is normalized so a single pulse results in 1. And in this particular case, the tails of adjacent pulses work out to 1/6.
We can then look at any possible combination of three pulses, summing their contributions together. If we say the three pulses are $x_{n-1},\ x_{n},\ x_{n+1}$, then the output $y_n$ at the decision point is:
$$ y_n = 1/6\:x_{n-1} + x_{n} + 1/6\:x_{n+1}$$
To give some examples:
111: 1/6 + 1 + 1/6 = 1.33
010: -1/6 + 1 + -1/6 = 0.67
110: 1/6 + 1 + -1/6 = 1
Say state 01 means the previous bit was a 0, and the one before a 1. We can then ask, "if we add another bit, what does the output become?" For example, if the next bit is a zero that yields 001, with an output of -1 according to the formulation above.
We can then build a state table:
╔═══════╦═══════╦════════╗
║ input ║ state ║ output ║
╠═══════╬═══════╬════════╣
║ 0 ║ 00 ║ -1.33 ║
║ 1 ║ 00 ║ -1 ║
║ 0 ║ 01 ║ -1 ║
║ 1 ║ 01 ║ -0.67 ║
║ 0 ║ 10 ║ 0.67 ║
║ 1 ║ 10 ║ 1 ║
║ 0 ║ 11 ║ 1 ║
║ 1 ║ 11 ║ 1.33 ║
╚═══════╩═══════╩════════╝
And the associated trellis diagram:
The notation "0/-1.33" means "to send a 0 from this state, the output is -1.33".
Either the magnitude or the square of the difference between the observed values and the expected values can be used as the metric. The Viterbi algorithm will find the path through the trellis with the lowest metric regardless of what metric is used, as long as it's something that monotonically increases with the distance between observation and expected.
The Viterbi algorithm can then be followed in the usual way to determine the most likely sequence that explains the received signal, knowing how adjacent pulses will influence the received values.
I've found these videos especially helpful in understanding the evaluation of the Viterbi algorithm:
Viterbi Algorithm, by AppBooke Xperience
Viterbi Algorithm, by Keith Chugg | {
"domain": "dsp.stackexchange",
"id": 5264,
"tags": "bpsk, viterbi-algorithm"
} |
Questions about Maxwell's demon | Question: I've been reading about Maxwell's demon and the current accepted solution for it (deleting information results in an increase in entropy), but there are two things I don't understand about the solution.
Suppose the demon has a large enough memory to store all the information about the system, making deletion unnecessary. Wouldn't the 2nd law of thermodynamics be broken in that case?
But even that aside, there is necessarily some amount of time between when the data is stored and when it is deleted, so wouldn't the 2nd law be broken during that period of time?
Answer: The Demon's memory store acts like an entropy reservoir. In the process of measuring the speed of each molecule, the Demon reproduces the random pattern of fast and slow gas molecules on either side of the barrier in the memory store, so the entropy for the entire system is exactly the same. When the Demon deletes the data, it is returning it from $2^n$ states to one state, and thus reducing the entropy in exactly the same way as separating the gas molecules would.
For the sake of illustration, let's suppose the Demon is using an abacus to store the data. Initially, the gas is in one of $2^n$ states, while the abacus (initially empty) is in one of one possible states. We have $n$ bits of entropy.
Now the demon measures the speed of each molecule and checks it against the threshold, gaining one bit of information per measurement. It uses this information to sort the molecules. So now the gas is in one of only one possible states, and the abacus is in one of $2^n$ possible states, exactly reproducing (the relevant part of) the initial state of the gas. We can think of the beads on the abacus wires as molecules in one of two compartments, at the top and bottom of the abacus.
The number of states of the system as a whole (gas + Demon) is exactly the same: $2^n$. The process is reversible. The entropy has neither increased nor decreased, and the 2nd law of thermodynamics has not been violated.
When the information is deleted, the Demon has to go through the beads, check whether each is at the top or bottom of the wire, and push it in the appropriate direction to cancel out the information. It is doing exactly the same sort of task as it was in sorting the gas molecules - going from one of $2^n$ states to a single defined state. And so, for exactly the same reason, it has to transfer the information gained from looking at each bead-bit somewhere else, into some other reservoir of entropy.
"domain": "physics.stackexchange",
"id": 90677,
"tags": "thermodynamics, statistical-mechanics, temperature, entropy, reversibility"
} |
Why is particle number conserved in non-relativistic limit of QED? | Question: I am trying to recover ordinary quantum mechanics from QED. One main feature of quantum mechanics is the conservation of particle and antiparticle number separately, i.e. $[N_{e^-},H] = [N_{e^+},H] = 0$. I would like to show this.
The QED Hamiltonian is given by $H = \int d^3 x H_0 + H_{int}$ where $H_{int} = e \bar\psi A^\mu\gamma_\mu \psi$. The free Hamiltonian commutes with the number operators.
The fields are given by
$$\psi = \sum_s\int\frac{d^3k}{\sqrt{2\pi}^3\sqrt{2E}} \left(e^{ikx} u_s(k) c_{ks} + e^{-ikx} v_s(k) d^\dagger_{ks}\right) = \psi^+ + \psi^-$$
$$A^\mu = \sum_s\int\frac{d^3k}{\sqrt{2\pi}^3\sqrt{2E}} \left(e^{ikx} \varepsilon_s^\mu a_{ks} + e^{-ikx} \varepsilon_s^{*\mu} a^\dagger_{ks}\right)$$
I already calculated the commutator of the number operator $N_{e^-} = \int d^3k \sum_s c^\dagger_{ks} c_{ks}$ with the fields:
$$[N_{e^-}, \psi] = -\psi^+$$
$$[N_{e^-}, \bar{\psi}] = \bar{\psi}^+$$
So the commutator with the interaction Hamiltonian is
$$[N_{e^-},\bar{\psi}A^\mu\gamma_\mu\psi] = -\bar{\psi}A^\mu\gamma_\mu\psi^+ + \bar{\psi}^+A^\mu\gamma_\mu\psi = \bar{\psi}^+A^\mu\gamma_\mu\psi^- - \bar{\psi}^-A^\mu\gamma_\mu\psi^+$$
In order for the particle number to be conserved (in the non relativistic case) we need that the commutator vanishes for $\vec{p} \to 0$.
If I insert the field operators and integrate over $d^3x$ I find (spin indices dropped)
$$\bar{\psi}^+A^\mu\gamma_\mu\psi^- = \int \frac{d^3p d^3k}{\sqrt{2\pi}^3\sqrt{2E_p}\sqrt{2E_k}\sqrt{2|p+k|}} \bar{u}(p)\left(\varepsilon \gamma a_{p+k} + \varepsilon^* \gamma a^\dagger_{-p-k}\right)v(k) c^\dagger_p d^\dagger_k$$
For $p,k \to 0$ the denominator becomes $E_k, E_p \to m$ and $|p+k| \to 0$, while in the numerator $\bar{u} \gamma \varepsilon v = O(1) \cdot m$. So since the photon energy drops to $0$, the commutator becomes infinite.
Therefore there should be no particle number conservation in the non relativistic limit. How can one justify the particle number conservation?
Answer: The small-coupling expansion treats QED as a series of corrections to a different model, namely "free" QED without the interaction term. In free QED, the operators $c,c^\dagger,d,d^\dagger$ are indeed the creation and annihilation operators for individual particles, but that is no longer true in the original model with the interaction term. As a result, the operator $N_{e^-}$ shown in the OP is not really the number operator for electrons. It is in free QED, but not in the presence of the interaction term. To see this, notice that the vacuum state in QED (with the interaction term) is not annihilated by $c_{ks}$, so with $N_{e^-}$ defined as in the OP, the operator $N_{e^-}$ has a non-zero expectation value in the vacuum state, even though the vacuum state is empty by definition.
For this reason, deriving the non-relativistic limit of QED directly is not straightforward at all. A different approach, which is indirect but much easier, is to use the Effective Field Theory idea, as explained in this review:
Lepage (1989), "What is renormalization?" Boulder ASI, pages 483-508, https://arxiv.org/abs/hep-ph/0506330.
In this approach, we appeal to the model's symmetries and other general intuition to come up with a list of fields that the non-relativistic approximation should involve, like one two-component spinor field for the electron, one for the positron, and an EM gauge field. Then we write down the most general Lagrangian that we can write down using those fields, using non-relativistic kinetic terms and discarding terms that we know will be negligible at sufficiently low energies. (This can be inferred by considering the units of each term's coefficient.) Then, if $n$ denotes the number of independent coefficients in the remaining Lagrangian, we calculate $n$ different physical quantities and compare them to the same $n$ physical quantities calculated using the original model (relativistic QED in this case) in order to fix the values of the $n$ coefficients. The result is a Lagrangian that qualifies as a non-relativistic approximation to QED. This technique (and the result) is called NRQED. | {
"domain": "physics.stackexchange",
"id": 54954,
"tags": "quantum-mechanics, quantum-field-theory, quantum-electrodynamics"
} |
What is the "complementary map" of a channel with given Kraus decomposition? | Question: I have a quantum map described by the following Kraus operators
$$A_0 = c_0 \begin{pmatrix}
1 & 0\\
0 & 1 \end{pmatrix},
\qquad A_1 = c_1 \begin{pmatrix}
1 & 0\\
0 & -1 \end{pmatrix},$$
such that $c_0^2 + c_1^2 = 1$. I want to know what is a complementary map and how to construct the same for the above-mentioned channel?
Edit 1: Checked for some literature. Here is the definition of the complementary map equations 37 and 38.
Answer: Let's start by finding a complementary channel for any channel given by a Kraus representation
$$
\Phi(X) = \sum_{k=1}^n A_k X A_k^{\dagger}.
$$
To make the necessary equations clear, let us assume that the channel has the form $\Phi:\mathrm{L}(\mathcal{X})\rightarrow \mathrm{L}(\mathcal{Y})$ for finite-dimensional Hilbert spaces $\mathcal{X}$ and $\mathcal{Y}$. Let us also define $\mathcal{Z} = \mathbb{C}^n$; the complementary channel we will define will take the form $\Psi:\mathrm{L}(\mathcal{X})\rightarrow \mathrm{L}(\mathcal{Z})$. (For the channel in the question itself, we will have $\mathcal{X}$, $\mathcal{Y}$, and $\mathcal{Z}$ all equal to $\mathbb{C}^2$, but it helps nevertheless to assign different names to these spaces.)
Define an operator
$$
A = \sum_{k=1}^n A_k \otimes | k\rangle,
$$
which is a linear operator mapping $\mathcal{X}$ to $\mathcal{Y}\otimes\mathcal{Z}$. This gives us a Stinespring representation
$$
\Phi(X) = \operatorname{Tr}_{\mathcal{Z}} \bigl( A X A^{\dagger}\bigr).
$$
The channel
$$
\Psi(X) = \operatorname{Tr}_{\mathcal{Y}} \bigl( A X A^{\dagger}\bigr)
$$
is therefore complementary to $\Phi$. We can simplify this expression by observing that
$$
A X A^{\dagger} = \sum_{j=1}^n \sum_{k=1}^n A_j X A_k^{\dagger} \otimes | j \rangle \langle k |,
$$
so that
$$
\Psi(X) = \sum_{j=1}^n \sum_{k=1}^n \operatorname{Tr}\bigl(A_j X A_k^{\dagger}\bigr) | j \rangle \langle k |.
$$
There's not too much more we can do with this, except perhaps to use the cyclic property of the trace to obtain the expression
$$
\Psi(X) = \sum_{j=1}^n \sum_{k=1}^n \operatorname{Tr}\bigl(A_k^{\dagger} A_j X\bigr) | j \rangle \langle k |.
$$
Now let's plug in the specific operators from the question to obtain
$$
\Psi(X) = c_0^2 \operatorname{Tr}(X) | 0 \rangle \langle 0 | + c_1^2 \operatorname{Tr}(X) | 1 \rangle \langle 1 | + c_0 c_1 \operatorname{Tr}(\sigma_z X) | 0 \rangle \langle 1 | + c_0 c_1 \operatorname{Tr}(\sigma_z X) | 1 \rangle \langle 0 |.
$$
Here $\sigma_z$ denotes the Pauli-Z operator, which we get because $A_0^{\dagger} A_1 = A_1^{\dagger}A_0 = c_0 c_1 \sigma_z$. (I am assuming $c_0$ and $c_1$ are real numbers.) The expression may look a bit nicer in matrix form:
$$
\Psi\begin{pmatrix} \alpha & \beta\\ \gamma & \delta\end{pmatrix} =
\begin{pmatrix} c_0^2(\alpha + \delta) & c_0 c_1 (\alpha - \delta)\\
c_0 c_1 (\alpha - \delta) & c_1^2 (\alpha + \delta) \end{pmatrix}.
$$
Finally, the question asks for Kraus operators of $\Psi$, which we can get by computing the Choi operator of $\Psi$. In general, this is the operator
$$
J(\Psi) = \sum_{j=1}^n\sum_{k=1}^n \Psi(|j\rangle\langle k|) \otimes |j\rangle\langle k|,
$$
and in this particular case we obtain
$$
J(\Psi) = \begin{pmatrix}
c_0^2 & 0 & c_0 c_1 & 0\\
0 & c_0^2 & 0 & -c_0 c_1 \\
c_0 c_1 & 0 & c_1^2 & 0\\
0 & -c_0 c_1 & 0 & c_1^2
\end{pmatrix}.
$$
This operator has rank 2, which means just 2 Kraus operators suffice. We can get them through a spectral decomposition of $J(\Psi)$. Specifically, we have
$$
J(\Psi) = \begin{pmatrix} c_0\\ 0\\ c_1\\ 0 \end{pmatrix}
\begin{pmatrix} c_0 & 0 & c_1 & 0 \end{pmatrix}
+ \begin{pmatrix} 0\\ c_0\\ 0\\ -c_1 \end{pmatrix}
\begin{pmatrix} 0 & c_0 & 0 & -c_1 \end{pmatrix},
$$
and by "folding up" these vectors we get Kraus operators:
$$
\Psi(X) = B_0 X B_0^{\dagger} + B_1 X B_1^{\dagger}
$$
where
$$
B_0 = \begin{pmatrix} c_0 & 0\\ c_1 & 0 \end{pmatrix}
\quad\text{and}\quad
B_1 = \begin{pmatrix} 0 & c_0 \\ 0 & -c_1 \end{pmatrix}.
$$ | {
"domain": "quantumcomputing.stackexchange",
"id": 3627,
"tags": "quantum-operation, kraus-representation"
} |
DH parameters of a PUMA-type manipulator | Question: I'm struggling to find the DH parameters for this PUMA-type manipulator that yield the same results as the author (1):
The way I'm checking if the parameters I have are correct is by comparing the resulting J11, J21 & J22 matrices with the author. These sub-matrices are the constituents of the wrist Jacobian matrix (Jw).
I tried many different combinations of the DH parameters including:
$\alpha = [0, 90, 0, -90, 90, -90]$
$\theta = [0, 0, 0, 0, 0, 0]$
$a = [0, 0, a_2, -a_3, 0, 0]$
$d = [d_1, -d_2, 0, -d_4, 0, 0]$
Which result in the same matrices as the author except for some minor differences. The general wrist Jacobian matrix and the sub-matrices obtained by the author are given by:
Whereas the result I got for J11 was:
$$
\left[
\begin{array}{ccc}
-d_2 c_1-s_1 (a_2 c_2-a_3 c_{23}+d_4 s_{23}) & c_1 (d_4 c_{23}-a_2 s_2+a_3 s_{23}) & c_1 (d_4 c_{23}+a_3 s_{23}) \\
c_1 (a_2 c_2-a_3 c_{23}+d_4 s_{23})-d_2 s_1 & s_1 (d_4 c_{23}-a_2 s_2+a_3 s_{23}) & s_1 (d_4 c_{23}+a_3 s_{23}) \\
0 & a_2 c_2-a_3 c_{23}+d_4 s_{23} & d_4 s_{23}-a_3 c_{23} \\
\end{array}\right]
$$
And for the J22 matrix I got:
$$
\left[
\begin{array}{ccc}
-c_1 s_{23} & c_4 s_1+c_1 c_{23} s_4 & s_1 s_4 s_5-c_1 (c_3 (c_5 s_2+c_2 c_4 s_5)+s_3 (c_2 c_5-c_4 s_2 s_5)) \\
-s_1 s_{23} & c_{23} s_1 s_4-c_1 c_4 & -c_5 s_1 s_{23}-(c_2 c_3 c_4 s_1-c_4 s_2 s_3 s_1+c_1 s_4) s_5 \\
c_{23} & s_{23} s_4 & c_{23} c_5-c_4 s_{23} s_5 \\
\end{array}\right]
$$
And the same J12 matrix as the author.
Perhaps the most pronounced difference here is that every $\sin(\theta_2 + \theta_3)$ is replaced with $\cos(\theta_2 + \theta_3)$ and vice versa, in addition to some sign differences.
Where am I going wrong here?
(1) Wenfu Xu, Bin Liang, Yangsheng Xu, "Practical approaches to handle the singularities of a wrist-partitioned space manipulator".
Answer: In looking at your matrices, I would guess that you (or the author!) have made a mistake somewhere in your alpha $\alpha$ terms. Recall the period shift identity that states:
$$
\sin{\left(\theta + \pi/2 \right)} = +\cos{\theta} \\
\cos{\left(\theta + \pi/2 \right)} = -\sin{\theta} \\
$$
So, if you (or again, the author) were off by 90 degrees ($\pi/2$), then that would explain the fact that you appear to have some sines and cosines swapped and some sign errors, too.
I don't have the time at the moment to go through each step by hand and try to evaluate where you've gone wrong, but I would imagine that you should be able to compare individual joint transforms and find the ones that don't match. Try changing the $\alpha$ values for that joint by $\pm \pi/2$ and see if what you have matches what the author has.
Authors can and do make mistakes, and if you're positive the author made a mistake then the professional thing to do would be to contact that author and/or the journal that published the paper and alert them to the mistake. I would just double- and triple-check that the author was incorrect before taking that step, though (run it by your professors, etc.) | {
"domain": "robotics.stackexchange",
"id": 1377,
"tags": "robotic-arm, manipulator, dh-parameters"
} |
What is the smallest oligocellular organism? | Question: What is the smallest oligocellular organism?
How many cells does it have?
EDIT
The question is motivated by this comment@Philosophy.SE
EDIT as recommended in comments
I'm looking for an example of an organism made of very few (the fewer, the better) sister cells (obtained by mitosis or aggregation) that are morphologically and functionally different. I would accept examples of species that exist in different "conformation" (unicellular and multicellular).
Answer: The classic example, though I am sure there are others that are smaller, is the slime mold Dictyostelium discoideum:
It can have up to 100,000 cells and exists as both single cells and as a multicellular organism (emphasis mine):
Dictyostelium amoebae grow as separate, independent cells but interact to form multicellular structures when challenged by adverse conditions such as starvation. Up to 100,000 cells signal each other by releasing the chemoattractant cAMP and aggregate together by chemotaxis to form a mound that is surrounded by an extracellular matrix. This mechanism for generating a multicellular organism differs radically from the early steps of metazoan embryogenesis. However, subsequent processes depend on cell-cell communication in both Dictyostelium and metazoans. Many of the underlying molecular and cellular processes appear to have arisen in primitive precursor cells and to have remained fundamentally unchanged throughout evolution. Basic processes of development such as differential cell sorting, pattern formation, stimulus-induced gene expression, and cell-type regulation are common to Dictyostelium and metazoans. | {
"domain": "biology.stackexchange",
"id": 1699,
"tags": "cell-biology, cell-culture"
} |
Does $E=mc^2$ mean there is a maximum attainable temperature? | Question: If energy is proportional to mass, and temperature is proportional to energy density, does it mean there is a certain absolute maximum temperature which can never be exceeded? With further energy added to the system simply increasing its mass without affecting the temperature anymore?
Answer:
Does e=mc² mean there is a maximum attainable temperature?
First let's clear up the misunderstanding implied about what $m$ is in $E=mc^2$. The formula comes from the early days of special relativity studies, where this $m$ is the relativistic mass; it is the mass a particle would have in Newtonian physics, the inertial mass, the resistance to increasing the momentum of a particle with velocities close to the velocity of light. At present one uses four vectors to do the kinematics of Lorentz transformations, and the mass of a particle (or a system of particles) is the length of the four vector, called invariant mass, or rest mass, because
The two formulae coincide in the frame where the momentum is zero. It is energy and momentum that are conserved within an inertial frame.
and temperature is proportional to energy density
Temperature is not proportional to energy density, it is related to the average kinetic energy in ideal gases, and gets more complicated for solids or fluids.
With further energy added to the system simply increasing its mass without affecting the temperature anymore?
Adding energy would increase the four vectors defining a system and accordingly, if the energy comes with additional four vectors, the invariant mass could grow, in principle without bounds, if it were not for general relativity and the gravitational effects due to the large masses. When quantum mechanical theories are applied in cosmological models there are the Planck units, and for energy it is expected that for larger energies nothing can be known, so in that sense at present one considers a limit to the possibility of calculating energy beyond some value. Note that the mass of the proton is used in the definition of the Planck constants. This limit of energy introduced in an ideal gas model will also give a limit to the knowledge of temperature.
"domain": "physics.stackexchange",
"id": 78516,
"tags": "special-relativity, temperature, mass-energy"
} |
Is parallel universe possible | Question: Does parallel universe really exists or just science fiction which ain't possible? Can we ever know?
Answer: Multiple and parallel universes are a feature of a number of "theories", including most famously (and probably before string theory), the many worlds interpretation of quantum mechanics.
The attraction of the many worlds or multiverse ideas is that they offer a ready explanation of why we appear to live in a universe that has been "fine-tuned" for our existence.
The disadvantage of the "theory" is that there does not appear to be any readily available means to falsify it. For that reason many scientists view it as unscientific. | {
"domain": "astronomy.stackexchange",
"id": 1364,
"tags": "cosmology, universe, space, multiverse"
} |
Basic question about probability and measurements | Question: Say I have a Galton box, i.e. a ball dropping on a row of solid bodies. Now I want to calculate the probability distribution of the movement of the ball based on the properties of the body (case A). For instance if I change the position of the ball the distribution might change (case B). I want to know the distribution of the ball after it has hit the body, as a function of various properties. Does it even make sense to speak of a probability distribution here? Basically what I had in mind before, is that quantum mechanics is probabilistic and that classical mechanics is deterministic. Does this mean one can actually calculate where each ball will end up, if the measurements are precise enough?
Answer: In classical physics, all the motion of the objects and the behavior after all the recoils is predictable in principle. In practice, there's always some dependence on tiny errors in the knowledge of the initial state; tiny velocities that the elements may have and the motion and rotation of the marble in particular; tiny non-uniformities in the shape of the elements, and so on. The required accuracy in the knowledge of all these things is "exponential" (the error has to be at most $\exp(-X)$ where X is a number much greater than one) if we want to be able to predict the evolution for a long time, e.g. in a tall enough Galton box.
So in practice, the motion isn't predictable, much like it's not predictable when one is throwing dice. The precise behavior of the balls etc. may be described by probabilistic distributions whose width takes the "possible errors" produced in the real world into account. When several such probability distributions are convoluted, one gets some random distribution for the result. The Galton box is an example of that.
In quantum mechanics, one can't even imagine that there exists some sharp, non-probabilistic answer. Even if one knew everything about the objects perfectly, the final state would be undetermined – ambiguous – and only probabilities of different answers could be predicted. While this difference between classical physics and quantum mechanics seems "qualitative" in principle, it is very modest in the operational sense. Even in classical physics, due to the error margins etc., one should have adopted the fact that only probabilistic predictions were possible. Quantum mechanics takes this observation seriously and creates a framework for physics in which precise deterministic predictions are non-existent not only in practice but also in principle. However, the ways how to calculate the probabilities are pretty much analogous.
"domain": "physics.stackexchange",
"id": 5726,
"tags": "experimental-physics, measurements, probability, determinism"
} |
How to specify library flags for a catkin package | Question:
I am trying to migrate one of my rosbuild packages to catkin. I am trying to specify the CFLags and LFlags that used to be hardcoded in the manifest.xml, but have to be specified in catkin_package cmake macro.
As far as I understand it, the default Lflags are set to {PROJECT_NAME}/lib, how would I go about changing this to, say, {PROJECT_NAME}/lib64?
I know I can specify an include directory which works fine and I can specify which libraries to link against, but how do I specify where to look for those libraries, i.e. the path after the -L flag?
Originally posted by arebgun on ROS Answers with karma: 2121 on 2013-01-25
Post score: 4
Answer:
Rather than explicitly exposing the CFLAGS directly, you should use catkin_package() to export from within CMake any libraries or headers to packages which depend on you.
Can you post a link to your package or put your old manifest/CMakeLists.txt up on a gist.github.com? With some more specifics I can show you exactly how this would work.
Originally posted by WilliamWoodall with karma: 1626 on 2013-02-05
This answer was ACCEPTED on the original site
Post score: 1
Original comments
Comment by arebgun on 2013-02-05:
@WilliamWoodall: basically, I am trying to write a wrapper package that downloads 3rd party code and builds it, like we used to be able to from plain Makefiles with the help of download/unpack/patch/build scripts. Not sure if it possible or recommended with catkin.
Comment by arebgun on 2013-02-05:
I have a CMake equivalent to the old Makefile but I have trouble combining my current CMakelists contents with catkin stuff. Package in question is here: https://github.com/arebgun/dynamixel_motor_experimental/tree/master/gearbox.
Comment by WilliamWoodall on 2013-02-05:
@arebgun That is not recommended with catkin anymore. You can continue to use rosbuild to accomplish this, but if you intend to release this package or depend on it with other catkin packages, then you'll need to follow our forthcoming 3rd-party recommendation. (recommendation will be a REP soon)
Comment by arebgun on 2013-02-05:
Yes, I wanted to release the package and have the ability for other catkin packages to depend on it. Will wait for the REP to be published. Thanks!
Comment by Dave Coleman on 2014-06-08:
Did anything come of this arebgun? I'm trying to fix gearbox linking for dynamixel_motor_experiemental as well, but my only hack is to use:
sudo ln -s /home/dave/ros/ws_clam/devel/lib/libflexiport.so /usr/lib/libflexiport.so | {
"domain": "robotics.stackexchange",
"id": 12588,
"tags": "catkin, cmake"
} |
Why does humidity cause a feeling of hotness? | Question: Imagine there are two rooms kept at the same temperature but with different humidity levels. A person is asked to stay in each room for 5 minutes. At the end of experiment if we ask them which room was hotter, they will point to the room with the higher humidity. Correct right?
How does humidity cause this feeling of hotness?
Answer: When the ambient humidity is high, the effectiveness of evaporation over the skin is reduced, so the body's ability to get rid of excess heat decreases.
Human beings regulate their body temperature quite effectively by evaporation, even when we are not sweating, thanks to our naked skin. (This, supposedly, is also what made it possible for early hominids to become hunters by virtue of being effective long-distance runners.)
Humans are so good at this, we can survive in environments that are significantly hotter than our body temperature (e.g., desert climates with temperatures in the mid-40s degrees Celsius) so long as the humidity remains low and we are adequately hydrated. (Incidentally, this is also why we are more likely to survive being locked in a hot car on a summer day than our furry pets.) In contrast, when the humidity is very high, even temperatures that are still several degrees below normal body temperature can be deadly already. | {
"domain": "physics.stackexchange",
"id": 66165,
"tags": "temperature, humidity"
} |
K&R Exercise 1-21. Write a program `entab` that replaces strings of blanks with tabs | Question: Intro
I'm going through the K&R book (2nd edition, ANSI C ver.) and want to get the most from it: learn (outdated) C and practice problem-solving at the same time. I believe that the author's intention was to give the reader a good exercise, to make him think hard about what he can do with the tools introduced, so I'm sticking to program features introduced so far and using "future" features and standards only if they don't change the program logic.
Compiling with gcc -Wall -Wextra -Wconversion -pedantic -std=c99.
K&R Exercise 1-21
Write a program entab that replaces strings of blanks by the minimum number of tabs and blanks to achieve the same spacing. Use the same tab stops as for detab. When either a tab or a single blank would suffice to reach a tab stop, which should be given preference?
Solution
The solution attempts to reuse functions coded in the previous exercises (getline & copy) and make the solution reusable as well. In that spirit, a new function size_t entab(char s[], size_t tw); is coded to solve the problem. For lines that can fit in the buffer, the solution is straightforward. However, what if they can't? The main routine deals with that, and most of the lines are treating the special case where we must merge the blanks from 2 getline calls. This exercise is just after the chapter where extern variables are introduced, and they really are convenient here enabling the entab function to be aware of the column we're at.
Code
/* Exercise 1-21. Write a program `entab` that replaces strings of
* blanks by the minimum number of tabs and blanks to achieve the same
* spacing. Use the same tab stops as for `detab`. When either a tab or
* a single blank would suffice to reach a tab stop, which should be
* given preference?
*/
#include <stdio.h>
#include <stdbool.h>
#define MAXTW 4 // max. tab stop width
#define LINEBUF MAXTW // line buffer size, must be >=MAXTW
size_t col = 0; // current column
size_t tcol = 0; // target column
size_t getline(char line[], size_t sz);
void copy(char * restrict to, char const * restrict from);
size_t entab(char s[], size_t tw);
int main(void)
{
extern size_t col; // current column
extern size_t tcol; // target column
char line[LINEBUF]; // input buffer
size_t len; // input buffer string length
size_t tw = 4; // tab width
if (tw > MAXTW) {
return -1;
}
len = getline(line, LINEBUF);
while (len > 0) {
len = entab(line, tw);
if (line[len-1] == '\n') {
// base case, working with a full, properly terminated line
// or a tail of one; we can safely print it
col = 0;
tcol = 0;
printf("%s", line);
len = getline(line, LINEBUF);
}
else if (line[len-1] != ' ') {
// could be part of a bigger line or end of stream and we
// don't have dangling blanks; we can safely print it
printf("%s", line);
len = getline(line, LINEBUF);
}
else {
// we have some dangling blanks and must peek ahead to
// know whether we can merge them into a tab or not
bool cantab = false;
char pline[LINEBUF]; // peek buffer
size_t plen; // peek buffer string length
plen = getline(pline, LINEBUF);
if (plen > 0) {
if (pline[0] == ' ') {
// count spaces in the peek; pspc = 1 because if
// we're here then we already know pline[0] == ' '
size_t pspc;
for (pspc = 1; (pline[pspc] == ' ' ||
pline[pspc] == '\t') && pspc < plen &&
pspc < tw; ++pspc) {
if (pline[pspc] == '\t') {
cantab = true;
}
}
// enough to warrant a tab stop?
if (col + pspc >= (col + tw)/tw*tw) {
cantab = true;
}
}
else if (pline[0] == '\t') {
cantab = true;
}
} // else we got EOF and those spaces have to stay
if (cantab) {
// pop the spaces and adjust current column accordingly
while (len > 0 && line[--len] == ' ') {
--col;
line[len] = '\0';
} // no need to fix len, as it gets reset below
}
printf("%s", line);
len = plen;
copy(line, pline);
}
}
return 0;
}
/* entab: process string from `s`, replace in-place spaces with tabs.
* Assume '\0' terminated string. Relies on extern variable for column
* alignment.
* tw - tab width
*/
size_t entab(char s[], size_t tw)
{
extern size_t col; // current column
extern size_t tcol; // target column
size_t j = 0;
bool gotnul = false;
for (size_t i = 0; !gotnul; ++i) {
// on blank or tab just continue reading and move our target
// column forward
if (s[i] == ' ') {
++tcol;
}
else if (s[i] == '\t') {
tcol = (tcol+tw)/tw*tw;
}
else {
// on non-blank char, if we're lagging behind target fill-up
// with tabs & spaces and then write the char, else just
// write the char
if (tcol > col) {
for (size_t at = (tcol/tw*tw-col/tw*tw)/tw; at > 0;
--at) {
s[j] = '\t';
++j;
col = (col+tw)/tw*tw;
}
for (size_t as = tcol-col; as > 0; --as) {
s[j] = ' ';
++j;
++col;
}
}
s[j] = s[i];
if (s[j] == '\0') {
gotnul = true;
}
else {
++j;
++col;
++tcol;
}
}
}
return j;
}
/* getline: read a line into `s`, return string length;
* `sz` must be >1 to accomodate at least one character and string
* termination '\0'
*/
size_t getline(char s[], size_t sz)
{
int c;
size_t i = 0;
bool el = false;
while (i + 1 < sz && !el) {
c = getchar();
if (c == EOF) {
el = true; // note: `break` not introduced yet
}
else {
s[i] = (char) c;
++i;
if (c == '\n') {
el = true;
}
}
}
if (i < sz) {
if (c == EOF && !feof(stdin)) { // EOF due to read error
i = 0;
}
s[i] = '\0';
}
return i;
}
/* copy: copy a '\0' terminated string `from` into `to`;
* assume `to` is big enough;
*/
void copy(char * restrict to, char const * restrict from)
{
size_t i;
for (i = 0; from[i] != '\0'; ++i) {
to[i] = from[i];
}
to[i] = '\0';
}
Test
Input
sdas
a a aaa aasa aaa d dfsdf aaa ss s g
aa asd s f f f X
asf
Showing the tabs as ^I:
$ cat -T test.txt
sdas
a a aaa aasa^I aaa^I d dfsdf aaa ss s g
aa^I ^Iasd^I s^If^If f^IX ^I
^I asf ^I^I^I ^I
Output
sdas
a a aaa aasa aaa d dfsdf aaa ss s g
aa asd s f f f X
asf
Showing the tabs as ^I:
$ cat -T out.txt
^I^I^I^I^I^I^I^I^I^Isdas^I^I^I^I
^Ia^Ia^Iaaa^Iaasa^I aaa^I^I^I^Id^Idfsdf^I^I^Iaaa^Iss^Is^I^Ig
aa^I^Iasd^I^I s^If^If f^IX^I
^I^I asf^I^I^I^I
Answer: OP only asked a few direct questions.
When either a tab or a single blank would suffice to reach a tab stop, which should be given preference?
Use blanks ' '.
Developers Who Use Spaces Make More Money Than Those Who Use Tabs
For lines that can fit in the buffer, the solution is straightforward. However, what if they can't?
Use a generous buffer. And when the line does not fit in the buffer, exit with a failure message.
Let us go deeper ...
When memory was expensive, too often code used fixed size small buffers, leading to problems with expanded uses.
With cheap memory, code could be written to allow for re-allocated read buffers sizes, virtually unlimited yet technically limited to SIZE_MAX.
This value can readily exceed the memory capacity of the platform.
Now we live in an era of code security vs. hacking. A program that allows external input to readily consume system resources is a hacker target as part of an exploitation. For the purpose of en-tabbing a line, why support 1 Gbyte long lines? Say code limited lines to 1 MByte. Yes once in a great while some application will die due to a long line and a Defect Report may ensue - and go to the bottom of the list in resolution handling. Risk vs. reward: handling ever larger lines vs. greater exploitation risk.
In any case, C has an Environmental limit,
An implementation shall support text files with lines containing at least 254 characters, including the terminating new-line character. The value of the macro BUFSIZ shall be at least 256. C11dr §7.21.2 7
Any code working with lines much larger than BUFSIZ runs into that limitation and so risks UB.
I recommend:
Use buffers 2x the largest expected need for that task. In this case, consider #define LINEBUF (BUFSIZ+1u) or #define LINEBUF (BUFSIZ*2u) and allocate char *line.
Treat long input as non-compliant and either reject the partial line and the rest of the line OR fail the code with a message.
Note: On many platforms, code can allocate very large buffers with *alloc() and not truly consume resources until needed. Why is malloc not “using up” the memory on my computer?
.
For lines that can fit in the buffer, the solution is straightforward. However, what if they can't? (Take 2)
Entab only requires about a few bytes. Think state machine. Re-write code. See below.
Other
Conditions
Conditions like LINEBUF >= MAXTW can be made into a compile time check
#if !(LINEBUF >= MAXTW)
#error Buffer size too small
#endif
Also research Static assert in C.
Overview
Nice format and good style.
getline()
ssize_t getline(char **, size_t *, FILE *) is a popular *nix function that conflicts with OP's size_t getline(char s[], size_t sz). Consider a different name my_getline().
Suggested alternative with, IMO, improvements. (Ignoring "break not introduced yet")
size_t my_getline(char s[], size_t sz) {
if (sz <= 0) { // Handle this pathological case right away
return 0;
}
sz--;
int c = 0;
size_t i = 0;
while (i < sz && (c = getchar()) != EOF) {
s[i++] = (char) c;
if (c == '\n') {
break;
}
}
if (c == EOF && !feof(stdin)) { // EOF due to read error
i = 0;
}
s[i] = '\0';
return i;
}
Note: OP's code has UB in the pathological case sz == 1 as it tests uninitialized c with c == EOF.
copy()
Alternative code for consideration:
void copy(char * restrict to, char const * restrict from) {
while ((*to++ = *from++));
}
entab()
Alternative code for consideration:
With the following there is no limitation on tab width, line length (other than int). No LINEBUF or MAXTW buffers needed. Just 3 int.
#define PRT_TAB "^I"
#define TAB_WIDTH 4
#define FAVORRED ' '
void entab(FILE *istream) {
int queued_spaces = 0;
int tab_position = 0;
int ch;
while ((ch = fgetc(istream)) != EOF) {
if (ch == ' ') {
queued_spaces++;
tab_position++;
if (tab_position == TAB_WIDTH) {
#if FAVORRED == '\t'
putchar('\t');
#else
if (queued_spaces == 1) putchar(' ');
else fputs(PRT_TAB, stdout);
#endif
queued_spaces = 0;
tab_position = 0;
}
} else if (ch == '\t') {
fputs(PRT_TAB, stdout);
queued_spaces = 0;
tab_position = 0;
} else {
while (queued_spaces > 0) {
putchar(' ');
queued_spaces--;
tab_position++;
}
putchar(ch);
tab_position++;
tab_position %= TAB_WIDTH;
}
}
while (queued_spaces > 0) {
putchar(' ');
queued_spaces--;
}
}
int main(void) {
int ch;
FILE *istream = fopen("Input", "rb");
assert(istream);
puts("Input");
while ((ch = fgetc(istream)) != EOF) {
putchar(ch);
}
rewind(istream);
puts("");
puts("Output");
entab(istream);
fclose(istream);
} | {
"domain": "codereview.stackexchange",
"id": 32593,
"tags": "beginner, c, strings, formatting, io"
} |
Optimising two for loops in Python | Question: I would like to make this code more efficient, I have isolated the problem as being these two for loops:
for i, device in enumerate(list_devices):
for data_type_attr_name in data_types:
result_list_element = {
"device_reference": device.name,
"device_name": "REF - " + device.name,
"data_type": data_type_attr_name,
"type": next(
(data_type["type"] for data_type in DATA_TYPES if data_type["name"] == data_type_attr_name)
),
"data_points": getattr(device, data_type_attr_name)(
is_last_value=is_last_value,
from_timestamp=from_timestamp,
to_timestamp=to_timestamp,
aggregate_period_name=aggregate_period_name,
aggregate_operation_name=aggregate_operation_name,
decimal_places=decimal_places,
),
}
if not isinstance(result_list_element["data_points"], list):
raise TypeError("`data_points` must be returned as a list, even if it contains only one element.")
result_list.append(result_list_element)
return result_list
list_devices is a list of Django model objects, data_types is a list of strings, each one representing a data type.
Is there any way of losing one of the for loops while maintaining the same output?
Thanks
Answer: Use itertools.product
import itertools
for device, data_type_attr_name in itertools.product(list_devices, data_types):
result_list_element = {
"device_reference": device.name,
"device_name": "REF - " + device.name,
"data_type": data_type_attr_name,
"type": next((data_type["type"] for data_type in DATA_TYPES if data_type["name"] == data_type_attr_name)),
"data_points": getattr(device, data_type_attr_name)(
is_last_value=is_last_value,
from_timestamp=from_timestamp,
to_timestamp=to_timestamp,
aggregate_period_name=aggregate_period_name,
aggregate_operation_name=aggregate_operation_name,
decimal_places=decimal_places,
),
}
if not isinstance(result_list_element["data_points"], list):
raise TypeError("`data_points` must be returned as a list, even if it contains only one element.")
result_list.append(result_list_element)
return result_list | {
"domain": "codereview.stackexchange",
"id": 35698,
"tags": "python, performance, django"
} |
Multiple spin measurements in the same direction | Question: What will be the outcome of the experiment during which charged particles will go through the set of Stern Gerlach apparatus aligned in the same direction (say Up)?
[SG Up] < ?1
[ Source ] -> [SG Up] <
[SG Up] < ?2
Has been this experiment actually conducted? What was its outcome?
for 1 "all up"
for 2 "all down"
or something else?
Answer: The Stern-Gerlach apparatus creates what is known as spin-path entanglement – ie. the particle's spin and its position become entangled. More specifically, if a particle flies from a SG apparatus up, then we are sure that its spin is also up, and vice versa.
If we measure spin along an axis and the incoming particle is already “polarized” along the same axis, we will get just one result with 100 % probability. The reason for this is simple: assume a particle's spin points along an axis $\vec s$ and the SG apparatus has an axis $\vec a$, then the probability of measuring “up” is given by $\cos^2 \frac{\alpha}{2}$, where $\alpha$ is the angle between $\vec s$ and $\vec a$. For $\alpha = 0$ the probability is $1$ and for $\alpha = 180^\circ$ the probability is $0$.
Therefore, your intuition is right. Since all the particles ariving at detector 1 are spin up, they will all bend upwards. Likewise, all the particles ariving at 2 are all spin down, therefore they'd be deflected down. If your source is giving off “unpolarized” particles with spin directed in random directions, then you'd get a 50-50 chance of measuring “1 up” and “2 down”.
But as @Charlie pointed out, this applies to “fast” consecutive measurements, ie. those where you don't apply any additional tricks to the flying particles. For example, if there was an additional homogeneous magnetic field between the SG apparatuses, then the Larmor precession would steer the spins away from the perfect zero angle and you could measure “1 down” and “2 up” with non-zero probability. | {
"domain": "physics.stackexchange",
"id": 79517,
"tags": "quantum-mechanics, quantum-spin"
} |
Independence of Gravitational acceleration with depth if local density of earth is 2/3 of average density | Question:
Show That the acceleration of gravity in a vertical mine shaft is independent of the depth if the local density of the Earth is 2/3 of the average density. Assume that the Earth is a spherically symmetric,non-rotating body.
This is a question from the book Resnick Halliday Krane vol 1 P14-10. The reference is "Gravity in a Mine Shaft" by Peter M.Hall and David J.Hall, The Physics Teacher, November 1995 p.525.)
I don't understand what it means by the term local density and what the question is asking. There's a pdf available of citation but I'm not able to download it https://aapt.scitation.org/doi/abs/10.1119/1.2344284?journalCode=pte
There's a similar question available but the solution is quite not clear that what actually is happening . Please before downvoting or marking this question as a duplicate, help me to understand this question. Why is the gravity in a mine shaft independent of depth if the local density is 2/3 of the average density?
Answer: For a spherically symmetric planet (think concentric shells of density that vary with radius), Gauss's Law say the gravity you feel at $r_0$ depends only on the mass at $r<r_0$.
So if you go down a 100m mine shaft, the gravity from the top 100m of the entire planet adds to zero.
Moreover, any mass, $M$, below you acts as if it were concentrated at the center, with force per unit mass:
$$ g = G\frac M {r_0^2} $$
So for a spherical Earth, the gravity at the surface is the same if all the mass is in a pea-sized lump at the center, or a thin, dense, shell making a hollow planet.
That's the physics principle. The rest is implementation.
The gravity at the surface is:
$$ g(r) = G\frac M{r^2}$$
If we reduce $r$, there is less mass $M$ pulling on us, but it is closer, so it pulls harder. At which density do the two effects cancel?
From here you apply calculus's rule$^1$ for the derivative of a ratio:
$$ \frac{dg}{dr}=\frac{G}{r^2}\left(\frac{dM}{dr}-\frac{2M} r \right)=0$$
or:
$$\frac{dM}{dr} = \frac{2M} r $$
The LHS is the surface area times the local density:
$$\frac{dM}{dr} = 4\pi r^2\rho(r)$$
Meanwhile, on the RHS, the mass is the volume times the average density:
$$ M =\frac {4\pi} 3 r^3 \bar{\rho}$$
Combine those:
$$4\pi r^2\rho(r) = \frac {4\pi} 3 r^3 \bar{\rho}\frac 2 r$$
and voila:
$$\rho(r) =\frac 2 3 \bar{\rho}$$
A very nice problem.
[1] The quotient rule is the derivative$^2$ of the quotient of two functions:
$$ \left(\frac f g\right)'=\frac{gf'-fg'}{g^2}$$
Here I set:
$$ f(r)=M(r)\rightarrow f'=dM/dr$$
$$ g(r)=r^2 \rightarrow g'=2r $$
so
$$(f/g)'=\frac{(dM/dr)r^2 - 2Mr}{r^4}=\frac 1{r^2}\left(\frac{dM}{dr} -\frac{2M}r\right)$$
[2] The derivative is a formal way to quantify how a function changes. The field that deals with this is called calculus.
Here, we are interest in the gravity at the Earth surface ($R$):
$$ g(R) = g(r)_{|_{r=R}}$$
and its value at a slightly smaller $r=R-\delta r$, $g(R-\delta r)$. The change in the gravity strength is:
$$-\delta g = g(R-\delta r)-g(R)$$
To get the rate of change, we need to compute the change in gravity per small distance shifted:
$$\frac{-\delta g}{\delta r} = \frac{g(R-\delta r)-g(R)}{\delta r}$$
In the limit that $\delta r\rightarrow 0$, this defines the derivative:
$$-\frac{dg}{dr}\equiv_{\delta r\rightarrow 0} \frac{g(R-\delta r)-g(R)}{\delta r}$$
(where the minus signs are because we're decreasing $r$ as required in the problem).
The good news as that many derivatives are well known, particularly any power functions:
$$ f(x) = x^n $$
The derivative, often written as "prime", is:
$$\frac{df}{dx}=f'(x) = nx^{n-1}$$
so with $n=-2$:
$$\frac d{dx}\left(\frac 1 {x^2}\right)=\frac d{dx}(x^{-2})=-2x^{-3}=\frac{-2}{x^3}$$ | {
"domain": "physics.stackexchange",
"id": 80528,
"tags": "newtonian-mechanics, newtonian-gravity, planets, density"
} |
Is it possible to run a timer inside an action server? | Question:
Hi all,
my question is rather theoretical and it's about best practice, I hope what I'm thinking about it's not completely useless.
Let me introduce you the problem.
Basically, I have a node which is in charge of doing two things:
publishing velocity commands to /cmd_vel
listening to /joint_states topic
This node is not a controller (as one would expect). The velocity commands are defined a-priori and the subscriber to the /joint_states topic is used to track the robot trajectory. In fact, the publisher spins at 100Hz while the listener at ~50Hz (which is the frequency of the /joint_states topic). To do so, the publisher is implemented with a timer, the listener with a subscriber and I use an asynchronous spinner to run them both in the same node:
ros::AsyncSpinner spinner(4);
spinner.start();
ros::waitForShutdown();
So far so good. Now, imagine I want to implement a safety layer. That is, while tracking the robot trajectory I detect that the robot reached a forbidden position and I want to halt the robot by stopping publishing velocity commands.
My idea was to implement it with an action server. In particular, a server with the GoalCallbackMethod (for the /joint_states topic). Therefore, my question is: can I use a ros::Timer inside the action server class? If possible, what I have to do in order to run the server callback and the timer callback asynchronously? An asynchronous spinner is enough?
Any suggestion would be appreciated!
Thanks.
EDIT:
@gvdhoorn: thanks for your reply!
Regarding the last question, my idea was that the action server publishes as feedback the robot pose. Then, the client cancels the goal if the robot reached a forbidden position. Does it make sense to you?
Originally posted by schizzz8 on ROS Answers with karma: 183 on 2019-06-21
Post score: 0
Answer:
Therefore, my question is: can I use a ros::Timer inside the action server class?
Yes (at least, if you're referring to your own action server class here, not something like SimpleActionServer).
If possible, what I have to do in order to run the server callback and the timer callback asynchronously? An asynchronous spinner is enough?
It should be, yes.
If it doesn't work, post back here.
So far so good. Now, imagine I want to implement a safety layer. That is, while tracking the robot trajectory I detect that the robot reached a forbidden position and I want to halt the robot by stopping publishing velocity commands.
are you describing a valve here? So your node receives velocity commands, checks whether everything is ok. If ok, forward, if not ok, stop forwarding.
Edit:
Regarding the last question, my idea was that the action server publishes as feedback the robot pose. Then, the client cancels the goal if the robot reached a forbidden position. Does it make sense to you?
not really, but it probably does to you, so that is fine.
if the node is not a controller, where does it get its velocity commands from? And if the feedback of the robot is joint_states, couldn't your client subscribe to that topic? Or are you looking at Cartesian poses here? In that case: TF?
Originally posted by gvdhoorn with karma: 86574 on 2019-06-21
This answer was ACCEPTED on the original site
Post score: 1 | {
"domain": "robotics.stackexchange",
"id": 33236,
"tags": "ros-kinetic, roscpp"
} |
Evaluating the $A \land A \land A$ in the Chern-Simons action | Question: I am trying to evaluate $A \land A \land A$, but I am a bit confused on how exactly to do it and produce the usual notation used in physics. I am trying to use the definition of the wedge product of Lie algebra valued forms given here Lie algebra-valued differential form, but I am not sure how to proceed. Any input is very much appreciated.
Answer: Does this help:
Consider a Lie-algebra-valued form $A=A_\mu^a \lambda_a dx^\mu$ then
$$
{\rm tr}(A^3)= {\rm tr} \{\lambda_a\lambda_b \lambda_c\} A^a_\alpha A^b_\beta A^c_\gamma dx^\alpha \wedge dx^\beta \wedge dx^\gamma\\
= {\rm tr} \{\lambda_a\lambda_b \lambda_c\} A^a_\alpha A^b_\beta A^c_\gamma \epsilon^{\alpha\beta\gamma} dx^1\wedge dx^2 \wedge dx^3\\
= \frac 12 {\rm tr} \{\lambda_a[\lambda_b, \lambda_c]\} A^a_\alpha A^b_\beta A^c_\gamma \epsilon^{\alpha\beta\gamma} dx^1\wedge dx^2 \wedge dx^3.
$$ | {
"domain": "physics.stackexchange",
"id": 80588,
"tags": "differential-geometry, lie-algebra, topological-field-theory, chern-simons-theory"
} |
Does time expand with space? (or contract) | Question: Einstein's big revelation was that time and space are inseparable components of the same fabric. Physical observation tells us that distant galaxies are moving away from us at an accelerated rate, and because of the difficulty (impossibility?) of defining a coordinate system where things have well defined coordinates while also moving away from each other without changing the metric on the space, we interpret this to mean that space itself is expanding.
Because space and time are so directly intertwined is it possible that time too is expanding? Or perhaps it could be contracting?
Answer: The simple answer is that no, time is not expanding or contracting.
The complicated answer is that when we're describing the universe we start with the assumption that time isn't expanding or contracting. That is, we choose our coordinate system to make the time dimension non-changing.
You don't say whether you're at school or college or whatever, but I'm guessing you've heard of Pythagoras' theorem for calculating the distance, $s$, between two points $(0, 0, 0)$ and $(x, y, z)$:
$$ s^2 = x^2 + y^2 + z^2 $$
Well in special relativity we have to include time in the equation to get a spacetime distance:
$$ ds^2 = -dt^2 + dx^2 + dy^2 + dz^2 $$
and in general relativity the equation becomes even more complicated because we have to multiply the $dt^2$, $dx^2$, etc by factors determined by a quantity called the metric, and usually denoted by $g$:
$$ ds^2 = g_{00}dt^2 + g_{11}dx^2 + g_{22}dy^2 + ... etc $$
where the $... etc$ can include cross terms like $g_{01}dtdx$, so it can all get very hairy. To be able to do the calculations we normally look for ways to simplify the expression, and in the particular case of the expanding universe we assume that the equation has the form:
$$ ds^2 = -dt^2 + a(t)^2 d\Sigma^2 $$
where the $d\Sigma$ includes all the spatial terms. The function $a(t)$ is a scale factor i.e. it scales up or down the contribution from the $dx$, $dy$ and $dz$, and it's a function of time so the scale factor changes with time. And this is where we get the expanding universe. It's because when you solve the Einstein equations for a homogenous isotropic universe you can calculate $a(t)$ and you find it increases with time, and that's what we mean by the expansion.
However the $dt$ term is not scaled, so time is not expanding (or contracting). | {
"domain": "physics.stackexchange",
"id": 79931,
"tags": "general-relativity, spacetime, time, space-expansion, dark-energy"
} |
Caching Color-Bitmaps as MemoryStreams | Question: I am in the need to cache Bitmap's in a memory-optimized way because the API I am building will need to process many colored Bitmap's in parallel and can be used in x86 or x64 compiled applications.
If the API is being used in x86 I can't just store the Bitmap's as they are but need to store them as compressed MemoryStream's otherwise the API would throw an OutOfMemoryException pretty fast.
After storing the "Bitmap's" they will be processed by multiple threads hence thread-safety is a major point.
Any feedback is welcome.
public static class ImageCache
{
private static int currentId = 0;
private static readonly object addImageLock = new object();
private static readonly object releaseImageLock = new object();
private static readonly ConcurrentDictionary<int, MemoryStream> images = new ConcurrentDictionary<int, MemoryStream>();
/// <summary>
/// Release an image based on its id.
/// </summary>
/// <param name="id"></param>
public static void ReleaseIamge(int id)
{
lock (releaseImageLock)
{
ReleaseMemoryStream(id);
}
}
private static void ReleaseMemoryStream(int id)
{
MemoryStream ms = null;
if (images.TryGetValue(id, out ms) && ms != null)
{
images[id].Dispose();
images[id] = null;
}
}
/// <summary>
/// Releases all Images
/// </summary>
public static void ReleaseAllImages()
{
lock (releaseImageLock)
{
lock (addImageLock)
{
foreach (var id in images.Keys)
{
ReleaseMemoryStream(id);
}
images.Clear();
}
}
}
/// <summary>
/// Returns a Bitmap from the cache which is identified by an id
/// </summary>
/// <param name="id"></param>
/// <returns></returns>
public static Bitmap GetBitmap(int id)
{
lock (releaseImageLock)
{
MemoryStream ms = null;
if (images.TryGetValue(id, out ms))
{
if (ms != null)
{
return (Bitmap)Image.FromStream(ms);
}
}
}
return null;
}
/// <summary>
/// Adds an Bitmap to the cache
/// </summary>
/// <param name="bitmap"></param>
/// <returns>0 if the Bitmap is null, otherwise a uique id</returns>
public static int Add(Bitmap bitmap)
{
if (bitmap == null)
{
return 0;
}
var ms = new MemoryStream();
bitmap.Save(ms, ImageFormat.Tiff);
var id = 0;
lock (addImageLock)
{
// If the dictionary is empty we can reset the currentId
if (images.Count == 0)
{
currentId = 0;
id = Interlocked.Increment(ref currentId);
images.TryAdd(id, ms);
return id;
}
// We don't know how long an application using this is running and how many
// images having been stored but we don't want to reach int.MaxValue here
// hence we recycle the Value of a KeyValuePair of the dictionary if the Value
// will be null.
id = images.Where(item => item.Value == null).FirstOrDefault().Key;
if (id == 0)
{
id = Interlocked.Increment(ref currentId);
}
images[id] = ms;
}
return id;
}
}
Clarifying comments:
Why don't you use a Guid as id and key in the dictionary?
Well the problem with this is that I need to "mimic" an API we use for our main application in such a way that I can interchange this API and the other API. The other API is a commercial imaging SDK whose licence doesn't permit its use for an SDK/API.
Answer: First, I'm not a fan of recycling IDs. Too much confusion can happen. For example Thread 1 adds a Dog image and gets ID 1. Thread 2 calls ReleaseAllImages and then adds a Cat image and gets ID 1. Now Thread 1 thinks it has a Dog image but instead gets back a Cat image. There are times we need to recycle IDs, but only you know the business requirements well enough to say whether it's truly needed or not.
Second, it's thread safe, but you are not getting the benefit of ConcurrentDictionary this way. Everything is locked. If two threads are trying to add, one will be blocked until the other is free. It doesn't have to be this way.
I'm going to go down the path that you need to recycle IDs
Instead of leaving the record in the dictionary you can remove it and keep a queue of IDs to recycle. Then, in the removing methods, check whether we removed all the records from the dictionary, and if so enter the lock to see if we can reset the counter and queue. Going to use ReaderWriterLockSlim to allow multiple adds and only block when we are removing.
public static class ImageCache
{
private static int currentId = 0;
private static readonly ConcurrentDictionary<int, MemoryStream> images =
new ConcurrentDictionary<int, MemoryStream>();
// stores removed ID to reuse
private static ConcurrentQueue<int> recycle = new ConcurrentQueue<int>();
private static readonly ReaderWriterLockSlim recycleLocker = new ReaderWriterLockSlim();
/// <summary>
/// Release an image based on its id.
/// </summary>
/// <param name="id"></param>
public static void ReleaseIamge(int id)
{
ReleaseMemoryStream(id);
TryReset();
}
/// <summary>
/// Releases all Images
/// </summary>
public static void ReleaseAllImages()
{
foreach (var id in images.Keys)
{
ReleaseMemoryStream(id);
}
TryReset();
}
private static void ReleaseMemoryStream(int id)
{
MemoryStream ms = null;
if (images.TryRemove(id, out ms))
{
recycle.Enqueue(id);
ms.Dispose();
}
}
private static void TryReset()
{
if (!images.IsEmpty)
{
return;
}
//need to lock here
if (recycleLocker.TryEnterWriteLock(TimeSpan.FromMilliseconds(100)))
{
try
{
// make sure another thread didn't sneak in and add another image
if (images.IsEmpty)
{
currentId = 0;
Interlocked.Exchange(ref recycle, new ConcurrentQueue<int>());
}
}
finally
{
recycleLocker.ExitWriteLock();
}
}
}
GetBitmap method doesn't need to check if the image is null now we can use the TryGetValue and not need to lock at all
/// <summary>
/// Returns a Bitmap from the cache which is identified by an id
/// </summary>
/// <param name="id"></param>
/// <returns></returns>
public static Bitmap GetBitmap(int id)
{
MemoryStream ms = null;
if (images.TryGetValue(id, out ms))
{
return (Bitmap) Image.FromStream(ms);
}
return null;
}
Add method will need to let the lock know we are reading from the recycle queue
/// <summary>
/// Adds an Bitmap to the cache
/// </summary>
/// <param name="bitmap"></param>
/// <returns>0 if the Bitmap is null, otherwise a uique id</returns>
public static int Add(Bitmap bitmap)
{
if (bitmap == null)
{
return 0;
}
recycleLocker.EnterReadLock();
try
{
var ms = new MemoryStream();
bitmap.Save(ms, ImageFormat.Tiff);
// Recycle Id or make new one
int id;
if (!recycle.TryDequeue(out id))
{
id = Interlocked.Increment(ref currentId);
}
// this should never be possible to fail
images.TryAdd(id, ms);
return id;
}
finally
{
recycleLocker.ExitReadLock();
}
}
Using the ReaderWriterLockSlim will allow multiple reads but only one write. Now multiple threads could Add images and Get images without blocking each other and the code only blocks when it's resetting the counters.
"domain": "codereview.stackexchange",
"id": 32206,
"tags": "c#, image, thread-safety, concurrency"
} |
choosing between v belt and round belt for fast RPM, low torque application | Question: My pulleys are going to be rotated with a 1000-2000 RPM motor, with a constant RPM controlled by a hall effect sensor, there is going to be almost no torque as one of the pulleys just has a 100 gram optical element attached to it and all the pulleys are on bearings.
Any suggestions on choosing v belt or circular belt? Price is pretty similar and v seems to reduce risk of slipping and has better grip. Can't find a disadvantage of v belts.
I just assume slippage might become an issue with round belts at these RPMs if there isn't enough grip; no other reason to not choose them over v belts, really.
I'd imagine toothed belts wouldn't provide much advantage here besides avoiding any backlash or slippage when the system begins rotating, but I don't see how they would be more precise after the speed is reached and the speed and load remain constant during operation.
Answer: Depends on many things. For example, I have sometimes used round belts for these applications simply because I happened to have suitably sized O-rings which I could use as belts and didn't have V-belts around. This means I could have a working version today, instead of in a day or two. *
The groove for a round belt may be minimally easier to manufacture with a manual lathe. Also the round belt is slightly easier to install if you do not use a tensioner mechanism as its possible to roll it into place.
Remember that your feedback should come from the final axis; this way it hardly matters if your belt slips. Slippage may be minimal anyway, but if you are really concerned use a toothed belt instead.
* Don't underestimate this, as you may end up doing some higher-priority work in the future. But then, if you do not need to concern yourself with assembly and manufacturing, choose what you think is best.
"domain": "engineering.stackexchange",
"id": 2022,
"tags": "mechanical-engineering, motors, pulleys"
} |
Force as a partial derivative | Question: I'm trying to calculate the average power for a 1D sine wave (traveling through a rope with tension $T'$)
$$y(x,t)=A \sin(kx+ \omega t)$$
I start by calculating the instant power: I know that $P=Fv$ and by Newton's law $F=ma=m\frac{\partial ^2y}{\partial t ^2}$ and $v=\frac{\partial y}{\partial t}$ ($v$ is the velocity of an element $dx$ of the rope).
Calculating the partial derivatives I end up with:
$$\frac{\partial y}{\partial t}=A \omega \cos(kx+ \omega t)$$
$$\frac{\partial ^2y}{\partial t ^2}=-A \omega ^2 \sin(kx+ \omega t)$$
And I really don't know how to continue, so I looked at the solution and the professor came up with this:
$$P=Fv=\left(T' \frac{\partial y}{\partial x}\right)\left(\frac{\partial y}{\partial t}\right)$$
I don't understand why $F=T' \frac{\partial y}{\partial x}$ ?
Answer: We obtain this result from trigonometry and small-angle approximations. Each point in the rope is moving only vertically. For small angles, the vertical force $F$, or vertical component of the tension, is the product of the tension $T'$ and the slope (i.e., $F=T'\sin\theta\approx T'\tan\theta=T'\frac{\partial y}{\partial x}$):
Source: Morin, Transverse Waves on a String, p. 20. Obtained from the fifth result of an online search for "power" sinusoid rope "tension" "force". | {
"domain": "physics.stackexchange",
"id": 85640,
"tags": "homework-and-exercises, forces, waves"
} |
Array Dynamic resize in heap | Question: I have answered a Question in Stackoverflow link.
a) Create a function called resize that can be used to increase the
size of integer arrays dynamically. The function takes three
parameters. The first parameter is the original array, the second
parameter is the size of this array, and the third parameter is the
size of the larger array to be created by this function. Make sure
that you allocate memory from the heap inside this function. After
allocating memory for the second array the function must copy the
elements from the first array into the larger array. Finally, the
function must return a pointer to the new array.
b. In main, allocate an array on the heap that is just large enough to
store the integers 5, 7, 3, and 1.
c. Resize the array to store 10 integers by calling the resize
function created in step a. Remove the old (smaller) array from the
heap. Add the numbers 4, 2, and 8 to the end of the new array.
d. Write a sort function that sorts any integer array in increasing
order.
e. Use the sort function to sort the array of numbers in c above.
Display the sorted numbers.
Is there a dangling pointer issue?
#include <array>
#include <iostream>
void swap(int *xp, int *yp)
{
int temp = *xp;
*xp = *yp;
*yp = temp;
}
//Bubble Sort
bool sort(int arr[], int size)
{
for( int i = 0; i< size -1; i++)
{
for( int j = 0; j < size - i -1; j++)
{
//descending order
if(arr[j]<arr[j+1])
{
swap(&arr[j], &arr[j+1]);
}
}
}
return true;
}
void Print(int Array[], int nSize)
{
for( int i = 0; i < nSize; i++)
{
std::cout<<" "<<Array[i];
}
std::cout<<"\n";
}
void Resize( int *&Array, const int& nSizeOld, const int& nSize )
{
int * newArray = new int[nSize];
//Copy Elements of the Array
for(int i = 0; i< nSize; i++)
{
newArray[i] = Array[i];
}
delete[] Array;
//Assign ptr of Prev to new Array
Array = newArray;
}
int _tmain(int argc, _TCHAR* argv[])
{
const int kNewSize = 10, kSize = 5;
int *pMyArray = new int[kSize];
//Set Values
for( int i = 0; i< kSize; ++i )
{
pMyArray[i] = i * 5;
}
Resize( pMyArray, kSize, kNewSize );
//Set Values
for( int i = kSize; i< kNewSize; ++i )
{
pMyArray[i] = i * 10;
}
Print(pMyArray, kNewSize);
sort(pMyArray, kNewSize);
Print(pMyArray, kNewSize);
if( pMyArray!=NULL )
{
delete[] pMyArray;
}
return 0;
}
Answer: If you had tagged this code as C, it would have been acceptable. Since you tagged it as C++, it's horrible.
Instead of writing your own swap function, there's already std::swap in <algorithm>.
Instead of writing bubble sort yourself, just use std::sort, also from <algorithm>.
Instead of using arrays and resizing them yourself, just use std::vector<int>, from <vector>.
After applying these transformations, you cannot have a dangling pointer anymore since your code is completely pointer-free.
As part of an exercise for learning the basic operations on memory management, it's ok to write code like this, but never ever use such code in production. In production the code should look like this:
#include <algorithm>
#include <iostream>
#include <vector>
void Print(const std::vector<int> &nums)
{
for(int num : nums)
{
std::cout << " " << num;
}
std::cout << "\n";
}
int main()
{
std::vector<int> nums { 5, 7, 3, 1 };
// There's probably a more elegant way to add the elements to the vector.
nums.push_back(4);
nums.push_back(2);
nums.push_back(8);
std::sort(nums.begin(), nums.end());
Print(nums);
}
By the way, your original code doesn't have any dangling pointers either. Well done.
You don't need the != NULL check before the delete[] since that pointer cannot be null. In modern C++ (since C++11 I think) you would also write nullptr instead of NULL. The reason is that historically NULL had not been guaranteed to be of pointer type.
Have a look at https://en.cppreference.com/w/cpp/algorithm for more algorithms that you shouldn't implement yourself in C++.
I would have liked to write the push_back block in a shorter way, as well as the Print function. I'm sure there's a more elegant way, I just don't know it. | {
"domain": "codereview.stackexchange",
"id": 34354,
"tags": "c++, c++11, pointers"
} |
Gazebo model: wheels slip after upgrade to Electric | Question:
Dear Gazebo gurus,
after switching to Electric, my robot model seems to lose friction with the ground in Gazebo. The wheels are still turning, but the robot doesn't move forward. The same code works perfectly in Diamondback.
Steps to reproduce:
git clone http://kos.informatik.uni-osnabrueck.de/uos-ros-pkg.git
rosmake kurt_gazebo kurt_teleop_key
roslaunch kurt_gazebo kurt_wg_world.launch
rosrun kurt_teleop kurt_teleop_key
Now use the w,q,e,s keys to send cmd_vel commands.
Update 1: Here's a short video of that behaviour:
http://vimeo.com/30067445
Update 2: It's not a problem with my custom controller. In order to isolate the problem, I removed the gazebo_ros_kurt controller from kurt_description/bases/kurt_indoor.urdf.xacro and applied a body wrench to one wheel:
rosservice call gazebo/apply_body_wrench '{body_name: "kurt::left_front_wheel_link" , wrench: { force: { x: 0.0, y: 0.0, z: 0.0 } , torque: { x: 0.0, y: 14.0 , z: 0.0 } }, duration: -1 }'
In Diamondback, this makes the robot turn; in Electric, for all y torques below ~13, nothing seems to happen; from ~14 on upwards, the robot jumps violently around.
Here's a video of that, too:
http://vimeo.com/30067861
Update 3: Thanks to @hsu, this was fixed in commit 1eafb56786b1f22f67967d1d821f687ac7c827f3, so if you want to reproduce the bug make sure to get an earlier commit than that.
Originally posted by Martin Günther on ROS Answers with karma: 11816 on 2011-10-04
Post score: 1
Original comments
Comment by mbj on 2011-10-16:
@hsu: Before opening a new question about my problem I tried, based on @Martin Günther's problem/solution, to view my robot's contacts and bounding boxes. As I saw, the base_footprint link was contacting the floor, so I raised this link up a little. In the Diamondback version, this fact doesn't cause any problem, but in Electric it makes it impossible for the robot to move forward/backward, although it is still able to rotate over itself (is ODE more restrictive in this version of Gazebo?). Here is a video where the problem is solved. (http://www.youtube.com/watch?v=SCFir5kbW0M)
Comment by mbj on 2011-10-14:
@hsu: hi john, monday I'll try to reproduce the problem, that is just the contrary that in the diamondback version. The robot turns over itself correctly but it not goes forward/backwards.
Comment by hsu on 2011-10-13:
@mbj: if what you are experiencing turns out to be a different problem, please open a new question with instructions for reproducing the problem. thanks.
Comment by mbj on 2011-10-13:
Hi. I had the same problem when I updated my ROS to the Electric version. I tested changing the mu1, mu2 and other friction values, and furthermore, I tried to change the wheels. I solved temporaly the problem using the Erratic robot wheels that works fine, but there isn't a correct solution, I will test this solution soon.
Comment by hsu on 2011-10-10:
not for now, thanks for the launch commands to reproduce, that'll be handy for trying to figure out what's gone wrong. thanks.
Comment by Martin Günther on 2011-10-09:
@hsu: I'm still having this problem. I guess there were some changes to Gazebo which mean that I have to change my URDF file to work with Electric, but I have no idea how to proceed. Could you reproduce the bug? Is there anything I can do to help?
Comment by Martin Günther on 2011-10-04:
I assume you mean if there are changes between the URDF I use in Diamondback and Electric? No, as I said, the exact same model and controller work in Diamondback, but not Electric.
Comment by hsu on 2011-10-04:
Can you check if there's any changes in the model (XML/URDF)? This way we can isolate the problem. Thank you.
Answer:
I noticed some extra contacts on the ground between the wheels (see photo below), turns out the laptop_structure.dae is touching the ground view below has contacts view enabled. The problem is also visible if you also enable the bounding box visualization under view.
replacing laptop_structure.dae under collision with a small box fixed the problem, but I suspect something might have gone awry with the assimp dae mesh loader.
<?xml version="1.0"?>
<robot
xmlns:xacro="http://ros.org/wiki/xacro">
<include filename="$(find kurt_description)/parts/sick_lms200.urdf.xacro" />
<link name="laptop_structure">
<visual>
<geometry>
<mesh filename="package://kurt_description/meshes/laptop_structure.dae" />
</geometry>
</visual>
<collision>
<geometry>
<!--
<mesh filename="package://kurt_description/meshes/laptop_structure.dae" />
-->
<box size="0.01 0.01 0.01"/>
</geometry>
</collision>
<inertial>
<mass value="2.0" />
<origin xyz="-0.005401183 0.0 0.141356018" />
<inertia ixx="0.022277280" ixy="-0.000000004" ixz="-0.001009999"
iyy="0.017783837" iyz="0.0"
izz="0.025523417" />
</inertial>
</link>
<joint name="top_to_structure" type="fixed">
<parent link="base_link" />
<child link="laptop_structure" />
<origin xyz="-0.05 0 0.03" rpy="0 0 0" />
</joint>
<joint name="structure_to_laser" type="fixed">
<parent link="laptop_structure" />
<child link="laser" />
<origin xyz="0.195 0 0.08" rpy="0 0 0" />
</joint>
</robot>
alternatively, loading laptop_structure.dae with meshlab and saving it as a stl mesh, then modifying urdf collision to use the new stl (keep visual with dae for better aesthetics) also fixes this problem.
This deserves a ticket.
Originally posted by hsu with karma: 5780 on 2011-10-12
This answer was ACCEPTED on the original site
Post score: 2
Original comments
Comment by Martin Günther on 2011-10-12:
Thanks a lot for all your help, it's working perfectly now. | {
"domain": "robotics.stackexchange",
"id": 6852,
"tags": "gazebo, simulation, ros-diamondback, ros-electric"
} |
Speeding up pointcloud delivery to subscriber (kinect) | Question:
Hi,
I am using the Nvidia Jetson + ROS + Freenect_Launch to access data from the Kinect. I am running into an issue (that I don't run into on my intel-i7 laptop) where my node who is subscribing to the /camera/depth/points message, cannot 'receive' fast enough (for my purpose). I have played with different ways of configuring the call-back function (as well as using TransportHints().tcp_nodelay()), and the best I can do is about 7Hz.
I am not doing any processing of the pointcloud in the callback, I just have the subscriber-callback publishing a basic sensor_msg so I can use rostopic hz /mynode/basicsensormsg to see how fast the callback is occuring (about 7Hz).
Same exact node running on my laptop is full 30Hz. When I do a rostopic hz /camera/depth/points, this is also 30Hz.
I believe the Jetson board is bottle-necking during the transferring of the pointcloud data from the launch-node to my written node. I'm wondering if there is a more 'efficient' way of subscribing to such a large portion of data, or if anyone has compiled the freenect_camera driver into their rosnode and could share their experience (I'm moving toward the idea that the pointcloud delivery through ros sensor_msgs is not the right approach, and would rather have a node directly receive from the driver, eliminating needless memory transfer steps).
Any thoughts?
Description of some code I tried:
The callback: void cloud_cb(const sensor_msgs::PointCloud2::ConstPtr& point_cloud) was tried, and this callback defined this way did not have any bottle necks. However, I could not figure out how to use the cloud in a pcl::passthroughfilter() without using 'pcl::fromROSMsg()' first. The pcl::fromROSMsg() caused the 7Hz bottleneck once used in the callback function.
The callback: void cloud_cb(const PointCloud::ConstPtr& point_cloud) was used, and this callback defined this way bottlenecks without any additional code. However, I can directly use the cloud 'point_cloud' in a pcl::passthrough filter, avoiding the need to use 'pcl::fromROSMsg();
Originally posted by dwyer2bp on ROS Answers with karma: 3 on 2015-06-23
Post score: 0
Original comments
Comment by dwyer2bp on 2015-06-29:
https://github.com/johnnyonthespot/ros/blob/master/src/kinect_topdownview/kinect_topdownview.md
My nodelet example code provided.
Answer:
Suggestion: avoid (de)serialisation altogether, and use nodelets.
Edit:
Do you have a suggestion on how one could do this, given the freenect_launch seems to be its own package? freenect_launch advertises the data I need, but I don't know how to go about acquiring the data in a nodelet fashion, [..].
nodelets are a bit of an art. It helps to know that they are essentially plugins to a nodelet manager process. I'm not sure about freenect, but at least openni_* launches a manager, into which you can load your own nodelet. That would receive the pointclouds, exchanging pointers 'under the hood', but looking like regular message exchanges at the application (your node) level. Note that your nodelet can come from (so: be located) anywhere, as long as it is findable by pluginlib. freenect_launch being its own package does not matter.
But, just as @duck-development says, if the Jetson is too slow for all of this, using nodelets won't help you, as the bottleneck is then not the message passing, but somewhere else.
I don't even know where to start.
I'd suggest you first do the nodelet tutorial(s). If required, find some more examples by looking at the packages that use the nodelet package (wiki/nodelet (I'd use the Indigo page, as more pkgs have been released there), Package Links, Used by). image_proc and velodyne_* could be good example material (note that freenect_camera and _launch and openni_* are also listed).
Originally posted by gvdhoorn with karma: 86574 on 2015-06-23
This answer was ACCEPTED on the original site
Post score: 3
Original comments
Comment by dwyer2bp on 2015-06-23:
Do you have a suggestion on how one could do this, given the freenect_launch seems to be its own package? freenect_launch advertises the data I need, but I don't know how to go about acquiring the data in a nodelet fashion, since I don't even know where to start.
Comment by dwyer2bp on 2015-06-24:
Nodelets seem to be the solution from reading about 'why to use them'. I noticed the freenect launch uses nodelets. I will try to understand how I can create a node with the freenect 'nodelet' integrated. Meanwhile, if anyone has already done this I wouldn't mind checking out your code :0)
Comment by dwyer2bp on 2015-06-26:
This fixed my bottleneck! (and I learned a lot more about ROS). Wrote code for a nodelet, and loaded it into the nodelet manager which is started by freenect_launch. My callback now has a 30Hz response on the Jetson.
Thanks!!! | {
"domain": "robotics.stackexchange",
"id": 21993,
"tags": "ros, kinect, freenect, jetson, nvidia"
} |
Seismic migration concepts | Question: In Dr Hua-Wei Zhou's book Practical Seismic Data Analysis (2014), he includes and explains figures from previous classic works (i.e. Schneider, 1971) in order to describe migration from a fundamental level. On pages 212–213, the following figures (and questions) are shown below:
Question 1
For Figure 14, he says that "in the case of a constant velocity model, the equal-traveltime contour will be a hyperbolic surface centered over the image point" - my question is this: why is it a hyperbola and not a semi-circle? In Jon Claerbout's Imaging the Earth's Interior, he claims that the equation of a wave front at a given constant time results in a semi-circle and not a hyperbola.
Question 2
In Figure 13, he also says that "in the case of a constant velocity model, the equal traveltime contour will be an elliptical surface with the source and receiver as the two foci" - again, my question is this: why is this an elliptical surface in this particular scenario? Why not a semi-circle?
Answer: The information you provide is rather limited, however, I want to try and disentangle this. The way I see it, you are mixing three different concepts in your questions. I will work through the semi-circle, through the elliptical towards the hyperbolic assumption.
Semi-Circle
This is a GIF of a central source emiting a wave in a homogeneous space:
You can stop the gif at any time (drag it with your mouse) and all wavefronts will be concentric. You can also see this, when you look at the wave equation. Changes in time equal the velocity times changes in space. If the velocity is constant, a change in time will cause a equal change of wave propagation in any available direction. While often, we work in (x,y,z)-coordinates, here it's easiest to assume that our wave source is at $(0,0,0)$ and if our propagation is equal in all directions, our vector pointing to these points in space is the radius of a perfect sphere.
Of course, if we're looking at a half-space like we do in seismics, we have a half-sphere and if we scale back to 2D we have a semi-circle surrounding our source. So if we dig up a huge patch of subsurface and place receivers all around our source, those equally distant from the source should respond at the same time after igniting the source.
Ellipse
In Fig. 13 you see the semi-circle. There are several important things to note in this image. The first would be the receiver. The receiver is off-center, but all those receivers in the semi-circle example are too. What makes this different, is that instead of several receivers recording the wave simultaneously, here, it's one singular receiver. Additionally, before our source was nice and centric, now it's off-center.
Unfortunately, this is not enough to make sense of Fig. 13. Firstly, for the semi-circle we were looking at direct waves. In Fig. 13 we see reflected waves. Now, they assume constant velocity again, which means there clearly must be a density change to make a reflection happen. This brings us to a simple theoretical question: "I have a measurement with the traveltime $T$. Which locations in the subsurface would have to have a density change to explain my measurement of $T$?"
So essentially, you are constructing a plane where the ray path from the source toward the plane, added to the ray path from that incident point on the plane to the receiver is always of the length $T$. Luckily, this is exactly how a half ellipse is formed.
You can actually try this with a string. Take a string of the length $T$, fix one end on the source location, one at the receiver location and then put a pen in the sling and trace the outmost location you can reach with the pen in the sling. You have created a surface below your acquisition, which could all have caused a response at your receiver at the time $T$.
Fun fact: If you put the source and the receiver back to location $x=0$, your ellipse becomes a circle again. It's not the semi-circle from before, as before we had a direct wave, now we're looking at a surface of equal reflection traveltime. You can see this in the following figure CC-BY-SA Ag2gaeh:
Hyperbola
Figure 14 will again take us a step closer to migration. Before, you looked at one source off-center and one receiver off-center. Now we switch gears again. Let's look at the easiest case here, $x=0$. Here the source and the receiver are at the same location, so the travel time $T_0$ to the imaging point I is equal in length before and after reflection from the subsurface point. But from before we know that this place I could be anywhere on a semi-circle in the subsurface (which would be an ellipse, if it wasn't for the fact that $S=R$, as we saw before).
So we need something to convince our math that instead of placing our image point I "anywhere on a semi-circle", we want the exact location. Here, we have several ways to look at it. If the image point $I$ in the subsurface is actually a point, we call it a diffraction. The special property of diffractions is that they will always reflect your wave right back at you (they diffract the wave, so they scatter the wave in all directions, hence, also back at you). So if you place your source and your receiver at location $x=1$ in this image, we can construct a triangle between the points $I$, $x=0$ and $x=1$. That means our traveltime $T_1$ can be calculated with good ol' Pythagoras ($a^2 + b^2 = c^2$):
$$1^2 + (T_0/2)^2 = (T_1/2)^2,$$
and we can do this for every point so that:
$$T_N = 2\sqrt{N^2 + T_0^2/4}$$
and then:
$$T_N = T_0 \sqrt{(2N/T_0)^2 + 1}$$
This coincidentally describes the eccentricity of a hyperbola. This is one way to go about finding diffraction hyperbolae, there are several others, but I tried to stick with the provided figures. | {
"domain": "earthscience.stackexchange",
"id": 1413,
"tags": "geophysics, models, seismic"
} |
Binary matrix optimization problem with element co-dependence | Question: Suppose we have an optimization problem where we want to find a binary matrix $A \in \{0, 1\}^{n \times m}$ that minimizes the score function defined as
$$S(A) = \sum^n_i \sum^m_j f_{ij}(a_{i-1j}, a_{ij}, a_{i+1j}, a_{ij-1}, a_{ij+1}) $$
where all $f_{ij}$ are known, real-valued and bound within $[-w, w]$, except for every $f_{ij}(1, 1, \cdot, \cdot, \cdot)$ and $f_{ij}(\cdot, 1, 1, \cdot, \cdot)$ which are $+\infty$. In other words, the individual score of any $a_{ij}$ depends on it and its orthogonal neighbors and no consecutive $1$s are allowed in any column. For the boundary elements, $f_{ij}$ depends only on the subset of its arguments that exist in $A$.
Is there any efficient algorithm to optimally solve it?
Is there any efficient algorithm to approximate it?
Any hints/references as to how I can approach solving/approximating this problem (if possible)?
Answer: If the functions $f_{i,j}$ can be arbitrary (subject to the constraints in your question), then the problem is $\mathsf{NP}$-hard and inapproximable in polynomial-time unless $\mathsf{P}=\mathsf{NP}$.
Unfortunately, the proof is a bit technical.
It is a reduction from rectilinear planar 3-SAT. Essentially you can embed a 3-SAT formula into a grid $A$ (to be filled) and use the functions $f_{i,j}$ to enforce some additional filling rules.
In particular, if $A$ violates some rule we will have $S(A)>0$. If $A$ does not violate any rule we will have $S(A)=0$.
All the rules can be obeyed if and only if the 3-SAT instance admits a solution.
To ease the description allow me to forget about the constraint of not having two consecutive $1$s on a column.
There are essentially four types of cells:
Empty cells: their corresponding function equals $0$ if the value in the cell is $0$ and $1$ otherwise. No other function will depend on these cells. We can essentially set these cells to $0$ and forget about them.
Variable cells: each of these cells is associated with a variable $x_i$ of the 3-SAT instance. Writing $0$ in the cell means setting $x_i$ to false, while writing $1$ means setting $x_i$ to true. You can imagine these cells as generating a true or false "signal". The function associated to these cells is the constant function $0$.
Wire cells: these cells force you to copy the same value of some fixed neighboring cell, effectively carrying the signal from the variable cells to the clause cells (see the next item). Writing the "wrong" value in these cells will cause the associate function to equal $1$. Otherwise, the function will equal $0$.
Clause cells: these cells represent a clause from the 3-SAT instance and are always neighbors of $3$ wire cells, which carry the signal encoding the truth values of the variables in that clause. The function equals $0$ if these truth values satisfy the clause and $1$ otherwise. Notice that the function does not depend on the value written in the cell itself.
Here is an example showing a 3-SAT formula, its planar rectilinear embedding, and the corresponding (still unfilled) grid with empty cells, variable cells, wire cells, and clause cells shown in white, blue, gray, and red, respectively. The arrow in the wire cells shows which value should be copied.
The matrix on the left in the next figure shows a solution $A$ with $S(A)=0$. To handle the restriction prohibiting two consecutive $1$s on the same column it suffices to (i) ensure that, in the embedding, no two wire cells referring to different variables appear as neighbors, (ii) flip the meaning of the values on the wire cells in a checkerboard pattern (see the matrix on the right). | {
"domain": "cs.stackexchange",
"id": 19725,
"tags": "algorithms, optimization"
} |
Forecasting using Python | Question: I have very less training observations (15). I need to predict 6 months into the future. What forecasting model is best suited for this scenario? Here is how my dataset looks
Month | Response Rate |% Promoters |% Detractors |%Neutrals
2019-01-01 | 5% |60% |30% | 10%
2019-02-01
.....
2020-07-01
I need to predict Response Rate, % Promoters, % Detractors and % Neutrals all of which are numeric variables.
I am new to this forum, so pardon me if I have done any mistake while framing the question.
Answer: If you use any forecasting model, then it will do over fitting on your data set, as you have just 19 observations. At least 1000 observations are the good point to start applying any machine learning model.
I need to predict Response Rate, % Promoters, % Detractors and %
Neutrals all of which are numeric variables.
You can do data exploration where you start using bar charts, line charts, and try to find the pattern for each of those variables. Using this, if not accurate, but you would be able to understand lump sum forecasted value for upcoming months.
Even built in functions are there in excel. Check this out: https://www.excel-easy.com/examples/forecast.html | {
"domain": "datascience.stackexchange",
"id": 7929,
"tags": "python, forecasting, data-analysis"
} |
Unpacking a byte into 8 bytes, where the LSB of each byte corresponds to a bit of the original byte | Question: I needed to unpack a byte into the LSBs of each byte in an 8-byte integer. After some testing, I derived a surprisingly efficient and elegant solution that uses magic numbers multiplication, though this is nothing new. Upon further investigation I found a similar solution, and from that I have adopted another.
My first function ex8 stores the least significant bit in the least significant byte. The second least significant bit into the second least significant byte, etc. They are extracted by bitshifting and masking.
The second function ex8l stores the least significant bit in the byte with the least significant memory address. So that will do the same as the other function on little-endian machines, but will have byteswapped results on a big-endian machine. I did that by producing the magic number using a union with individual bytes, so the value will depend on the byte order, and not the other way around.
unsigned long long ex8(unsigned char n) {
return n * 0x2040810204081ULL & 0x101010101010101ULL;
}
#include <assert.h>
unsigned long long ex8l(unsigned char n) {
static_assert(sizeof(long long) >= 8);
const union {
unsigned char b[8];
unsigned long long u;
} static m = {128, 64, 32, 16, 8, 4, 2, 1};
return n * (m.u >> 7) & 0x101010101010101ULL;
}
Answer: Reliance on a certain endian and width
ex8l() code is not portable if a different endian is used. Description has "but will have byteswapped results on a big-endian machine", yet such a critical restriction deserves to be in code and/or use a static_assert().
If unsigned long long is wider than 64-bit, code fails too. Use uint64_t u; in the union instead. Signature unsigned long long ex8l(unsigned char n) remains a good choice.
Unneeded use of LL
0x101010101010101ULL is sufficient as 0x101010101010101U. The LL serves no purpose here.
What if unsigned long long is 128-bit?
0x2040810204081ULL obliges a unsigned long long which may be much wider than 64-bit.
Consider LL may oblige 128-bit operations on future machines - although such machines will likely optimize.
Not a big issue with 0x2040810204081ULL, but code could use UINT64_C(0x2040810204081) to not force excessive wide math.
Wrong test (Covered by @Toby Speight)
static_assert(sizeof(long long) >= 8);, assuming CHAR_BIT == 8 is not needed as C specifies the range of long long requiring at least 64-bits.
What would be useful is static_assert(CHAR_BIT == 8); as OP's code relies on that. | {
"domain": "codereview.stackexchange",
"id": 45391,
"tags": "c, bitwise, portability"
} |
how is the PYTHON_EXECUTABLE variable/option is set with catkin? how can i set its default? | Question:
I know that the "python-problem" in arch linux is already known, and i don't like it. I've managed to install ros-hydro from source in my arch, which took a lot of time for me. I've replaced all calls to python for a explicit python2 call with grep and sed, like this:
grep -rl ./ "/bin/env python" | xargs sed -i 's/env python/env python2/g'
"#!/bin/env python" -> "#!/bin/env python2".
But, even with that, i had to set the option -DPYTHON_EXECUTABLE=python2 manually on the command line. I've tried to figure out how i could set some kind of default in the sources, so i put this line on the "python.cmake" file, in catkin/cmake/:
set(PYTHON_EXECUTABLE python2).
that works fine, but it just doesn't seem the correct way to do it, because if i do the following to build ros: ./src/catkin/bin/catkin_make_isolated -DPYTHON_EXECUTABLE=python3.3 for example, it builds normally, like if PYTHON_EXECUTABLE option became constant somehow.
could someone explain me how this variable is set, and how i could make this solution seem more "correct"... and, why doesn't the official sources make explicit calls for python2, instead of python? wouldn't that make ros more portable? I also don't like the way it feels like the only supported platform is ubuntu.
Originally posted by Edno on ROS Answers with karma: 46 on 2014-01-04
Post score: 2
Original comments
Comment by tfoote on 2014-01-05:
@Dirk Thomas I retagged this if you didn't get the notification.
Answer:
The upcoming version of catkin allows you to consistently set the Python version being used within the build process (https://github.com/ros/catkin/issues/570). Therefore you have to specify the option -DPYTHON_VERSION=3.3 when invoking CMake the first time (the number refers to the suffix of the python executable, it could also be 3 only). This will address that all scripts during the build process are invoked with a specific Python version and packages which link against the Python libraries can pick the correct library version.
While Python scripts installed by the setup.py file have their shebang lines updated automatically other scripts installed from CMake are currently not updated like that. We will work on a CMake function which will perform the same shebang rewriting for files installed using this new function (https://github.com/ros/catkin/pull/574).
Regarding the "default" shebang line ROS is currently neither Ubuntu specific nor will we change the default invocation to python2. The "problem" here is that arch is actually not following the official Python guidelines described in http://www.python.org/dev/peps/pep-0394/ It clearly states that python should always point to a Python 2 interpreter (for very good reasons). ROS will follow this recommendation which is used by most of the distributions.
Originally posted by Dirk Thomas with karma: 16276 on 2014-01-06
This answer was ACCEPTED on the original site
Post score: 4 | {
"domain": "robotics.stackexchange",
"id": 16570,
"tags": "catkin, ros-hydro, build, catkin-make-isolated, source"
} |
Solution of Time Independent Schrodinger Equation | Question: In the book Modern Physics by Serway/Moses/Moyer (on page 200, chapter Quantum mechanics in one dimension) it is written that (or what I have understood) when there are no forces on a particle, the solution of the Schrödinger equation in separable form is identical to that of a plane wave. But the wavefunction associated with a particle has to be confined to a region. Then how is it possible?
Answer: The time-independent Schrödinger equation for a free particle
(i.e. with a potential energy $U(x)=0$)
$$-\frac{\hbar^2}{2m}\frac{d^2\psi(x)}{d x^2}=E\psi(x)$$
has indeed the solutions
$$\psi(x) = Ae^{ikx} \quad\text{with any real }k$$
for the energy $E=\frac{\hbar^2k^2}{2m}$,
as can easily be verified.
Obviously these solutions are not localized in space.
Instead they are evenly spread across the whole space
from $-\infty$ to $+\infty$.
A stringent mathematician might complain, that these solutions
cannot be normalized to $\int_{-\infty}^{+\infty}|\psi(x)|^2 dx=1$.
But for a pragmatic physicist this is no problem.
Such a solution just represents a particle moving with a perfectly
certain momentum $p=\hbar k$ (thus $\Delta p=0$).
Therefore, according to Heisenberg's uncertainty principle,
it has a completely uncertain position $x$ (i.e. $\Delta x=\infty$). | {
"domain": "physics.stackexchange",
"id": 95211,
"tags": "quantum-mechanics, wavefunction, schroedinger-equation, scattering"
} |
How does a radio receive a signal from a particular station? | Question: When you tune your radio (digital or analog) to receive say 100 MHz frequency and while in the environment there are hundreds of channels everywhere around the radio. How does it choose to receive the frequency only at 100 MHz? How does the radio receiver work? Is it possible to explain this in simple terms?
Answer: This is a resonance in the circuit--- when you have a bunch of different frequencies driving a resonant system, the response is only strong for those frequencies which are close to the natural frequency of the resonant oscillator.
You can see the same phenomenon in mechanical systems. If you have a mechanical mass on a spring, and you apply a force which varies with time, the amplitude of oscillation is
$$ { F(\omega)\over \omega^2 - \omega_0^2 + i\Gamma} $$
Where $F(\omega)$ is the Fourier component of the force at the frequency $\omega$, $\omega_0$ is the natural frequency of the oscillation, and $\Gamma$ is a small damping parameter. In the limit of small $\Gamma$, you pick out only the Fourier component of F near the resonant frequency, those components which are different in frequency cancel because they push and pull at the wrong time given the natural vibration frequency of the oscillator.
This natural Fourier transform property of linear oscillators is the basis of human hearing, where the hairs in the ear are tuned to resonate only very close to one frequency. It is also the basis for radio tuning, or any other linear frequency sensitive response. | {
"domain": "physics.stackexchange",
"id": 2235,
"tags": "electromagnetic-radiation, everyday-life, radio, resonance"
} |
Is device mounted? Both UUID and device names accepted | Question:
I am trying to write is_device_mounted script, which in turn will serve a greater purpose in my home Linux system.
It does not even have an error reporting function included and as you can see, I have made it clean for readers. My intention here is to review the code for general Linux. But if you are on a *BSD, I would appreciate your feedback too!
The first version of the script follows:
#!/bin/sh
set -eu
is_empty_string() { [ -z "${1}" ]; }
sanitize_device_string() { printf '%s' "${1}" | grep '^/dev/' | head -n 1; }
is_block_device() { [ -b "${1}" ]; }
is_device_uuid_identified() { printf '%s' "${1}" | grep -F '/by-uuid/'; }
translate_uuid_to_device_name() { readlink -f -n /dev/disk/by-uuid/"${1}"; }
is_device_mounted()
{
if [ -n "${device_name}" ]; then
# 1. basic regex should be working across platforms
# tested on FreeBSD, OpenBSD, NetBSD with success
# I prefer the starting with (^) rather than filtering through all text
# 2. /proc/mounts is not available on all *BSDs, needs revision
proc_mounts=$( grep "^${device_name} " /proc/mounts )
[ -n "${proc_mounts}" ]
fi
}
[ "${#}" -ne 1 ] && { echo "Invalid number of arguments."; exit 1; }
readonly raw_input_string=${1}
is_empty_string "${raw_input_string}" && { echo "The given argument is empty."; exit 1; }
readonly device_string=$( sanitize_device_string "${raw_input_string}" )
is_empty_string "${device_string}" && { echo "The given argument is not a device path."; exit 1; }
! is_block_device "${device_string}" && { echo "The given argument is not a block device."; exit 1; }
readonly block_device=${device_string}
if is_device_uuid_identified "${block_device}"
then
readonly device_name=$( translate_uuid_to_device_name "${block_device}" )
else
readonly device_name=${block_device}
fi
if is_device_mounted "${device_name}"
then
echo "The device: ${block_device} IS mounted."
else
echo "The device: ${block_device} IS NOT mounted."
fi
Answer: Nice work. I approve of set -eu, and the script pleases Shellcheck.
Here's the things I'd consider changing.
I think error messages should go to the error stream. Example:
[ "${#}" -ne 1 ] && { echo "Invalid number of arguments." >&2; exit 1; }
# ^^^ here
Instead of using the negation operator, I'd replace the form ! test && error with plain test || error like this:
is_block_device "${device_string}" || { echo "The given argument is not a block device." >&2; exit 1; }
The script doesn't work when I use other links to block devices, such as those in /dev/disk/by-label. I'd fix that by abandoning the /by-uuid/ test, and instead following symlinks until a real file or dangling link is found:
resolve_symlink() {
f="$1"
while [ -h "$f" ]
do f=$(readlink -f -n "$f")
done
printf '%s' "$f"
}
is_empty_string "${device_string}" && { echo "The given argument is not a device path." >&2; exit 1; }
is_block_device "${device_string}" || { echo "The given argument is not a block device." >&2; exit 1; }
readonly device_name=$(resolve_symlink "$device_string")
if is_device_mounted "$device_name"
then
Why does is_device_mounted ignore its argument and use $device_name instead?
Minor issue: we assume that the block device name contains no regex metacharacters here:
grep "^${device_name} "
That's probably a fair assumption on a non-weird Linux system; I normally use Awk for robust versions such tests ($1 = $device_name, with a suitable -v option) but I don't know how well that meets your portability goals.
If using grep (without the non-standard -q option), then it's usual to discard the output, and use grep's exit status directly, rather than capturing the output and testing it's non-empty.
Minor/style: I'm not a big fan of using braces for every variable expansion. I prefer to reserve them for when they are really needed, and that seems to be the usual idiom. | {
"domain": "codereview.stackexchange",
"id": 34098,
"tags": "linux, posix, sh"
} |
Why does the conversion of SO₂ to SO₃ require high pressure? | Question: The conversion of sulfur dioxide to sulfur trioxide in the contact process for the manufacture of sulfuric acid requires the following condition:
Temperature: 450 degrees
Pressure: 1-2 atm; although high pressure would hasten the reaction, creating those pressures is not economical
Vanadium Pentoxide ($\ce{V2O5}$) as catalyst
and excess of oxygen
$\ce{2SO2 + O2 -> 2SO3}$
My textbook says that high pressure favors the rate of the reaction. The reason it states is that the volume of product is less than that of the reactants.
Can somebody explain how the volume of the product being less causes high pressure to be required?
Answer: As Philipp stated in the comments it is Le Chatelier's principle in play.
The principle in layman's language states that
If you try to bring about any physical change in a system the system will try to but not necessarily succeed in cancelling the change.
The reaction you have given is an equilibrium reaction where all the compounds are in gaseous state.
Now if you count the total number of moles on the left-hand side it will be $\ce{2SO2 + O2}$
i.e. $\ce{2 + 1}$ that is $\text{3}$.
And on the right-hand side there are only $\text{2}$ moles of $\ce{SO3}$.
So ultimately if you consider the reaction to proceed in forward direction 3 moles of gas are converting into 2 moles of gas and hence the volume decreases in forward direction.
If you think according to Gas Law(or logically) an increase in pressure generally corresponds to a decrease in volume.
Now use Le Chatelier's principle to say that if I increase the pressure on the the system ,in order the system
to counteract to the change will decrease it's own volume and it has no other way of achieving that but to proceed in the forward direction.
But if the query arises that high pressure should favour any and every reaction take a look at the example below:
$$\ce{PCl5 <=> PCl3 + Cl2}$$
Here the gaseous moles on the left-hand side is $\ce{1}$ and on the right-hand side $\ce{2}$. So increasing pressure makes the reaction proceed in the backwards direction as there are less number of gaseous moles on the left-hand side.
In fact, in higher level this idea is what is called $\Delta\text{n}_\text{g}$ i.e the difference($\Delta$) of gaseous($_g$) moles($\ce{n}$).
$$\Delta\text{n}_\text{g} \quad = \text{gaseous moles on right-hand side - gaseous moles on left-hand side}$$
If $\Delta\text{n}_\text{g}$ is positive, then on increasing pressure the reaction proceeds backwards, and vice versa
"domain": "chemistry.stackexchange",
"id": 291,
"tags": "gas-laws, vapor-pressure"
} |
Finding unique items in the array | Question: I was reading this code where the author claims that it's the fastest implementation, which I doubt, hence I decided to write my version of the same problem.
'use strict';
module.exports = function unique(arr) {
if (!Array.isArray(arr)) {
throw new TypeError('array-unique expects an array.');
}
return arr.filter(function(element, index) {
return arr.indexOf(element) === index;
});
};
The above code looks readable to me but I don't know about its performance (I think mine should be faster).
Answer: Your implementation is elegant, clean, easy to read.
The other is less so, but there is a significant difference between the two: your version returns a new array of unique values, the other removes duplicate values, modifying the input array.
Both algorithms are \$O(n^2)\$. Your implementation includes one redundant comparison per element: when the indexOf call reaches the current element, it compares it to itself, which is a redundant comparison for your ultimate intent of finding unique values. The other implementation optimized this with the var j = i + 1; step: it compares the current element to all subsequent element, never to itself.
In the end, the performance difference will come down to the implementation of splice and new array creation of the given JavaScript engine. I don't think such comparison will be very useful, except in very rare extreme cases. Also keep in mind that premature optimization is the root of all evil. And once again, there's also the matter of the different behavior of the implementations (return new unique array or mutate the parameter in-place), which makes comparisons problematic.
For what it's worth, in cases when an \$O(n^2)\$ solution to this problem is good enough (as opposed to an \$O(n)\$ solution using \$O(n)\$ extra space), I would go for your version, because it's intuitively easier to read. | {
"domain": "codereview.stackexchange",
"id": 20909,
"tags": "javascript, performance, algorithm, array"
} |
What is the difference between classical correlation and quantum correlation? | Question: What is the difference between classical correlation and quantum correlation?
Answer: Correlation is first and foremost a term from statistics. Given a system that consists of two (or more parts), it quantifies how much I can predict about the second system if I have knowledge of the first in comparison to how much I can predict about the second system without that knowledge.
For instance, if I have a bag of pieces of paper printed with either the combination 00 or 11 with equal probability and I randomly pick a piece and only look at one of the two numbers, then I know the other number perfectly, while if I don't look at the piece of paper at all, I can only guess and will be wrong 50% of the time. Clearly, knowing part of the system helps me a lot.
Now let's consider physics: In classical mechanics, you can also consider statistical systems (such as many planets in a system or a bunch of particles in a box) and ask questions about that system (for example: what's the temperature of the box? How many particles will be faster than some speed x? How many particles will be in one half of the system when I measure?). If you divide your system, you have the same situation as above and you can ask whether the knowledge of part of the system lets you infer something about the other part of that system. Since we have a system in the theory of classical mechanics, those could be called "classical correlations".
With quantum mechanics, you have a different theory of how our world works which possesses a different mathematical description. In particular, all quantum mechanical systems are statistical systems and you can ask a bunch of different questions about them - for example the spin of a particle in a particular direction, etc. If you take a system consisting of two (or more) parts, you can once again ask some questions and check how well you can predict the outcome of the second half of the system knowing the first. Your results will once again depend on the correlation of this system and you would call this "quantum correlation".
It's that simple. But there is a caveat:
Often, you will hear "quantum vs. classical correlations". What people mean by that is that in quantum mechanics, you can have degrees of correlation that are impossible to achieve if you model the system with classical mechanics. In a sense, those correlations are "purely quantum mechanical". Some people say that systems show "quantum correlations" only if they cannot be modelled by classical mechanics, otherwise they show "classical correlations". In other words: Given the terminology above, classical correlations are what I called classical correlations and quantum correlations are what I called quantum correlations minus those where I can get the same outcome using classical mechanics (for instance: I can model a quantum system with two parts where I can make a measurement - call it "color" - and the outcome on both parts is completely random: knowing one part doesn't tell me anything about the other part. Obviously, I would get the same outcome if I modelled this system with an urn of infinitely many billard balls with two colours each occuring with 50% probability and I draw two balls and try to predict the colour of the second ball knowing the colour of the first).
This terminology also makes sense, because the correlations that are not classical are mostly the interesting types of correlation, because they tell us that for such systems, classical mechanics is indeed the wrong theory and we must take quantum mechanics to describe it accurately. If you want to learn more about that, I suggest you read about Bell inequalites.
Last but not least:
You can also construct mathematical systems that allow for even higher degrees of correlation (in terms of predictiveness) than quantum mechanics, but we have not yet found any physical system with such properties. In other words, quantum mechanics isn't really that special with regards to correlations. | {
"domain": "physics.stackexchange",
"id": 90270,
"tags": "quantum-mechanics, quantum-information, quantum-entanglement, decoherence"
} |
Gain at given frequency from $z$-plane zero-pole plot. Two methods gives different results | Question: I have two zeros at $z=-1$ and two complex conjugate poles at $z=A\cos\theta\pm jA\sin\theta$
This gives me the next transfer function
$$H(z)=\frac{1+2z^{-1}+z^{-2}}{1-2A\cos\theta z^{-1}+A^2z^{-2}}$$
To get filter's DC gain I'm trying to substitute $z$ with $1$:
$$G=\frac{4}{1-2A\cos\theta+A^2}$$
Following graphical method described here (gain as a product of lengths of vectors from every zero to given point divided by product of lengths of vectors from every pole to given point) I've got next formula:
$$G=\frac{2}{\sqrt{1-2A\cos\theta+A^2}}$$
Where is my mistake? Why I've lost square root in the first case?
Answer: I think you lost the square root somewhere in your graphical method: Considering the nominator (product of lengths of vectors from every zero given point), I end up with
$$N=2*2$$
i.e. you have two vectors of length 2 (two zeros at z=-1).
Then, for the denominator you have
$$
D = \sqrt{(A\sin\theta)^2+(A\cos\theta-1)^2}^2=A^2-2A\cos\theta+1
$$
And finally $G=N/D$ yields the same result as your analytical solution. | {
"domain": "dsp.stackexchange",
"id": 4607,
"tags": "z-transform, transfer-function, poles-zeros"
} |
Black squirrel in France? | Question: I saw a nearly jet black squirrel in Ardèche today; carolinensis has invaded the UK and Italy, however I didn't find news about their arrival in France. Was that black squirrel really a Canadian species?
Answer: Yes, Sciurus carolinensis has been invasive in France, Italy and Switzerland for quite a few years already (Bertolino and Genovesi, 2003).
Note that the "original" distribution of S. carolinensis is over eastern Canada and USA. Not Canada only. | {
"domain": "biology.stackexchange",
"id": 9139,
"tags": "species-identification, mammals, invasive-species"
} |
Why does the inhomogenous wave equation solution change form with a different number of spatial dimensions? | Question: Assume $c = 1$ for what follows.
For the general inhomogenous wave equation in one spatial dimension $$\left(\frac{\partial^2}{\partial t^2} - \frac{\partial^2}{\partial x^2}\right)\phi = v(x, t),$$ the article The Wave Equation with a Source gives (where we ignore the homogenous part of the solution, e.g. by setting initial conditions to $0$): $$\phi(x, t) = \frac{1}{2} \int_0^t \int_{x - (t - s)}^{x + (t - s)} v(y, s)\ dy \ ds.$$
For the general inhomogenous wave equation in three spatial dimensions $$\left(\frac{\partial^2}{\partial t^2} - \nabla^2\right)\phi = v({\bf r}, t),$$ the article Solution of Inhomogeneous Wave Equation gives
$$\phi({\bf r}, t) = \int \frac{ v({\bf r}', t - \vert{\bf r} -{\bf r}'\vert)} {4\pi \vert{\bf r} - {\bf r}'\vert}\ dV'.$$
Why does the integrand in the three-dimensional case depend inversely on $\vert{\bf r} - {\bf r}'\vert$, while the integrand in the one-dimensional case doesn't?
Answer: The wave equation is linear, so the inhomogeneous problem:
$$
(\partial_t^2-\Delta)\phi=v
$$
can be solved by a convolution with the Green’s function:
$$
\phi=G*v\\
(\partial_t^2-\Delta)G=\delta(x,t)
$$
Your question can therefore be reformulated as explaining the spatial dimension $D$ dependence of power law of $G$.
This can be partially tackled by dimensional analysis. Setting $c=1$, $x,t$ have dimension $1$. The delta function has dimension $-(D+1)$. A partial derivative has dimension $-1$ so the d’Alembertian has dimension $-2$. Therefore, $G$ has dimension $1-D$. You therefore expect that:
$$
G\sim r^{1-D}
$$
This gives the correct scaling in 1D and 2D, but not in 3D.
This is because you have an extra Dirac delta factor $\delta(r-t)$ which has dimension $-1$. Correcting this you do get for $D=3$:
$$
G\sim r^{-1}
$$
while still being consistent with dimensional analysis. This extra Dirac delta factor has a simple interpretation: it is Huygens’ principle.
In retrospect, the scaling could have been anticipated. However, it is best to do the mathematical derivation and then interpret the calculation. A priori hand waving arguments can easily give the wrong result.
Hope this helps. | {
"domain": "physics.stackexchange",
"id": 94917,
"tags": "waves, field-theory, spacetime-dimensions, greens-functions"
} |
Will A* in AMCL direct robot into grey area on map | Question:
I would like to edit the map produced by a SLAM algorithm, to add in no-go areas. These areas do not have a physical barrier. Would greying them out be sufficient to prevent the A* algorithm used by AMCL from directing the robot into them?
If not, how would one accomplish this?
Also, can AMCL make use of .png maps, or does it have to be the default pgm?
Thanks
Originally posted by Ben12345 on ROS Answers with karma: 98 on 2018-02-12
Post score: 0
Answer:
I've manually edited maps using GIMP and had great success with keeping planners from making paths through unwanted areas. I used black instead of gray as that corresponds to "very" occupied. From the wiki:
When comparing to the threshold parameters, the occupancy probability of an image pixel is computed as follows: occ = (255 - color_avg) / 255.0, where color_avg is the 8-bit value that results from averaging over all channels, e.g. if the
image is 24-bit color, a pixel with the color 0x0a0a0a has a probability of 0.96, which is very occupied. The color
0xeeeeee yields 0.07, which is very unoccupied.
As black is 0x000000 it should yield 100% (or very) occupied. Gray may not result in an area being considered occupied.
Edit:
According to the wiki that I linked to, you can use a PNG instead of PGM, but PNG is not supported on OS X.
As @Procópio stated in the comments
painting the map black will have the
side-effect of degrading localization,
as the real LIDAR measurements will
not match those of the edited map.
Originally posted by jayess with karma: 6155 on 2018-02-12
This answer was ACCEPTED on the original site
Post score: 0
Original comments
Comment by jayess on 2018-02-12:
Note that AMCL is not going to be planning any paths for you. It will localize your robot, but not generate any paths.
Comment by Procópio on 2018-02-13:
painting the map black will have the side-effect of degrading localization, as the real LIDAR measurements will not match those of the edited map.
Comment by jayess on 2018-02-13:
@Procópio: good point
Comment by Ben12345 on 2018-02-13:
Yes, i said A* in AMCL.
I agree with Procòpio, black denotes a physical barrier that is not there in the real world and will affect the localisation. Surely the A* used would not direct the robot into unexplored territory?
Comment by jayess on 2018-02-13:
Please do not comment if you do not have anything constructive to add
? | {
"domain": "robotics.stackexchange",
"id": 30026,
"tags": "ros, navigation, mapping, pgm, amcl"
} |
Passing uint64_t from C++ to R: Hilbert Mapping - xy2d function | Question: I have been working with Rcpp to perform a forward and backward Hilbert Mapping. Below is an implementation based on this code.
My application is in genomics and I may be dealing with enormous datasets, which necessitates the use of very large integers for indices, so I found this code for passing large integers to R using Rcpp and the bit64 R package and incorporated it after the for loop.
The xy2d() function works properly. My interest is in your thoughts regarding the code AFTER the for loop, which prepares the result for passage back to R. Please let me know what you think :)
#include <Rcpp.h>
using namespace Rcpp;
# include <bitset>
# include <cstdint>
# include <ctime>
# include <iomanip>
# include <iostream>
using namespace std;
//****************************************************************************80
// [[Rcpp::export]]
Rcpp::NumericVector xy2d ( int m, uint64_t x, uint64_t y )
//
//****************************************************************************80
{
uint64_t d = 0;
uint64_t n;
int rx;
int ry;
uint64_t s;
n = i4_power ( 2, m );
if ( x > n - 1 || y > n - 1) {
throw std::range_error("Neither x nor y may be larger than (2^m - 1)\n");
}
for ( s = n / 2; s > 0; s = s / 2 )
{
rx = ( x & s ) > 0;
ry = ( y & s ) > 0;
d = d + s * s * ( ( 3 * rx ) ^ ry );
rot ( s, x, y, rx, ry );
}
std::vector<uint64_t> v;
v.push_back(d);
//v[0] = d
size_t len = v.size();
Rcpp::NumericVector nn(len); // storage vehicle we return them in
// transfers values 'keeping bits' but changing type
// using reinterpret_cast would get us a warning
std::memcpy(&(nn[0]), &(v[0]), len * sizeof(uint64_t));
nn.attr("class") = "integer64";
return nn;
}
This post will be followed up shortly with another post regarding the rot() function, as well as the reverse d2xy() function
Answer:
Because we write C++, there's no reason to declare (most) variables at the beginning of the scope of your function. For example, all of d, rx, ry and s have been declared for no reason if you happen to throw and exit the function.
You don't need s after the for-loop, so it should be local to the loop only. Similar for rx and ry.
Make use of shorthand operators like /= and +=.
You can make v const and initialize it with a suitable constructor, in this case std::vector<uint64_t> v(1, d); initializes v to hold one element equal to d. But really, as it stands, I see no point in using an array here if you just have a single value (I suspect your example is incomplete and not representative of your real use case, which is a shame).
Because len can be const, make it const. This protects from unintended errors and possibly allows the compiler to perform more optimizations.
As a general comment, avoid saying using namespace std;, it's not good for you.
I don't know the interface of Rcpp::NumericVector, but might you initialize it directly in the spirit of const Rcpp::NumericVector nn(v.cbegin(), v.cend()), for instance? | {
"domain": "codereview.stackexchange",
"id": 33968,
"tags": "c++, c++11, r, coordinate-system, rcpp"
} |
Newton's law of restitution | Question: Newton's law of restitution. Could someone tell me what the easiest form of this law to use is?
I usually try to use e=(v(2)-v(1))/(u(1)-u(2)).
Does the law also work if I don't know the direction of v(1) and v(2)?
So for example, if a particle moving with u(1) hits a steady object and I don't know if the particle's speed vector will change its direction, can I still use the law in the form e=(v(2)-v(1))/(u(1)-u(2)) with u(2)=0 (since the object was steady originally)?
Answer: The law states this:
Two objects with masses $m_1$, $m_2$ and velocities $\vec{v}_1$ and $\vec{v}_2$ collide with contact normal $\vec{n}$.
The final velocities are $\vec{v}_1^\star$ and $\vec{v}_2^\star$ such that the coefficient of restitution $\epsilon$ is defined by $$\vec{n}\cdot \left( \vec{v}_2^\star - \vec{v}_1^\star \right) = -\epsilon \;\left( \vec{n}\cdot \left( \vec{v}_2 - \vec{v}_1 \right) \right)$$
If you consider the impact speed along the contact normal $v_{impact} =\vec{n}\cdot \left( \vec{v}_2 - \vec{v}_1 \right)$ and the rebound speed along the contact normal $v_{rebound} =\vec{n}\cdot \left( \vec{v}_2^\star - \vec{v}_1^\star \right)$
The collision law can now be stated as $$\boxed{ v_{rebound} = -\epsilon \;v_{impact} }$$
The last part of the impact mechanics states that there is an exchange in momentum along the contact normal with equal and opposite parts on the two masses $$m_1 \vec{v}_1^\star = m_1 \vec{v}_1 - J\, \vec{n}\\m_2 \vec{v}_2^\star = m_2 \vec{v}_2 + J\, \vec{n}$$
Combining the above gives the law $$ J =(1+\epsilon) \frac{m_1 m_2}{m_1+m_2} v_{impact} $$
NOTE: This is the easiest way for me to understand contacts, $\cdot$ is the vector dot product and $\vec{n}\cdot\vec{n}=1$. | {
"domain": "physics.stackexchange",
"id": 22600,
"tags": "newtonian-mechanics, momentum, collision"
} |
Contacts list manager | Question: I just learned about arraylists and I got a mission to build Mobile phone application with contact list when you can add \ remove \ modify \ search for contacts.
If you think I should add something to the code feel free to tell me :)
MobilePhone class
public class MobilePhone{
private String phoneNumber;
public ArrayList<Contact> myContacts = new ArrayList<Contact>();
public MobilePhone(String phoneNumber) {
this.phoneNumber = phoneNumber;
}
public void printContactList() {
if (!myContacts.isEmpty()) {
for (int i = 0; i < myContacts.size(); i++) {
System.out.println(i + 1 + ". " + "Name: " + myContacts.get(i).getName() + " || Phone number: " + myContacts.get(i).getPhoneNumber());
}
} else {
System.out.println("Your contact list is empty!");
}
}
public void addContact(String name, String phoneNumber) {
if (searchContactByPhoneNumber(phoneNumber) == -1) {
Contact contact = new Contact(name, phoneNumber);
myContacts.add(contact);
System.out.println("Contact " + name + " with phone number " + phoneNumber + " just added!");
} else {
System.out.println("This contact is already on your list.");
}
}
public void removeContact(String phoneNumber) {
int index = searchContactByPhoneNumber(phoneNumber);
if (index >= 0) {
System.out.println("You have removed " + myContacts.get(index).getName());
myContacts.remove(index);
}
}
public int searchContactByPhoneNumber(String phoneNumber) {
for (int i = 0; i < myContacts.size(); i++) {
if (phoneNumber.equals(myContacts.get(i).getPhoneNumber())) {
System.out.println(myContacts.get(i).getName() + " Found!");
return i;
}
}
return -1;
}
public int searchContactByName(String name){
for(int i =0; i<myContacts.size(); i++){
if(name.equals(myContacts.get(i).getName())) {
return i;
}
}
return -1;
}
public void changeContact(String oldName, String newName) {
int index = searchContactByName(oldName);
if(index >=0){
Contact updatedContact = new Contact(newName,myContacts.get(index).getPhoneNumber());
myContacts.set(index,updatedContact);
System.out.println("You have changed contact " +oldName + " to " + newName + "\n" +
"Phone number: " +myContacts.get(index).getPhoneNumber());
} else {
System.out.println("No contact named " + oldName + " on your contact list");
}
}
}
Contact class
public class Contact {
private String name;
private String phoneNumber;
public Contact(String name, String phoneNumber) {
this.name = name;
this.phoneNumber = phoneNumber;
}
public String getName() {
return this.name;
}
public String getPhoneNumber(){
return this.phoneNumber;
}
}
PrintService class
public class PrintService {
public static void printMenu() {
System.out.println("Press:" + "\n" +
"\r" + "1. Show contact list" + "\n" +
"\r" + "2. Add an contact" + "\n" +
"\r" + "3. Remove an contact" +"\n" +
"\r" + "4. Search for an contact" + "\n" +
"\r" + "5. Change info about some contact" +"\n"+
"\r" + "6. Exit.");
}
}
Main class
public class Main {
private static final Scanner sc = new Scanner(System.in);
private static final MobilePhone mobilePhone = new MobilePhone("123456789");
public static void main(String[] args){
boolean exitRequested = false;
while(!exitRequested) {
PrintService.printMenu();
int options = Integer.parseInt(sc.nextLine());
switch (options) {
case 1:
mobilePhone.printContactList();
break;
case 2:
addContact();
break;
case 3:
removeContact();
break;
case 4:
searchContact();
break;
case 5:
changeContact();
break;
case 6:
exitRequested = true;
break;
}
}
}
private static void addContact() {
System.out.println("Name?");
String name = sc.nextLine();
System.out.println("Phone number:");
String phoneNumber = sc.nextLine();
if (phoneNumber.length() != 10) {
System.out.println("Wrong input!");
} else {
mobilePhone.addContact(name, phoneNumber);
}
}
private static void removeContact(){
System.out.println("Which contact would you like to remove?" +"\n" +
"Please type phone number.");
String phoneNumber = sc.nextLine();
mobilePhone.removeContact(phoneNumber);
}
private static void searchContact(){
System.out.println("Please enter phone number ");
String phoneNumber = sc.nextLine();
if(mobilePhone.searchContactByPhoneNumber(phoneNumber) == -1) {
System.out.println("No contact found with phone number " + phoneNumber);
} else {
mobilePhone.searchContactByPhoneNumber(phoneNumber);
}
}
private static void changeContact(){
System.out.println("Which contact would you like to modify?");
String currentName = sc.nextLine();
System.out.println("Enter your modify");
String updatedName = sc.nextLine();
mobilePhone.changeContact(currentName,updatedName);
}
}
Thank you very much!
Answer: What I like
You follow the Java Naming conventions and the names you choose for your identifiers are pretty good.
What I don't like
Unnecessary mutability
The member variable phoneNumber in your class MobilePhone does not change during the object's lifetime, therefore it should be declared final. The same applies to name and phoneNumber in your class Contact.
Inappropriate visibility
The member variable myContacts in your class MobilePhone is declared public. That means that its content can be changed from anywhere outside the class MobilePhone. This violates the encapsulation/information hiding principle, one of the most important concepts of object oriented programming.
Using indexes instead of objects
Your search* methods return indexes instead of the real objects. Therefore the caller of that methods must access the list again in case it wants to do something with the data of that particular contact (which is almost always the case...). So you better return the contact object itself.
Inline signalling of errors
In case your search* method finds nothing you return a special value. In Java we have the concept of Exceptions to handle this kind of problem.
Yes, there is a rule that you should not misuse Exceptions as control flow. But they are exactly for this particular purpose, avoiding this in-band signalling of a special case when returning a processing result.
In your small project Exceptions will basically replace the if/else statements with try/catch blocks, which does not look like a benefit at all. But in larger projects it is very likely that the error case cannot be handled by the direct calling method but by some other method way up in the call stack. Then only the code able to deal with that problem needs to know that it may occur. Without the use of exceptions any method way down in the call stack needs these if statements.
Another way to go around this is the use of Optional as the return value. But that is just another special value in my opinion.
Naming
As I initially wrote your naming is pretty good, with one minor exception. In your main method the local variable options will always contain a single user choice. Therefore it should not have the plural s.
Having written that, the name option might not be that good at all and should rather be selectedOption or just choice...
Answer to comment
I've tried before to make all methods (Contact contact) but fo real I got stuck hard. – עמית שוקרון
That should be quite easy.
public class MissingContact extends RuntimeException{
public MissingContact(String message){
super(message);
}
}
public Contact searchContactByPhoneNumber(String phoneNumber) {
for (contact: myContacts) {
if (phoneNumber.equals(contact.getPhoneNumber())) {
System.out.println(contact.getName() + " Found!");
return contact;
}
}
throw new MissingContact("No contact with number "+phoneNumber);
} // search by name similar
To avoid the Exception handling you should apply the check, then act pattern by adding appropriate check methods:
public boolean hasContactWithPhoneNumber(String phoneNumber) {
for (contact: myContacts) {
if (phoneNumber.equals(contact.getPhoneNumber())) {
System.out.println(contact.getName() + " Found!");
return true;
}
}
return false;
} // searchByName similar | {
"domain": "codereview.stackexchange",
"id": 41059,
"tags": "java"
} |
Do the electrons of a metal's "electron sea" have an effect on x-ray diffraction? | Question: Could it be that there is no effect from the "electron sea" because there is no net electron flow, or does that flow create a fuzziness in the results?
Answer: Electrons in metals are described quantum mechanically by Bloch waves $$\psi(\vec r)=u_{\vec k}(\vec r)\exp(i\vec k\cdot\vec r)$$ where $u_{\vec k}(\vec r)=u_{\vec k}(\vec r+\vec R)$, with $\vec R$ a lattice vector, has the periodicity of the crystal lattice. Therefore, the location probability of the electrons, and thus their average density, $$ \psi^*(\vec r)\cdot\psi(\vec r)=|\psi(\vec r+\vec R)|^2$$ also has the periodicity of the lattice. Thus x-ray scattering from electrons in a metal, including from conduction electrons, is determined by the lattice structure. | {
"domain": "physics.stackexchange",
"id": 35378,
"tags": "diffraction, electronic-band-theory, metals, x-rays, x-ray-crystallography"
} |
Why joint probability in generative models? | Question: I have been reading about generative models for past few days and there's one question that's bugging me. Many sources(Wiki, Google dev article) mention that generative models try to model the joint probability distribution p(x, y) and that to generate new samples we sample from this distribution. My question is why model and sample from the joint distribution and not the conditional distribution p(x|y)?
Answer: Its because joint distribution takes all of the values variable Y can take and conditional only one. Additionally it could be easier to compute/use approximative methods for joint. | {
"domain": "datascience.stackexchange",
"id": 6682,
"tags": "generative-models"
} |
Where does the energy stored in the inductor go on opening the switch? | Question: Suppose we have a simple RL circuit. At $t=0$, I close the switch so that current starts flowing in the circuit. When the steady state is achieved, current $i=\frac{\epsilon}{R}$ would be flowing in the circuit due to which an energy $\frac{Li^2}{2}$ will be stored in the magnetic field lines on the inductor. But as soon as the switch is opened, the current would become $0$, which makes the magnetic field lines disappear suddenly, which according to Faraday's law must induce an emf. But as the circuit is open no current will flow in it (according to my teacher, charge can never accumulate in a circuit. So if current flows in an open circuit, it would mean that charge is accumulating in it). If there is no current how can the energy in the magnetic field lines disappear suddenly? Isn't this a violation of the law of conservation of energy?
Ps: I read the answer given in a similar question Where the energy stored in magnetic field goes? but I kinda disagree with the point that in superconducting coil, current will keep flowing because according to Prof. Walter Lewin, no electric field can exist inside a superconducting coil, so current cannot exist in the coil. Only surface currents must exist.
Answer: This is a situation where the simple rules are insufficient. You simply cannot analyze that circuit any more than you can solve x+2=x+3.
What happens in the real world is that the inductor creates enough emf to form a spark in the switch. This means the switch no longer acts like an ideal switch.
In the real world, we call this effect "flyback". It can damage components, so we typically design circuits to prevent this from occurring. For example, it is common to see a flyback resistor in parallel with the inductor on large motors. It gives the current somewhere to go. | {
"domain": "physics.stackexchange",
"id": 83387,
"tags": "electromagnetism, electric-circuits, electric-fields, electromagnetic-induction, inductance"
} |
Volume element in radial integration | Question: I am reading the quantum scattering book by Taylor. He pointed out that since we are not observing the magnitude of incoming momentum we integrate over it. Then he replaces the volume element $d^3p$ with $d\Omega~p^2 dp$. Actually I don't understand the reasoning behind this replacement. Can anyone help me on this?
I attached the scan of the related page in the book.
Answer: You may parametrize momentum space in spherical coordinates $(p,\theta,\varphi)$. The volume element in this coordinates may be computed from the Jacobian $J$ of the transformation between Cartesian coordinates and the new ones:
\begin{equation}
dV=dp_xdp_ydp_z=Jdpd\theta\varphi
\end{equation}
Now, from $p_x=pc_\varphi s_\theta$, $p_y=ps_\varphi s_\theta$ and $p_z=pc_\theta$ (where $s_x=\sin x$ and $c_x=\cos x$) it is straightforward to derive $J=p^2 s_\theta$. Defining $d\Omega=\sin\theta d\theta d\varphi$ we get:
\begin{equation}
dV = p^2 dp d\Omega
\end{equation} | {
"domain": "physics.stackexchange",
"id": 38259,
"tags": "scattering, coordinate-systems, integration"
} |
Neural Network output for the game of Checkers | Question: I'm trying to train a RL agent to play the game of checkers (AlphaZero style) and so far I've managed a proof of concept training a connect 4 agent up until perfection. However, unlike connect 4, checkers moves pieces rather than placing them and sometimes even multiple times. I think I understand how I would do this for chess: I have an output size of 80 (16+8*8) and have the first 16 outputs represent the piece that will move and the other 64 represent the position it will move to. I'm not sure if this is a valid solution though. The real problem arises when considering checkers with multiple jumps. Is there any solution to this and am I thinking about it the right way? I've pondered not changing the player turn whenever a double jump is available but I feel like this will screw with the MCTS.
Answer: Correction regarding policy encoding
The policy encoding you propose for Chess is not the one used by the original AlphaZero and would not work. The network needs to output a policy distribution over the possible moves, so there needs to be a dedicated output value for each move. Your encoding only allows the network to express a single move, or at best some low-rank approximation of the full distribution.
The AlphaZero paper (free access) has a section Representation, which explains the policy encoding they used. It has shape 73x8x8. Of course in a chess position there are never actually that many moves, so invalid moves are masked out.
Complex move representations in AlphaZero
As you also note there are two possible solutions to representing these types of compound moves:
Figure out all possible moves and some reasonable way to encode them in a policy head. AlphaZero can still train quite well with a large, sparsely used policy head as shown by their chess results.
Represent the game differently, with moves split up into submoves that don't always change whose turn it is, and then find an encoding for this smaller set of moves. AlphaZero and more generally MCTS doesn't require alternating players, for example the followup work MuZero (free access) works just fine on the Atari games with only a single player.
Practical advice for checkers
I think both solutions would work fine for checkers. Even including compound moves I think checkers still has less than 73*8*8 = 4672 possible moves, although I don't know for sure. If it's in the same ballpark or lower the first method should work. The first solution also has the advantage that it's simpler to implement and that you can achieve deeper search with a fixed number of neural network evaluations.
The second solution is in some sense more general and scalable, so it could be interesting to experiment with as well. | {
"domain": "ai.stackexchange",
"id": 3564,
"tags": "neural-networks, reinforcement-learning, deep-rl, alphazero, board-games"
} |
Counting the overlapping intervals in the union of two sets | Question: I had to recently implement a code to count the union of all intervals (if there are overlapping intervals) for an interview. I was provided with the following function stub:
public int solution(int[] A, int[] B) {
}
A[0] and B[0] form one interval, A[1] and B[1] the next one...and so on.
I tried two approaches.
Copying all the intervals to an object and using collections to sort it based on the first value of an interval(Array A[]). But if the number of intervals is large, then is it a good idea to do so?
public class Solution {
class Pair {
private int a,b;
Pair() {}
Pair(int a, int b) {
this.a = a;
this.b = b;
}
public int getA() {
return a;
}
public int getB() {
return b;
}
}
class PairListComparator implements Comparator<Pair> {
@Override
public int compare(Pair o1, Pair o2) {
if(o1.a < o2.a)
return -1;
else if(o1.a > o2.a)
return 1;
else {
if(o1.b < o2.b)
return -1;
else if(o1.b > o2.b)
return 1;
else return 0;
}
}
}
ArrayList<Pair> inputList = new ArrayList<Pair>();
Solution() {
}
public void createInputList(int[] A, int[] B) {
for(int i = 0; i < A.length; i++) {
Pair p = new Pair(A[i], B[i]);
inputList.add(p);
}
PairListComparator plc = new PairListComparator();
Collections.sort(inputList, plc);
}
public int evaluateList() {
int count = 0;
int ptr = 0;
for(int i = 1; i < inputList.size(); i++) {
int firstA = inputList.get(i-1).a;
int firstB = inputList.get(i-1).b;
int secondA = inputList.get(i).a;
int secondB = inputList.get(i).b;
if(secondA < firstB) {
int newB = firstB > secondB ? firstB : secondB;
inputList.set(ptr, new Pair(firstA, newB));
ptr++;
}
else {
count++;
}
}
return count;
}
public int solution(int[] A, int[] B) {
// write your code in Java SE 8
if((A.length != B.length) || (A.length == 0))
return 0;
createInputList(A, B);
return evaluateList();
}
public static void main(String[] args) {
// TODO Auto-generated method stub
Solution s = new Solution();
/*
* [1, 12, 42, 70, 36, -4, 43, 15], [5, 15, 44, 72, 36, 2, 69, 24]
*/
int[] a = {1,12,42,70,36,-4,43,15};
int[] b = {5,15,44,72,36,2,69,24};
int cnt = s.solution(a, b);
System.out.println(cnt);
}
}
Implementing my own merge sort on both of the arrays. So sorting based on the first value of all intervals (Array A[]). But this was scored quite low (62%) on the performance scale.
What is the best possible solution keeping algorithm efficiency \$O(n \log n)\$ and space \$O(n)\$ in worst case?
public class Solution {
public void mergesort(int[] A, int[] B, int[] temp, int[] temp2, int start, int end) {
if(start < end) {
int mid = (start + end)/2;
System.out.println(mid + ":" + start + ":" + end);
mergesort(A, B, temp, temp2, start, mid);
mergesort(A, B, temp, temp2, mid+1, end);
merge(A, B, temp, temp2, start, mid+1, end);
}
}
public void merge(int[] A, int B[], int[] temp, int[] temp2, int start, int mid, int end) {
int leftEnd = mid - 1;
int left = start;
int size = end - start + 1;
int k = start;
while(left <= leftEnd && mid <= end) {
if(A[left] <= A[mid]) {
temp[k] = A[left];
temp2[k] = B[left];
k++; left++;
}
else {
temp[k] = A[mid];
temp2[k] = B[mid];
k++; mid++;
}
}
while(left <= leftEnd) {
temp[k] = A[left];
temp2[k] = B[left];
left++; k++;
}
while(mid <= end) {
temp[k] = A[mid];
temp2[k] = B[mid];
mid++; k++;
}
for(int i = 0; i < size; i++) {
A[end] = temp[end];
B[end] = temp2[end];
end--;
}
}
public int evaluateList(int[] A, int[] B) {
int count = 0;
int ptr = -1;
for(int i = 0; i < A.length; i++) {
if(i != 0 && A[i] <= B[i-1]) {
B[ptr] = B[i-1] > B[i] ? B[i-1] : B[i];
A[ptr] = A[i-1];
}
else {
count++; ptr++;
}
}
return count;
}
public int solution(int[] A, int[] B) {
int[] temp = new int[A.length];
int[] temp2 = new int[A.length];
mergesort(A, B, temp, temp2, 0, A.length-1);
//System.out.println(Arrays.toString(A));
//System.out.println(Arrays.toString(B));
return evaluateList(A, B);
}
}
Answer: Bugs
I tried this test input:
int[] a = {1, 10, 30, 60 };
int[] b = {2, 20, 40, 70 };
and your program returned 3. Shouldn't the answer be 4 because there are 4 distinct intervals?
Then I tried this test input:
int[] a = {1, 10, 30, 60 };
int[] b = {100, 20, 40, 70 };
and your program returned 2. I would have expected the answer to be 1 because the first interval is a superset of all the other intervals.
Corrected code
Your second implementation seemed to be closer to working than the first. I made the following adjustments to your second implementation to fix the above problems:
public int evaluateList(int[] A, int[] B) {
int count = 1;
int maxB = B[0];
for (int i = 1; i < A.length; i++) {
if (A[i] <= maxB) {
maxB = maxB > B[i] ? maxB : B[i];
} else {
count++;
maxB = B[i];
}
}
return count;
} | {
"domain": "codereview.stackexchange",
"id": 17321,
"tags": "java, performance, interview-questions, comparative-review, interval"
} |
Multiple images of a car over hot road | Question: I am wondering about this image of three cars on a hot road, which is from Wikipedia:
I am seeing two images of the white car in the center of the image. One regular image and one which seems to be reflected from the road. From what I read in Feynman's lecture this is due to the temperature gradient in the air due to the hot road. This leads to a gradient in the refractive index in the air. This allows for curved paths to be paths of stationary time (Fermat's principle). So the additional image comes from the violet path in the sketch below
And I assume the regular image comes from the green path. All the paths in between the green and the violet (like the blue path) are not stationary and hence do not contribute. We have exactly two stationary paths here (green and violet) and this is why we see the white car twice.
Question 1: Is my understanding correct until here?
Question 2: Why do I see several copies of the headlights of the truck in the back of the image? Are there more than two stationary paths for the light coming from that truck? And why? What is different compared to the car in the center of the image?
Answer: $1$: No. Your eye has a lens. For each point on the car, there is a cone of direct paths from the point to your eye. The lens focuses light from all of those paths to a point on your retina. A camera works much the same way.
There is also a distorted cone that heads toward the road, is bent by non-uniformly hot air near the road, and winds up headed toward your eye. Again your eye focuses it to a point on the retina. As this cone is on final approach to your eye, it is headed upward from the road. The light you receive is just as if there was a car where the road is. So it looks like a car in that direction.
$2$: It looks like the road is not perfectly flat. There seem to be low spots where puddles would collect on a rainy day. This changes the shape of the layer of hot air near the surface.
It could also be differences in road color or composition creating local hot spots. | {
"domain": "physics.stackexchange",
"id": 84369,
"tags": "optics, refraction"
} |
When to clean data? | Question: I am very new to data science / ML and I have what I think is a very basic question - when to 'clean' the data?
Do I clean data before using it to train a classifier (a binary classifier in my experiments)?
Do I clean data that I try to classify using this classifer?
Both?
The data in my case is just a series of Tweets.
Answer: Data Cleaning or Data Munging as it is referred in most cases, is the process of transforming the data from the raw form that they exist after their collection into another format with the intent of making it more appropriate for their future process e.g. training models etc..
This process is taking place at the beginning of the whole procedure and before the training and validation of the models. In text mining problems, you have also to treat the punctuation marks, remove the stopwords (it depends on the data representation that you will choose, for unigrams it is fine, but for bigrams it is not recommended at all) and also do the stemming or lemmatization processes. | {
"domain": "datascience.stackexchange",
"id": 2995,
"tags": "data-cleaning"
} |
Stabilisation of anions by silicon | Question: In my lecture notes it was stated that in $\ce{Me3SiCH2SMe}$, deprotonation with tBuLi generates an anion in between Si and S. I've been not able to find the $\mathrm pK_\mathrm a$ values (not listed in Reich nor Evans).
Since Si is electropositive it should actively destabilise the anion, meaning that I'd expect to see terminal methyl group deprotonated.
No additional detail about conditions was given, but since it was tBuLi I presume low temperature (kinetic deprotonation).
Is there a reason why this deprotonation takes place? Are the $\mathrm pK_\mathrm a$ values relatively different?
Answer:
Since Si is electropositive it should actively destabilise the anion
The statement above is largely one based on an inductive effect through the sigma system, in which the polarisation of the C–Si bond is considered. In this sense, Si does destabilise the anion.
Silicon stabilises anions due to negative hyperconjugation (more broadly, σ-conjugation). This is broadly speaking the pC → σ*Si–C donation, although there is a second, arguably stronger interaction that is also involved in stabilisation of the anion.
The concept of silicon stabilising an anion is easily understood by considering a qualitative molecular orbital diagram:
The stabilisation of an anion by adjacent sulfur, phosphorus and silicon groups. Taken from Molecular Orbitals and Organic Chemical Reactions (Reference edition), Fleming
In the diagram above the strongest interaction is the one between the p-orbital on carbon and the σSi–C orbital — this results in ψ1 being substantially lowered in energy (qualitatively, E, in the diagram above).
A second (and arguably more important) interaction exists between the low lying σ*Si–C and the p-orbital on carbon, ensuring that ψ2 remains relatively low energy (if σ*Si–C was higher in energy, as it would be for some other elements, ψ2 would be raised higher, and this would overall counter-act the energy lowering interaction giving rise to E).
As a final point, the stabilisation is now widely accepted not to involve d-orbitals on silicon, as had been previously hypothesised. Although in theory allowing the empty d-orbitals to accept electron density from the anion would lower the overall energy of the system, the d-orbitals aren't of similar enough energies to the orbitals that the anion charge is in, making efficient overlap highly unlikely. | {
"domain": "chemistry.stackexchange",
"id": 8509,
"tags": "organic-chemistry, acid-base, organosilicon-compounds"
} |
Have meteorites been found in the oldest sedimentary rocks? | Question: Dropstones sometimes occur in sedimentary rocks. These are foreign rocks and boulders that happened to "drop" into a sediment and were fossilised when the sediment was lithified.
(source: Michael C. Rygel via Wikimedia Commons)
But what about meteorites? You can find meteorites here on Earth, and while such large blocks as pictured may be extremely rare, they do exist somewhere. We don't have a lot of meteorites now, but it seems to me that if we go back billions of years, it makes sense that somewhere, sometime, a meteorite has been incorporated into the rock record.
I suppose that it is also unlikely because most meteorites fall in the ocean is then subducted and the meteorite "dropstones" are lost to the mantle.
EDIT
I found this recent (2020) paper talking about 20 cm large meteorites in Ordovician sediments, that came from the breakup of a larger asteroid:
Absolute dating of the L-chondrite parent body breakup with high-precision U–Pb zircon geochronology from Ordovician limestone
Answer: A main source of material of meteoritic origin in sedimentary strata is likely to have been debris from a larger impact, deposited in a marine sedimentary environment. Evidently, as you stated, the older the sediment, the greater the chance of alteration or even complete destruction of any trace of the fragments.
In addition to the micrometeorites found in Sweden (as per DavePhD's linked articles), the article Cosmic-ray exposure ages of fossil micrometeorites from mid-Ordovician
sediments at Lynna River, Russia (Meier et al. 2014), reports observations of chondrite micrometeoritic dust occurring in mid-Ordovician (~ 470 million years ago) sediments found around the Lynna River, Russia, as well as localities in China. The authors state that the composition of the micrometeoritic samples makes them largely resistant to weathering and diagenesis.
The authors of the article Noble gases in fossil micrometeorites and meteorites from 470 Myr old
sediments from southern Sweden, and new evidence
for the L-chondrite parent body breakup event (Heck et al. 2008) concluded that it was 'remarkable' that the solar gases trapped in the chromite grains from the Ordovician-aged 'fossil meteorites found in Sweden (and according to Meier et al. 2014, also in Russia and China) have survived not only the heat up due to entry into the Earth's atmosphere, but also the long residence time in the sediments.
Further back, pristine sedimentary layers, let alone intact meteorites within the strata, become increasingly difficult to find, so their incorporation into the geology would be in the form of geochemical traces.
A couple of examples of this include the following: the Nature article (Abstract only - paywalled), Tungsten isotope evidence from approx 3.8-Gyr metamorphosed sediments for early meteorite bombardment of the Earth (Schoenberg et al. 2002), suggests that Archaean-aged metamorphic rocks found in Greenland and Canada contain tungsten anomalies that can, according to the authors, only be meteoritic remnants in the metamorphosed sediments. Chemical traces of possible chondritic origin have been hypothesised to have been included in Archaean spherules in metamorphosed sedimentary strata in South Africa, according to the article Geological and Geochemical Record of 3400-Million-Year-Old Terrestrial Meteorite Impacts (Lowe et al. 1989) (Abstract only - paywalled).
"domain": "earthscience.stackexchange",
"id": 304,
"tags": "earth-history, sedimentology, meteorite"
} |
Computing the n-th prime | Question: I've written some Python 3 code that computes the \$n\$-th prime. I implemented first a naive isprime function that looks for divisors of \$m\$ between \$2\$ and \$\lfloor \sqrt m \rfloor+1\$. Then a loop looks for primes and stops when the \$n\$th one is found.
from math import sqrt
def isprime(n):
for i in range(2,int(sqrt(n))+1):
if n%i==0:
return False
return True
def prime(n):
m=3
i=2
ans=3
while i<=n:
if isprime(m):
i=i+1
ans=m
m=m+2
else:
m=m+2
return ans
It occured to me that prime performs a lot of unnecessary computations: for a given \$m\$, it checks if composite numbers (like 14,16) divide \$m\$. That is useless, and it would be more efficient to look only for prime divisors of \$m\$. This led me to some "storage" approach, where I maintain a list of all the primes I've found, and use them to test for divisors of the next numbers.
from math import sqrt
def prime(n):
list=[2]
i=1
m=3
while i<n:
flag=0
for p in list:
if m%p==0:
flag=1
break
else:
continue
if flag==0:
list.append(m)
m=m+2
i=i+1
else:
m=m+2
return list
The \$n\$th prime is given by prime(n)[-1]
I have an issue with the performance of the second code: it's really slow.
On my computer, according to the Unix command time python code.py, computing the \$6000\$-th prime with the first code takes \$0.231\$ seconds, and \$2.799\$ seconds with the other approach!
Why is the clever way slower than the naive one?
Answer: SuperBiasedMan made a very good answer.
However there is still a performance problem with the cache/list.
Lets say we are finding the 11th prime. We will have the following cache:
[2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
The 11th prime is 31. But we will check against 29,
even though we know that can't be a factor until the number becomes 58.
What I'm trying to say is, your first function will only try up to \$\sqrt{31}\$,
where your new function tries up to \$31\$.
A simple 'I'm no mathematician' approach to fix this is to just filter the list.
sqrt = math.sqrt(m)
for prime in (i for i in list if i <= sqrt):
# The rest
This however suffers from the problem of testing all the numbers too.
However rather than the test being m % prime == 0, it is now i <= sqrt.
To fix this you can create a generator, and break at i > sqrt.
def filter_cache(num, cache):
sqrt = math.sqrt(num)
for prime in cache:
if prime > sqrt:
break
yield prime
for prime in filter_cache(m, list):
# The rest
This should be faster than both methods.
However your first 'naive' approach will be faster at small numbers.
There is an improved version of the naive program, where you count in steps of 6.
And check either side of that number, so \$6n-1\$ and \$6n+1\$.
Thank you JDługosz for the algorithm.
This algorithm means that there is 1/3 less checks to make.
However you will need to start with a cache / list pre-generated with [2, 3].
I use SuperBiasedMan's cache optimization.
def prime(wanted_index, cache=[2, 3]):
try:
return cache[wanted_index - 1]
except IndexError:
pass
def filter_cache(num):
sqrt = math.sqrt(num)
for prime in cache:
if prime > sqrt:
break
yield prime
def add_prime(num):
for prime in filter_cache(num):
if num % prime == 0:
return index
cache.append(num)
return index + 1
index = len(cache)
prime_counter = (
0
if index == 2 else
cache[-1] + 1
if (cache[-1] + 1) % 6 == 0 else
cache[-1] - 1
)
while index < wanted_index:
prime_counter += 6
index = add_prime(prime_counter - 1)
index = add_prime(prime_counter + 1)
return cache[wanted_index - 1]
Even though it is quite large it's quite simple.
The only strange part would be add_prime.
This is as it takes a number, and if it is prime adds it to the cache.
The special part about this is that cache.append modifies prime's cache.
However if we were to do index += 1 we would get UnboundLocalError. | {
"domain": "codereview.stackexchange",
"id": 15696,
"tags": "python, performance, python-3.x, primes"
} |
If a magnet is completely inside a solenoid, but is moving, Does this induce an emf? | Question: I am not addressing here the case when the magnet is approaching the solenoid or when it is moving away from it. I am asking about that part of the journey when the magnet is inside the solenoid completely but still moving, so in this situation, does this induce an emf? If the answer is no, as I believe is the case, then why not? Why isn't the magnetic flux inside the solenoid changing even though the magnet is moving?
Answer: There is no emf if the magnet's length is much shorter than that of the solenoid, and the magnet is well inside the solenoid, so not approaching either end. This is because the number of turns linked by the magnet's flux doesn't change as the magnet moves. However, as the magnet approaches one end of the solenoid (even though it's still inside), fewer and fewer turns will be linked by the magnet's flux, so there is a non-zero rate of change of flux linkage, and therefore an induced emf.
[Another way of looking at what's going on is to add the emfs (if any) induced in the individual turns. When the magnet is moving along in the central region of the solenoid no emfs are induced in the turns around the magnet's centre, because the flux linked with these turns is constant (for a short while). But there are equal and opposite emfs induced in turns around the magnet's poles and beyond as the flux linkage increases at and beyond one pole, but decreases at and beyond the other.]
To see all this clearly, draw a solenoid (a long, thin rectangle will do) and a short magnet inside it. Then draw a few representative magnetic field lines for the magnet. [Don't forget that magnetic field lines are continuous closed loops: they emerge from the North Pole, curve round through the air (penetrating the solenoid 'walls'), enter the South Pole and continue, going from South Pole to North Pole inside the magnet.] This should make the paragraph above easier to understand. | {
"domain": "physics.stackexchange",
"id": 92906,
"tags": "electromagnetic-induction"
} |
Does the redshifting of photons from the Universe's expansion violate conservation of momentum? | Question: The energy-momentum relation,
$$E^2 = m^2c^4 +p^2c^2,$$
lets us derive the momentum of a massless particle:
$$p = \frac{E}{c} = \frac{h\nu}{c}$$
However, the expansion of the Universe redshifts light. This should decrease the momentum of photons. Where would the momentum go, in order for conservation of momentum to hold?
Answer: In relativity you can think of a single conservation law that unites conservation of energy and momentum -- conservation of four-momentum. Energy and momentum are the zeroth and the first to third components of the four-momentum respectively. Such conservation laws arise from invariance of the Lagrangian with respect to a translation in space-time coordinates.
In General Relativity these conservation laws are local concepts that (most people think) can only be applied in local, inertial (flat) frames of reference. In particular, they cannot be applied in changing space-times and so cannot be applied to situations involving the expansion of the universe. | {
"domain": "astronomy.stackexchange",
"id": 1972,
"tags": "light, cosmology, general-relativity, expansion, redshift"
} |
What is the best approach for studying the information within frequencies of an unknown signal | Question: If we have a signal and we do not know anything about this signal. We do not know which frequencies contain specific information, and which frequencies does not contain information. Which is the approach for studying this signal and map the possible data to potential frequencies?
For example, when the voice signal was studied, at the beginning the scientists did not know, which frequencies contain the information. They did not know, if the signal or the information is contained in the frequencies 10 GHz - 50 GHZ or in the frequencies 10 MHz - 40 MHz!
It's hard then to decide which sampling frequency we are going to use. If we choose the wrong sampling frequency, then the antialiasing filter may throw away the most informative frequencies.
Do we start with a trial-and-error (or brute force) approach? For example, choosing a 1 MHz sampling frequency, then 2 MHz, and so on, until we map all the information to its frequency range? Is there a better approach? Not all experiments are easy to conduct, so this approach can be very costly. Consider taking an unknown signal from space; a very rare signal that a very costly radar catches. That radar's sample rate is about 500 GHz and oops! We did not get anything useful from that signal. After years we figure out that we did not find any useful information because the information lies in frequencies larger than 250 GHz.
Answer:
If we have a signal and we do not know anything about this signal. We do not know which frequencies contain specific information, and which frequencies does not contain information. Which is the approach for studying this signal and map the possible data to potential frequencies?
.
My question is about any new signal that a scientist can observe it.
Before an answer that is closer to the point of the question is provided, I think that it would be beneficial to talk a little bit about how do we acquire signals (and why) just to make sure that we share the same understanding.
The starting point for this will be the second phrase "My question is about any new signal that a scientist can observe".
Observation. It does not occur in isolation. The Scientific Method is a tool to guide investigation that, in an abstract way, is still "trial and error".
It starts with prior knowledge, with suspicion, with a hunch, because of an urban legend, because of other experiments. On the basis of this suspicion, a hypothesis is formulated "Well, if you claim to be a clairvoyant then your predictions of what cards I am holding up without you seeing them, should be better than random chance". Notice here that it is very important for the converse to ALSO be true. "If you are not better than random chance then you are not a clairvoyant, (you are a dice...and a very good one...it's not easy)"
On the basis of this, an experiment is formulated. The way the experiment is structured is extremely important because, usually, there are so many factors that might have an effect on the observed phenomenon. What if the subject gradually learns the deck? What if the assistant holding the cards up is giving off subtle messages? And so on.
Following this is data collection. We find clairvoyants, sit them down across a table and ask them to guess what card we are holding up, covered.
Following this is data analysis which is very much adapted to the experiment in general and by extension to the hypothesis.
Following this we might revisit the hypothesis or the experiment with the newly inferred information and go back through the steps from #1. BUT! We might also decide that we have done everything by the book, all of our colleagues have exhausted their brains and skills in thinking about the problem and to the best of our knowledge and ability we dare to make a statement. "Based on the evidence produced by Mr XYZ, the consensus is that they are most likely to be a clairvoyant". I don't know how they do it but we put them to the test and they are. Here is how we did it in all detail so that an independent researcher can REPLICATE the process to confirm or reject our work. In other words we...
Publish everything. So that others can check our work. In this way at a given point in time, "we" (humans) have a pretty good idea about a phenomenon.
Of course, "we" might choose to ignore all of this...for...reasons. But that's another story.
Therefore, we don't just come across some signal AND THEN try to figure out what the signal is about or how did it come to be. Because OBSERVATION comes AFTER hypothesis. Observation is DESIGNED. It has CONSTRAINTS which are very precisely known. Otherwise, we are just guessing.
The particle accelerator of CERN didn't just pop up in its incredibly expensive existence. Before that one, there were tiny little ones that were very clumsy and...elementary...in their operation.
Electroencephalography didn't just spring into existence. Alpha waves are called Alpha because they were discovered first. They occupy the range between 8Hz-13Hz. But Gamma activity goes even higher, up to 100Hz. Take a 100mV off the shelf volt-meter instrument and squeeze its leads. Yes, it will measure the potential across your hands and you will see the needle moving but from that to being able to make a diagnosis on the basis of electrical activity is a HUGE distance both technologically AND conceptually.
So, in conclusion, we don't just go out there hunting for signals.
Right, done, settled, let's go home.
Not so fast.
SOMETIMES signals do drop from the sky...or...pop up from the sea....or run along a river.
So, what do we do then? No one "ordered" this and we can't send it back.
We still apply the scientific method but making the signal the subject. In other words, the signal is now the "Clairvoyant".
Maybe it's a fault with the equipment. Equipment checked, no faults.
Can we reproduce it in the lab? No, it takes a planet
Is it confirmed by models? No, no model predicts the existence of such a specimen.
How about changing the model? Changing the model could produce a solution but also 23 other perfectly valid (and symmetric) solutions.
So what do we do?
We wait and we collect more data and we perform more experiments and build better models and we think more and do it all over from the beginning.
This also covers the approach for mapping "..which frequencies contain information...".
Is the signal similar to background RF radiation?
Yes
Then...it's noise.
Maybe, but maybe it was sent using chaos communications so to an extent it could be indistinguishable from noise.
OK so what do we do?
Acquire another signal at higher sampling frequency
No
Does its spectrum look like anything we have seen before?
Yes
Maybe it is our radiation bouncing back from a planet
No
Is it repeatable?
Yes
Is there another physical source of this radiation that could explain it?
No
Aha! WHEN does it show up?
This is just one of thousands of decision trees you might generate for a problem like this and this is why I wrote earlier that the question is incredibly broad. It cannot be answered with a definite answer but only with the methodology of HOW you answer such questions.
I hope this helps. | {
"domain": "dsp.stackexchange",
"id": 4730,
"tags": "signal-analysis, sampling"
} |
Generating readable text in a human language from machine-readable data | Question: I have been generating English-language text from some machine-readable data, but now I want internationalization of my script to be relatively easy. The challenge is that some data might be missing and thus should be omitted from the output, possibly precluding any approach involving just "plugging in" numbers.
Currently, I have functions like these, the output concatenated together in the main program:
function UserinfoJsFormatQty(qty, singular, plural) {
return String(qty).replace(/\d{1,3}(?=(\d{3})+(?!\d))/g, "$&,") + "\u00a0" + (qty == 1 ? singular : plural);
}
function UserinfoJsFormatDateRel(old) {
// The code below requires the computer's clock to be set correctly.
var age = new Date().getTime() - old.getTime();
var ageNumber, ageRemainder, ageWords;
if(age < 60000) {
// less than one minute old
ageNumber = Math.floor(age / 1000);
ageWords = UserinfoJsFormatQty(ageNumber, "second", "seconds");
} else if(age < 3600000) {
// less than one hour old
ageNumber = Math.floor(age / 60000);
ageWords = UserinfoJsFormatQty(ageNumber, "minute", "minutes");
} else if(age < 86400000) {
// less than one day old
ageNumber = Math.floor(age / 3600000);
ageWords = UserinfoJsFormatQty(ageNumber, "hour", "hours");
ageRemainder = Math.floor((age - ageNumber * 3600000) / 60000);
} else if(age < 604800000) {
// less than one week old
ageNumber = Math.floor(age / 86400000);
ageWords = UserinfoJsFormatQty(ageNumber, "day", "days");
} // ...
return ageWords;
}
It is supposed to generate output like:
A reviewer and rollbacker, 2 years 9 months old, with 8,624 edits. Last edited 7 hours ago.
The age, edit count, date of last edit, or all of them could be missing. How could I improve my current design without resorting to a tabular display format? (All the information needs to nicely fit into a single status line.)
Answer: I see a couple of potential issues with regards to internationalization:
pluralization is more complex than (qty == 1 ? singular : plural). What about the value 0? Also in some languages, different plural forms are in use depending on the number.
numbers should be formatted according to language, using appropriate separators
concatenation (that you mention is done in main program) must be avoided because the order of words and phrases will often vary in translations in different languages
I would advise to :
localize translations using named parameters to be replaced: this solves the ordering issue, and avoids concatenation.
format values, for number formatting and pluralization, using a separate function localized for each language
replace parameters with formatted values in parameterized translations using a template engine: this should keep some potentially buggy regular expressions out of your code :)
You may be interested in having a look at:
JavaScript Internationalisation, a post by Matthew Somerville on 24 Ways
jQuery-global, "a jQuery plugin for the globalization of string, date, and number formatting and parsing"
and last but not least, the i18n API part of the Scalable JavaScript Application framework, which I designed for Legal-Box :)
eric-brechemier/lb_js_scalableApp, the project home page on GitHub
lb.core.Sandbox.js, check the i18n part of the API, methods starting with "i18n."
I may be able to provide more practical suggestions if you show more of your code, especially at the "top", part of the main program. | {
"domain": "codereview.stackexchange",
"id": 105,
"tags": "javascript"
} |
How can we assume the asymptotic complexity of 1/2n^2 - 3n | Question: I am trying to understand how asymptotic complexity of the given function is calculated based out of Introduction to algorithms by Thomas Cormen.
In the book we are trying to solve inequality for $f(n) = \frac12n^2-3n$.
It is solved such that $c_1n^2 \leqslant \frac12n^2 - 3n \leqslant c_2n^2$.
Dividing both sides by $n^2$ results in $c_1\leqslant \frac12 - \frac3n\leqslant c_2$.
In the book I cannot understand the below explanation that the author gives. Can someone help me out here ?
evaluating the inequality and calculating n0 as mentioned in the book
Full text as in the book
problem description
Answer: The author chooses an initial value $n_0$ of $n$ such that for $n\geqslant n_0$, $f(n)$ is positive.
The value $n_0$ is found solving:
$$f(n) > 0\iff\frac12-\frac3n> 0 \iff \frac12> \frac3n\iff n>6$$
That's the reason for $n_0 = 7$.
Now, note that for $n\geqslant n_0$, $\frac12-\frac3{n_0}\leqslant \frac12-\frac3n$. In order to get $c_1$ such that $c_1\leqslant \frac12-\frac3n$ foreach $n\geqslant n_0$, it suffices to solve:
$$c_1\leqslant \frac12-\frac3{n_0} = \frac12-\frac37 = \frac1{14}$$
That's the reason for $c_1 = \frac1{14}$.
Now, since for all $n$, $\frac12-\frac3n\leqslant \frac12$, we can choose $c_2 = \frac12$. | {
"domain": "cs.stackexchange",
"id": 19650,
"tags": "time-complexity, asymptotics, big-o-notation"
} |
Must a reversible engine be a carnot engine? | Question: I have this homework question:
"Show that any reversible engine operating between T1 and T2 is a carnot engine."
I think I have a solution, but it feels very hand-wavy. We know that any process that can be represented as a loop in the PV plane is reversible as the net entropy change will be zero. We must operate between two specific temperatures, so the loop must consist of two isotherms at T1 and T2. So the question is what curves join the isotherms. As a heat engine involves energy input at constant temperature, there will be no energy change between the isotherms. So the curves connecting the isotherms must be adiabatic curves. So we have a carnot cycle.
Is this sufficient? I don't know why, but I doubt it.
Answer: I suspect the expression "operating between T1 and T2" actually means "operating between heat reservoirs with temperatures T1 and T2". But even then I am not sure "any reversible engine operating between heat reservoirs with temperatures T1 and T2 is a Carnot engine." As far as I know, a Carnot engine is an engine "that operates on the reversible Carnot cycle" (http://en.wikipedia.org/wiki/Carnot_heat_engine ), which cycle consists of two isothermal processes and two adiabatic processes. However, it seems that more complex reversible processes can exist that use the same isothermal processes (but maybe different parts of them) and more than two adiabatic processes (e.g., T1S1-T1S2-T2S2-T2S3-T1S3-T1S4-T2S4-T2S1-T1S1). so maybe the condition of the problem lacks some additional requirement. | {
"domain": "physics.stackexchange",
"id": 40975,
"tags": "homework-and-exercises, thermodynamics, statistical-mechanics, carnot-cycle"
} |
Mohr's circle: maximum normal stress tensor | Question: Given:
$$
\sigma_{ij}=
\left[ {\begin{array}{cc}
-20 & 60 \\
60 & 90 \\
\end{array} } \right],\quad i,j=x,y
$$
I want to find the principle stress tensor $\sigma_{ij}^{pr}$.
Using the Mohr's Cirlce, I get:
$$\sigma_{max}=116.39,\sigma_{min}=-46.39$$
the points where the circle intersects with the $x(\sigma_{xx},\sigma_{yy})$ axis.
From there, how do these points make up the principle stress tensor?
Is
$$
\sigma_{ij}^{pr}=
\left[ {\begin{array}{cc}
116.39 & 0 \\
0 & -46.39 \\
\end{array} } \right],\quad i,j=x,y
$$
correct?
Answer: The values you determined for $\sigma_{\mathit{max}}$ and $\sigma_\mathit{min}$ are correct. However, your answer is only partially correct.
The value of $\sigma^\mathrm{pr}$ has to be expressed in the principal coordinate system. You can calculate the angle of the principal system to your original coordinate system using:
$$
tan(2\varphi) = \frac{2\tau_{xy}}{\sigma_x-\sigma_y} = \frac{2 * 60}{-20-90}
$$
$$
\varphi_1 \approx 23.74°
$$
So yes the values are correct, but your coordinate system $x^\mathrm{pr}, y^\mathrm{pr}$ is rotated by $\varphi_1$ degrees, compared to your original coordinate system $x, y$. | {
"domain": "engineering.stackexchange",
"id": 2178,
"tags": "stresses"
} |
GPS readings getting swapped | Question:
Two of my GPS readings get swapped as follows:
I have a robot (named raven) which runs a gps and uses the gpsd_client and utm_odometry to publish on a topic named raven_gps_odom. Similarly I have a laptop(named uav) which runs another gps and publishes on laptop_gps_odom. The laptop's ROS_MASTER is set to raven and the two run on the same network. The config files are here .
When I do a rostopic echo laptop_gps_odom and rostopic echo /raven_gps_odom several times consecutively I find that their gps readings get swapped every 10 seconds or so.
Why is this so?
Originally posted by PKG on ROS Answers with karma: 365 on 2011-10-13
Post score: 1
Answer:
It doesn't look like you are remapping the output topics from the gpsd_client. Since they are using the same rosmaster, you are publishing from two different drivers on the same topic.
You should remap both of the outputs for each gpsd_client and the appropriate inputs to utm_odometry. Another option would be to run both sets in their own namespaces.
Originally posted by Eric Perko with karma: 8406 on 2011-10-13
This answer was ACCEPTED on the original site
Post score: 2 | {
"domain": "robotics.stackexchange",
"id": 6969,
"tags": "ros, gpsd-client"
} |
How to subscribe to control commands from the LGSVL simulator using only ROS2? | Question:
Hello!
Is it possible to get the control commands i.e steering pattern from the latest LGSVL simulator? What topic should I subscribe to, to fetch this data for training later? Previously it used to be /simulator/control/command. Now it has changed. I would like to strictly avoid using Autoware and Apollo and just use ROS2. Has any one done it this way?
Any pointers?
If I have to change the internal code of LGSVL, any leads on how to do that?
Also if I want throttle, braking information too, how should I go about with just ROS2?
Thanks.
Update:
The LGSVL team have updated their code base to reflect changes to the latest simulator. I'm currently testing it out.
Update 2:
After testing and doing some modification to my needs, I was able collect data. The solution is to use a sensor plugin with custom topic name and type.
Originally posted by dr563105 on ROS Answers with karma: 38 on 2020-05-03
Post score: 0
Answer:
After testing and doing some modification to my needs, I was able collect data. The solution is to use a sensor plugin compatible with LGSVL using custom topic name and type.
Originally posted by dr563105 with karma: 38 on 2020-05-23
This answer was ACCEPTED on the original site
Post score: 0 | {
"domain": "robotics.stackexchange",
"id": 34891,
"tags": "ros2"
} |
Simple LINQ statement for report limit | Question: What would be the most efficent way to calculate this?
var reportLimit = 96*1024;
IEnumerable<int> memoryInUse =
things
.Where(sample => sample.IsOn)
.Select(sample => sample.MemoryInMb)
.ToArray();
int totalUnderReportLimit = memoryInUse.Where(ram => ram <= memoryCalcFactor).Sum();
int totalOverEqualReportLimit = memoryInUse.Count(ram => ram > memoryCalcFactor) * memoryCalcFactor;
return totalUnderReportLimit + totalOverEqualReportLimit;
Answer: Well here's my take on it. If you want efficient in terms of speed, write a loop. LINQ is not for writing fast code, it's for writing concise code.
memoryInUse is just a collection of the memory counts of all the machines that are on. You then essentially partition those machines by some memoryCalcFactor to do some calculation.
The call to ToArray(), while somewhat helpful (you do the filter/projection once) isn't really needed. You can perform your calculation in one pass and therefore don't need it at all. Look at the following lines in your calculation:
var x = memoryInUse.Where(ram => ram <= memoryCalcFactor).Sum();
var y = memoryInUse.Count(ram => ram > memoryCalcFactor) * memoryCalcFactor;
return x + y;
What are we doing here?
Adding up all the sizes that are less than some factor
Counting all those that are greater than that factor multiplying by that factor.
Adding the previous results
Looking at this at a much higher level, what are we doing here? We're adding up the sizes limiting each size by some factor (a maximum). Write your code to do that.
This is how I'd write it:
var memorySizes = machines
.Where(machine => machine.IsOn)
.Select(machine => machine.MemoryInMB);
var result = memorySizes.Sum(size => Math.Max(size, memoryCalcFactor));
Otherwise if you don't want to take the performance hit LINQ will give you and use a loop, here's the equivalent:
var sum = 0;
foreach (var machine in machines)
{
if (machine.IsOn)
{
sum += Math.Max(machine.MemoryInMB, memoryCalcFactor);
}
} | {
"domain": "codereview.stackexchange",
"id": 1949,
"tags": "c#, linq"
} |
Function wrapper that prints the result | Question: (English)
I was recently challenged to make a simple program that does the following:
Function f accepts as a parameter a function g, which returns a value. Function f must return a function h which returns and prints the value that function g returns.
After some research I got the solution and the program was successful, but I wonder if the code is done in the best way. I would appreciate immensely a critique.
This is the code:
Hace poco me retaron a realizar un programa sencillo que realiza lo siguiente:
La función f acepta como parámetro una función g, la cual regresa un valor. La función f debe de regresar una función h la cual regresa e imprime el valor que regresa la función g.
Luego de investigar un poco conseguí la solución y el programa resulto exitoso, pero me pregunto si el código esta hecho de la mejor manera. Agradecería inmensamente una critica.
Este es el código:
#include <iostream>
using namespace std;
auto g() // Función que regresara el valor principal
{
return 123;
}
template <class FT>
auto f(FT* fref) // Función que recibira una referencia de una función
{
return [=]() -> auto // Regresa una función creada a partir de "fref"
{
cout << fref() << endl;
return fref();
};
}
int main()
{
auto h = f(&g); // Llamando la función f pasando la referencia de la función g
auto resultado = h();
cout << resultado << endl;
return 0;
}
Answer: I see one substantial problem with your implementation:
f() calls fref() twice - bad enough if each call consumes considerable resources,
probably plain wrong where the call has side effects.
(The funny part is main() introducing a variable to hold the result without need.) | {
"domain": "codereview.stackexchange",
"id": 42745,
"tags": "c++, performance, beginner, programming-challenge"
} |
Organic Nomenclature: But-1-en-3-yne or But-3-yne-1-en | Question: Just wondering whether "but-1-en-3-yne" or "but-3-yne-1-en" follows the correct IUPAC naming convention for organic molecules. I'm going through a text book that uses the former in an example, but in the answer to one of the questions it uses the latter, so is it interchangeable or is one just wrong?
Answer: The IUPAC Blue Book says that:
P-31.1.1.1 The presence of one or more double or triple bonds in an otherwise saturated parent hydride [...] is denoted by changing the ending ‘ane’ of the name of a saturated parent hydride to ‘ene’ or ‘yne’. Locants as low as possible are given to multiple bonds as a set, even though this may at times give ‘yne’ endings lower locants than ‘ene’ endings. If a choice remains, preference for low locants is given to the double bonds. In names, the ending ‘ene’ always precedes ‘yne’, with elision of the final letter ‘e’ in ‘ene’. Only the lower locant for a multiple bond is cited, except when the numerical difference between the two locants is greater than one, in which case the higher locant is enclosed in parentheses.
Therefore the double bond gets priority whatever be its locants (numbering) i.e., '-ene' comes before '-yne'. And in this case it's "but-1-en-3-yne". | {
"domain": "chemistry.stackexchange",
"id": 14850,
"tags": "organic-chemistry, nomenclature"
} |
Increasing parameter $\omega$ in $x[n] = e^{j\omega n }$ | Question: Suppose $x[n] = e^{j\omega n }$. I've seen following statement many times in the different sources:
For the discrete-time sinusoidal signal $x[n]$, as $\omega$ increases from $\omega = 0$ toward $\omega = \pi$, $x[n]$ oscillates progressively more rapidly. However, as $\omega$ increases from $\omega = \pi$ to $\omega = 2\pi$, the oscillations become slower.
I think it means that if we increase $\omega = 0$ toward $\omega = \pi$, the fundamental frequency of $x[n]$ should increase. If we set $\omega = \frac{\pi}{3}$ then $$\frac{\omega}{2\pi} = \frac{\pi /3 }{2\pi} = \frac{1}{6} = \frac{m}{N} \implies f_0 = \frac{\omega}{m} = \frac{\pi}{3}$$
and if $\omega = \frac{5\pi}{13}$ then $$\frac{\omega}{2\pi} = \frac{5\pi /13 }{2\pi} = \frac{5}{26} = \frac{m}{N} \implies f_0 = \frac{\omega}{m} =\frac{\pi}{13}$$
So we have increased $\omega$ but the fundamental frequency has been decreased.
So what's wrong here? Is my understanding about the aforementioned statement wrong? If so, what happens exactly to the fundamental frequency as we change $\omega$?
Answer: I think you have assumed that if a discrete sinusoidal is of higher frequency then its fundamental frequency should also be higher. This is not correct assumption.
Secondly, you are mixing this understanding of yours with rapidness of oscillations of a discrete sinusoidal.
Even if fundamental frequency of $2^{nd}$ sinusoidal is $\frac{2\pi}{26}$, the $2^{nd}$ discrete sinusoidal is the $5^{th}$ harmonic of it's fundamental frequency. Whereas, $1^{st}$ discrete sinusoidal of discrete frequency $\frac{\pi}{3}$ is the very $1^{st}$ harmonic of it's fundamental frequency $\frac{\pi}{3}$. So, comparison between fundamental frequency cannot give you a sense of rapidness of oscillation of digital frequencies.
Then what can give you the sense of rapidness: the digital frequencies themselves.
$e^{j\frac{5\pi}{13}n}$ will oscillate more rapidly than $e^{j\frac{\pi}{3}n}$ because at every change of $n$, the change in its phase is more i.e. $\frac{5\pi}{13} > \frac{\pi}{3}$. This will happen until $\omega = \pi$.
What happens when $\omega > \pi$, for example $\omega = 2\pi$.
When $\omega = 2\pi$, even then the change in phase at every change of $n$ is more than a sinusoid with $\omega = \pi/3$. Ofcourse, at every step the former changes its phase by $2\pi$ and the later only changes its phase by $\frac{\pi}{3}$. But when the change in phase is $2\pi$ at every step, it will seem like the phase is not changing at all. Changing phase by $2\pi$ is same as not changing it at all.
In general, changing a phase by $\omega = \pi + \omega_o$ is same as changing it by $-(\pi-\omega_o)$, where $\omega_o \le \pi$. That is why the rapidness of oscillation increases only till $\omega$ reaches $\pi$, and it starts decreasing. | {
"domain": "dsp.stackexchange",
"id": 8667,
"tags": "discrete-signals, frequency"
} |
A* Generic Implementation in Swift | Question: My first Code Review post.
I'm up to a generic implementation of the A* search algorithm in Swift (for now, it's a single goal implementation).
Here's what's been coded so far:
// State : a protocole for states in the search space
protocol State : Equatable {
// successors() : returns an array of successors states in the search space
func successors() -> [Successor<Self>]
// heuristic(goal) : returns the heuristic value for a given states in relation to a given goal state
func heuristic(goal:Self) -> Double
// id : a string identifying a state
var id : String { get }
}
// States are compared by their id
func ==<T:State>(lhs:T, rhs:T) -> Bool {
return lhs.id==rhs.id
}
// Successor : represents a successor state and its cost
struct Successor<T:State> {
var state: T
var cost: Double
}
// Plan : a plan of states
struct Plan<T:State> {
// states : an array of states that make a plan
var states: [T]
// cost : the total cost of the plan
var cost: Double
// isNot(another) : checks if another plan is different from the current one
func isNot(another: Plan) -> Bool {
return !(states == another.states && cost == another.cost)
}
}
// AStar<TState> : finds the A* solution (nil if no solution found) given a start state and goal state
func AStar<TState:State>(start: TState, goal: TState) -> Plan<TState>? {
var fringe : [Plan<TState>] = [Plan(states: [start], cost: 0)]
while fringe.count>0 {
let bestPlan = fringe.minElement({
a,b
in
a.cost + a.states.last!.heuristic(goal) < b.cost + b.states.last!.heuristic(goal)
})!
fringe = fringe.filter({
plan in plan.isNot(bestPlan)
})
if bestPlan.states.last! == goal {
return bestPlan
}else{
let successors = bestPlan.states.last!.successors()
for successor in successors.filter({ s in !bestPlan.states.contains(s.state) }) {
let newPlan = Plan(states: bestPlan.states+[successor.state], cost: bestPlan.cost+successor.cost)
fringe.append(newPlan)
}
}
}
return nil
}
I'm here for some suggestions to make this Swiftier
EDIT:
I tested this implementation using a graph of vertices representing cities with costs representing road distances. I used the straight line distance as a heuristic value :
let adjacencyList : [String:[String:Double]] = [
"oued" : ["biskra":150.0],
"biskra" : ["batna":120.0, "oued":150.0],
"batna" : ["biskra":120.0, "barika":40.0, "setif":100.0, "constantine":110.0],
"barika" : ["batna":40.0, "setif":50.0],
"setif" : ["batna":100.0, "barika":50.0, "constantine":50.0, "bejaia":80.0],
"constantine": ["batna":110.0, "setif":50.0, "annaba":80.0],
"bajaia" : ["setif":80.0, "annaba":30.0],
"annaba" : ["constantine":80.0, "bejaia":30.0]
]
let straightLineDistances = [
"biskra" : ["annaba":220.0],
"batna" : ["annaba":140.0],
"barika" : ["annaba":200.0],
"setif" : ["annaba":100.0],
"constantine": ["annaba":80.0],
"bejaia" : ["annaba":30.0],
"oued" : ["annaba":320.0],
"annaba" : ["annaba":0.0]
]
final class Vertex : State, CustomDebugStringConvertible {
let label : String
init(label:String) {
self.label = label
}
func successors() -> [Successor<Vertex>] {
return adjacencyList[label]!.map { x in Successor<Vertex>(state:Vertex(label: x.0),cost: x.1) }
}
func heuristic(goal:Vertex) -> Double {
return straightLineDistances[label]![goal.label]!
}
var id : String {
return label
}
var debugDescription : String {
return id
}
}
let solution = AStar(Vertex(label: "biskra"), goal: Vertex(label: "annaba"))
print(solution)
And the output solution was the expected A* solution. But I'm more concerned about the elegance of this implementation
Answer: This code isn't bad at all. You make great use of the generic features of Swift and you also go for the functional approach over the iterative one whenever it's a good fit. Here are some ways to make your code "Swiftier":
Spacing. Readability is very important in Swift and your code currently doesn't follow all the best practices. Operators are usually surrounded by spaces (e.g. lhs.id == rhs.id) and colons are usually only followed by a space (e.g. protocol State: Equatable and var id: String).
Don't force unwrap. You used states.last! twice. Even though states is never empty in this case, the compiler doesn't actually enforce it. No matter how sure you are that a value isn't nil, it's generally frowned upon to use !. Instead, you could add var lastState: T to the Plan struct which keeps track of the last state.
Use trailing closures. If the last parameter of a function is a closure, you can put it outside the brackets: myArray.filter { $0 > 3 }.
The isNot(_:) function is not very Swifty. Using != would make much more sense, so the logical thing to do is to extend Plan to conform to Equatable. Also, I don't think you have to check whether cost == another.cost because there's no way the two would be different if the states are equal.
Don't use fringe.count > 0, but instead use !fringe.isEmpty. It's both more readable and for some data structures it's also more efficient (in case there's no constant time random access).
Instead of force unwrapping minElement, use while let bestPlan = fringe.minElement(...) instead.
Don't use short closure arguments such as a, b, x. Either give them more meaningful names, or use anonymous closure arguments ($0, $1, etc.).
Instead of using filter to remove bestPlan from fringe, use removeAtIndex instead. You can get this index using fringe.indexOf(bestPlan) (which you can unwrap simultaneously with fringe).
Use a guard to return bestPlan in case the goal is reached, both to increase readability and to prevent nesting.
Use a where-clause instead of filter to loop through all successors that are not contained in bestPlan.states. Using filter is good, but this is what where is for.
The way you construct newPlan isn't very elegant. You could consider adding an append function to Plan for adding a successor.
Here's what my version looks like after the changes:
// State : a protocole for states in the search space
protocol State: Equatable {
// successors() : returns an array of successors states in the search space
func successors() -> [Successor<Self>]
// heuristic(goal) : returns the heuristic value in relation to a given goal state
func heuristic(goal: Self) -> Double
// id : a string identifying a state
var id: String { get }
}
// States are compared by their id
func == <T: State> (lhs: T, rhs: T) -> Bool {
return lhs.id == rhs.id
}
// Successor : represents a successor state and its cost
struct Successor<T: State> {
var state: T
var cost: Double
}
// Plan : a plan of states
struct Plan<T: State> {
// states : an array of states that make a plan
var states: [T]
// lastState : the last state of the plan
var lastState: T
// cost : the total cost of the plan
var cost: Double
// initialise a plan with a single state
init(state: T) {
states = [state]
lastState = state
cost = 0
}
// append a successor to this plan
mutating func append(successor: Successor<T>) {
states.append(successor.state)
lastState = successor.state
cost += successor.cost
}
// the non-mutating version of append(_:)
func appending(successor: Successor<T>) -> Plan {
var new = self
new.append(successor)
return new
}
}
extension Plan: Equatable {}
func == <T: State> (lhs: Plan<T>, rhs: Plan<T>) -> Bool {
return lhs.states == rhs.states
}
// AStar<TState> : finds the A* solution (nil if no solution found) given a start state and goal state
func AStar <TState: State> (start: TState, goal: TState) -> Plan<TState>? {
var fringe = [Plan(state: start)]
// computes the best plan from the fringe array
// I made this its own function to make the `while let` statement more readable
func bestPlan() -> Plan<TState>? {
return fringe.minElement {
$0.cost + $0.lastState.heuristic(goal) < $1.cost + $1.lastState.heuristic(goal)
}
}
while let bestPlan = bestPlan(), index = fringe.indexOf(bestPlan) {
fringe.removeAtIndex(index)
guard bestPlan.lastState != goal else { return bestPlan }
let successors = bestPlan.lastState.successors()
for successor in successors where !bestPlan.states.contains(successor.state) {
fringe.append(bestPlan.appending(successor))
}
}
return nil
} | {
"domain": "codereview.stackexchange",
"id": 20303,
"tags": "swift, a-star"
} |
How to maximize recall? | Question: I'm a little bit new to machine learning.
I am using a neural network to classify images. There are two possible classes. I am using a Sigmoid activation at the last layer so the scores of images are between 0 to 1.
I expected the scores to be sometimes close to 0.5 when the neural net is not sure about the class of the image, but all scores are either 1.0000000e+00 (due to rounding I guess) or very close to zero (for example 2.68440009e-15). In general, is that a good or bad thing ? How can this behaviour be avoided?
In my use case I wanted to optimize for recall by setting a lower threshold but this has no impact because of what I described above.
More generally, how can I minimize the number of false negatives when in training the neural net only cares about my not ad-hoc loss ? I am ok with decreasing accuracy a little bit to increase recall.
Answer: Train to avoid false negatives
What your network learns depends on the loss function you pass it. By choosing this function you can emphasize various things - overall accuracy, avoiding false negatives, false positives etc.
In your case you probably use a cross entropy loss in combination with a softmax classifier. While softmax squashes the prediction values to be 1 when combined across all classes, the cross entropy loss will penalise the distance between the actual ground truth and the prediction. In this calculation it will not take into account what the values of the "false negative" predictions are. In other words: The loss function only cares for the correct class and its related prediction, not for the values of all other classes.
Since you want to avoid false negatives this behaviour is probably the exact thing you need. But if you also want the distance between the actual class and the false predictions, another loss function that also takes into account the false values might serve you even better. Given your high accuracy, this poses the risk that your overall performance will drop.
What to do then?
Making the wrong prediction and being very sure about it is not uncommon. There are millions of things you could look at, so your best guess probably is to investigate the error. E.g. you could use a confusion matrix to recognize patterns which classes are mixed with which. If there is structure you might need more samples of a certain class or there are probably labelling errors in your training data.
Another way to go ahead would be to manually look at all (or some) examples of errors. Something very basic as listing the errors in a table and trying to find specific characteristics can guide you towards what you need to do. E.g. it would be understandable if your network usually gets the "difficult" examples wrong. But maybe there is some other clear systematic your network did not pick up yet due to lack of data? | {
"domain": "datascience.stackexchange",
"id": 9438,
"tags": "machine-learning, neural-network, deep-learning, keras, image-classification"
} |
How is entropy a state function? | Question: Is there only one reversible way to move from one state to another?
If we consider two states $A$ and $B$ on an isotherm and we move from $A$ to $B$ first by a reversible isochoric process and then by a reversible isobaric process. Now the path followed should be reversible since both the processes were reversible. But what about simply following the reversible isothermal process?
According to me both processes should be reversible. Now entropy is the heat added reversibly to move from one state to another divided by the temperature at which it is added. But we know that the heat added to the system is different in both the cases. Then how is entropy a state function?
Answer: The total heat added in both the processes is different. Change in entropy is defined as $\int(dQ/T)$. Along the isotherm, the temperature remains constant. But along the other two reversible processes you have mentioned, the temperature is not constant. Effectively, it can be seen by integration that change in entropy in both processes is the same. | {
"domain": "physics.stackexchange",
"id": 38824,
"tags": "thermodynamics, entropy, reversibility"
} |
How to calculate S/N in galaxy absorption spectra? | Question: I want to calculate the S/N of an galaxy absorption spectrum.
I therefore chose a part of the spectrum which contains very little to no absorption features and should be made up only of noise and the continuum flux (see the region in the next plot).
I then fitted a 4th degree polynomial to that data using np.polyfit().
If I understand the situation correctly the signal should be the fitted polynomial, since this is the spectral continuum in this region. And the noise would correspond to the deviations from that continuum (blue curve in plot). Then I would calculate the S/N = polynom / flux. Is this the right approach? And how do I get only one value for the S/N since just dividing the polynom by the flux will give me a S/N value for each pixel. Maybe I mixed something up but I can not get my head around it. I am grateful for any tips.
Answer: The average signal-to-noise would be the average level of the spectrum signal divided by the RMS difference of the fit to the polynomial. i.e. You want
$${\rm RMS} = \left(\frac{1}{N}\sum_i^{N} (y_p - y_i)^2 \right)^{1/2} \ , $$
where $y_i$ are the pixel data values and $y_p$ is the polynomial model value at that pixel.
The signal-to-noise ratio of a spectrum is in general wavelength-dependent. The way you have done this could give you a wavelength-dependent value if you split the spectrum into smaller pieces and fit a polynomial to the continuum of each piece.
You cannot get a pixel-by-pixel value using this method, since the RMS is not well-defined for single pixels (or even for a few pixels). For that you should have propagated the uncertainty of each pixel during all the spectrum extraction steps. | {
"domain": "astronomy.stackexchange",
"id": 6827,
"tags": "observational-astronomy, galaxy, data-analysis, spectra, python"
} |
Can apparent depth be greater than real depth in any case? | Question:
It's written that AO is the real depth and AO₁ is the apparent depth. But it looks like the apparent depth is greater than the real depth in the figure when the object is seen from the other side of the slab. Is it possible? Can apparent depth be greater than real depth?
And I have one more doubt ... In this case Real depth / Apparent depth = 1/refractive index but in general ratio of real and apparent depth equals refractive index... Why in this case this is opposite.
Thanks.
Answer: To answer the question in your title : Yes. Apparent depth is usually less than real depth because you are looking from a medium of lower optical density (air) into a medium of higher optical density (water or glass). If you were under water looking up at an object outside of the water, it would appear further away.
In the diagram, $O_1$ is the image of object $O$ when viewed from inside the glass block. It is located by tracing back rays inside the glass block. The object is in air, the observer is in glass, so this is a case in which apparent depth should be greater than real depth.
$I$ is the image of object $O$ when viewed from air on the other side of the glass block. It is located by tracing back rays in air in the region of the observer. Both object and image are in air, but there is a layer of optically denser material in between, so the apparent depth is again less than the real depth. However, the ratio of real to apparent depth is not as small as it would be if $O$ were inside the block.
Your formula does not work in this case because the ratio also depends on the thickness of the glass block. It is correct when there is 1 interface between different optical media - here there are 2 interfaces.
The order of the media is also important. The refractive index is more properly given not only for 2 media but also for the direction in which rays are travelling : ie air-to-glass or glass-to-air. For this reason the index is sometimes written as $$_a n_g = \frac{1}{_g n_a}$$ | {
"domain": "physics.stackexchange",
"id": 40953,
"tags": "geometric-optics"
} |
Need suggestion regarding use of laserscanner or radar | Question:
Hi,
I am currently doing research in the field of Autonomous Navigation. Although this question might be slightly inappropriate for the ROS forum, I decided to post it to get some good suggestions. I have been using the LMS Sick-200 laser-scanner for performing 2D detection of obstacles for velocity estimation. However, if I check the state-of-the-art technology, most of the vehicles (Volvo, Google, Ford, Honda, etc) use RADAR sensors (both long and short range) for performing tasks like object detection, collision avoidance, velocity estimation, etc. Can anyone suggest whether it would be a better decision to shift to RADAR and whether I would get enough support in ROS and the available wrappers and drivers if I start using Radar instead of laser-scanners (like how it is available for SICK and Hokuyo lidars).
Thanks
Originally posted by Ashesh Goswami on ROS Answers with karma: 36 on 2014-05-15
Post score: 0
Original comments
Comment by SorinV on 2017-07-11:
Did you find any resources on radars? It's been 3 years but radars are still not a part of ROS as they are used in the autonomous vechicles projects, and as you said, they are cheaper than a LIDAR
Answer:
I do not know of any support for RADAR sensors in ROS at this time (which doesn't mean it doesn't exist somewhere, only that their use is very uncommon). LIDAR based sensing (SLAM, point cloud processing etc.) however is ubiquitous in the ROS world, so from an availability standpoint LIDAR sensors are the clear winners.
It also appears to me that Velodyne type sensors are still the main sensor for 360deg coverage object detection, collision avoidance etc. on most (highly) autonomous car projects (certainly the Google one). Caveats due to cost or the desire to have a "clean" car configuration apply of course.
Originally posted by Stefan Kohlbrecher with karma: 24361 on 2014-05-15
This answer was ACCEPTED on the original site
Post score: 0
Original comments
Comment by Ashesh Goswami on 2014-05-17:
thanks a lot..yes I agree that the Velodyne 3D LIDARS are the main sensors used for complex autonomous car projects but it is a very expensive tool to be used in case I am planning to perform comparatively easier tasks like lane change or collision avoidance..so I guess I should keep the hunt on to see if any radar related help is available..otherwise I would stick to the SICK LMS laser-scanner. | {
"domain": "robotics.stackexchange",
"id": 17964,
"tags": "ros, sicklms"
} |
"Mysterious radio signals" - how could a geostationary satellite and Ross-128 line up with Arecibo? | Question: The Phys.org article 'Mystery' signal from space is solved. It's not aliens cites a summary of the conclusion "by Abel Mendez, director of the Planetary Habitability Laboratory at the University of Puerto Rico at Arecibo in a blog post Friday, revealing the true nature of the signals."
After further fueling speculation by summoning the world experts in the hunt for life elsewhere in the universe—The SETI Berkeley Research Center at the University of California—the team issued its conclusion.
"We are now confident about the source of the Weird! Signal," Mendez wrote.
"The best explanation is that the signals are transmissions from one or more geostationary satellites."
The signals only appeared around Ross 128 because it is located "close to the celestial equator where many geostationary satellites are placed," Mendez added.
The article also goes on to say:
Astronomers detected strange signals that seemed to be coming from a dwarf star about 11 light-years away, but have now determined that the signals are interference from a distant geostationary satellite
Mendez is further quoted the Space.com article Weird Radio Signals Detected from Nearby Red Dwarf Star:
Each of these hypotheses has its issues, he said. For example, solar flares of the type that could be responsible generally occur at lower frequencies. In addition, Mendez wrote, there aren't a lot of other objects in the Ross 128 field of view, "and we have never seen satellites emit bursts like that."
and in the Space.com article Not Aliens: Weird Radio Signal from Star Likely Has Duller Explanation:
"This explains why the signals were within the satellite’s frequencies and only appeared and persisted in Ross 128; the star is close to the celestial equator, where many geostationary satellites are placed," Mendez added. "This fact, though, does not yet explain the strong dispersion-like features of the signals (diagonal lines in the figure); however, it is possible that multiple reflections caused these distortions, but we will need more time to explore this and other possibilities."
Wikipedia lists the star Ross 128 at a declination of about +0° 48', and the latitude of the Arecibo telescope to be about +18° 21'. Satellites in geostationary orbits are in a circular ring approximately coincident with the equator at a distance from the center of the Earth of only about 42,000 km. From the Northern hemisphere, geostationary satellites will appear several degrees south in declination. (See this interesting answer.)
For Arecibo, I calculate about -3.2° declination.
QUESTION: How could a radio telescope have picked up satellite signals from a satellite at about -3.2° declination while observing a star at +0.8° declination? And why wasn't this immediately ruled out - surely radio telescopists know about satellites by now, especially the crowded ring of geostationary satellites! Aren't there standard procedures to rule these out before making announcements?
Could the half-angle acceptance really have been 4 degrees? Since the red dwarf is extremely close to the solar system, could proper motion put it 4 degrees away from the value listed in Wikipedia?
above: "The signal that seemed to emanate from the red dwarf star Ross 128, as detected by the Arecibo Observatory in May 2017 (enclosed in the red frame)." Credit: PHL @ UPR Arecibo From here.
Answer: The original team suggested three main possibilities: "(1) unusual stellar activity, (2) emissions from other background objects, or (3) interference from satellite communications" Clearly radio astronomers know about satellites.
They noted "in the absence of solid information about the signal, most astronomers would think that [radio interference or instrumental failures] would probably be the most likely explanation."
The satellite hypothesis has some problems it "does not yet explain the strong dispersion-like features of the signals (diagonal lines in the [weird] figure [in the question])" But on a balance of probablities this is the most likely explanation.
There are satellites in the region of Ross 128:
Location of Geostationary satellites operating between 4-8 GHz in the same region of the sky as Ross 128 (yellow dot).
Credit: Enriquez et al. (SETI Berkeley), http://seti.berkeley.edu/ross128.pdf
Your analysis of the location of a satellite, based on the geometry of satellite orbiting over the equator is not supported by this image.
The observers did not claim to have received a SETI signal. Indeed if you look at their original report they note that "aliens" is at the bottom of their possible explanations. The signal looks different from typical satellite bursts. They hoped the signal was astronomical in nature so there may have been a little wishful thinking.
So the most likely explanation is that we are observing multiple reflections from geostationary satellites transmitting on the 4-8 GHz band.
All quotations from the press release from the planetary habitability observatory. | {
"domain": "astronomy.stackexchange",
"id": 2377,
"tags": "radio-astronomy, artificial-satellite"
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.