anchor stringlengths 0 150 | positive stringlengths 0 96k | source dict |
|---|---|---|
Snakes and Ladders OOP | Question: I've created a simple Snakes and Ladders game. The goal is improving my knowledge about OOP. There are some basic rules.
I need to be able to move my token.
The token starts in the square 1.
If the token is on square 1 and is moved by 3 then the token is on square 4.
A player can win the game.
The first who reaches the square 100 is the winner.
You can't over-square the board. If you do it your position will remain the same. (97 + 4 = 97)
The moves are based on a roll dice between 1 and 6.
The player rolls the dice, then moves the token.
Keeping all this in mind, this is what I did.
Class CBoard.cs
using System;
using System.Collections.Generic;
namespace Snakes_and_ladders
{
class CBoard
{
private int[] board;
List<CPlayer> players = new List<CPlayer>();
public int[] Board { get => board; }
public CBoard()
{
// A 100-cell board is created by default.
board = new int[100];
Array.Clear(board, 0, board.Length);
}
// Function to create Ladders and Snakes. It changes the value
// of the array at index [i-1], where i is the key of the
// dictionary. The value that is saved at that index corresponds
// to the value of the board where the player moves in case of
// landing on said index.
//
// Ex: Key => 2, Value => 10 implies that there is a ladder that
// goes from cell 1 to cell 9 of the board.
private void createSnakesOrLadders(Dictionary<int, int> dataDict)
{
foreach (KeyValuePair<int, int> data in dataDict)
{
board[data.Key - 1] = data.Value - 1;
}
}
// Default constructor overload
// Creates an A x L board by adding ladders and snakes.
public CBoard(int altura, int largo,
Dictionary<int, int> ladders = null, Dictionary<int, int> snakes = null)
{
// At a minimum, a 2x2 board is necessary.
if (altura < 2 || largo < 2)
throw new Exception("The height and length need to be at least greater than 1.");
// Initial size of the number of ladders and snakes on the board.
int ladderSize = 0;
int snakesSize = 0;
// If the board is not null, we save the actual number of ladders and snakes.
if (!(ladders is null))
ladderSize = ladders.Count;
if (!(snakes is null))
snakesSize = snakes.Count;
// We create the board, with values set to 0.
board = new int[altura * largo];
Array.Clear(board, 0, board.Length);
// If the total size of the number of ladders and snakes is less than half the board
// ladders and snakes are created on the board. If not, the exception is thrown.
if ((ladderSize * 2) + (snakesSize * 2) / 2 < board.Length)
{
if (!(ladders is null))
createSnakesOrLadders(ladders);
if (!(snakes is null))
createSnakesOrLadders(snakes);
}
else
{
throw new Exception("The total sum of Snakes and Ladders cannot exceed 50% of the board.");
}
}
}
}
Class CPlayer.cs
using System;
using System.Threading;
namespace Snakes_and_ladders
{
class CPlayer
{
int[] board;
private int position = 0;
private string nickName = null;
private int diceResult = 0;
private bool winner = false;
public int Position { get => position + 1; }
public int DiceResult { get => diceResult; }
public string NickName { get => nickName; }
public bool Winner { get => winner; }
public CPlayer(string nickName, CBoard board) {
this.nickName = nickName;
this.board = board.Board;
}
public void Roll()
{
// Wait 30 milliseconds to change the random seed.
Random rnd = new Random();
Thread.Sleep(30);
diceResult = rnd.Next(1, 7);
}
public void Move()
{
// Move the player N dice cells.
if (position + diceResult < board.Length)
if (board[position + diceResult] == 0)
position = position + diceResult;
else
position = board[diceResult + position];
if (position == board.Length - 1)
winner = true;
}
}
}
Program.cs
using System;
using System.Collections.Generic;
namespace Snakes_and_ladders
{
class Program
{
static void Main(string[] args)
{
// Ladders
Dictionary<int, int> ladderDicctionary = new Dictionary<int, int>()
{
{2, 38}, {7, 14}, {8, 31}, {16, 26}, {21, 42},
{28, 84}, {36, 44}, {51, 67}, {71, 91}, {78, 98}, {87, 94}
};
// Snakes
Dictionary<int, int> snakesDicctionary = new Dictionary<int, int>()
{
{15, 5}, {48, 10}, {45, 24}, {61, 18}, {63, 59},
{73, 52}, {88, 67}, {91, 87}, {94, 74}, {98, 79}
};
CBoard customBoard = new CBoard(10, 10, ladderDicctionary, snakesDicctionary);
// Board by default.
CBoard board = new CBoard();
List<CPlayer> players = new List<CPlayer>();
int n_players = 0;
do
{
Console.Write("Enter the number of players: ");
n_players = Convert.ToInt32(Console.ReadLine());
if(n_players <= 1)
Console.WriteLine("The total of players need to be 2 or more.");
} while (n_players <= 1);
for(int i=1; i < n_players + 1; i++)
{
Console.Write("Enter the name for the player {0}: ", i);
string nickName = Console.ReadLine();
players.Add(new CPlayer(nickName, customBoard));
}
string pressed = "";
int count = 0;
do
{
if (count >= n_players)
count = 0;
CPlayer currentPlayer = players[count];
Console.WriteLine("It's the player {0}'s turn", currentPlayer.NickName);
Console.WriteLine("Press R to Roll the die or A to abort the game.");
pressed = Console.ReadLine();
if(pressed.Equals("R", StringComparison.CurrentCultureIgnoreCase)) {
Console.WriteLine("--------------------------------");
currentPlayer.Roll();
Console.WriteLine("Dice's result: {0} ", currentPlayer.DiceResult);
int previousPosition = currentPlayer.Position;
currentPlayer.Move();
Console.WriteLine("You moved from cell [{0}] ====> to cell [{1}]", previousPosition, currentPlayer.Position);
if (currentPlayer.Winner)
{
Console.WriteLine("Player {0} won the game.", currentPlayer.NickName);
break;
}
Console.WriteLine("--------------------------------");
count++;
}
} while (!pressed.Equals("A", StringComparison.CurrentCultureIgnoreCase));
Console.ReadLine();
}
}
}
Based on the code, I'd like to know what I could improve, what mistakes I did, what advice you'd give me, in a nutshell: How could I improve the code and be better at OOP. Thanks!
Answer:
When an array is created, its elements are automatically initialized to their default values (zero for int). Array.Clear(board, 0, board.Length); is redundant.
Rolling the dice: You are waiting a few milliseconds before creating the Random object to get different seeds. Better: Create it as a static field. The effect is that only one Random object will be created (and seeded once), no matter how many players you create.
class CPlayer
{
private static readonly Random rnd = new Random();
public void Roll()
{
diceResult = rnd.Next(1, 7);
}
...
}
We can also mark board as readonly since it is set only in the constructors. Note: readonly refers to the reference stored in board, not the contents of the board.
You are using dictionaries for the definition of snakes and ladders; however, you are never doing a dictionary lookup. Dictionaries are fast when looking up a value by key either through the indexer or the method TryGetValue. Inserting into a dictionary involves some overhead. Better use a list or an array.
E.g., you could create an array of KeyValuePair or ValueTuple:
(int from, int to)[] ladders = { (2, 38), (7, 14),… };
You are testing whether ladders and snakes are null twice. Do it once. You can still throw an exception after having added ladders and snakes to the board. This does not hurt. In C# 9.0 you can use the test is not null:
if (ladders is not null) {
ladderSize = ladders.Count;
createSnakesOrLadders(ladders);
}
// TODO: Do the same for the snakes.
The calculation of the total number of snakes and ladders is wrong. It must be:
if (ladderSize + snakesSize > board.Length / 2) {
throw new Exception("The total sum of Snakes and Ladders cannot exceed 50% of the board.");
}
In class CPlayer you are initializing position, nickName, diceResult and winner to their default values. Other than local variables, class fields are automatically initialized to their default values. You can do it if you think that it better reflects your intention, but it is not necessary.
The usual naming conventions for C# is to not prepend a C for class names. Often developers prepend field names with an underscore to better distinguish them from local variables.
Optional: You could simplify moving the token if instead of initializing the board with 0s, you initialized it with the indexes:
for (int i = 0; i < board.Length; i++) { board[i] = i; }
Then, no matter whether there is a snake or ladder or not:
position = board[diceResult + position];
Another simplification would be to replace null initial snakes and ladders arrays by empty ones. This eliminates some checks and the need for storing the sizes in variables as you can then directly use the array lengths.
public CBoard(int altura, int largo,
(int from, int to)[] ladders = null, (int from, int to)[] snakes = null)
{
ladders = ladders ?? Array.Empty<(int, int)>();
snakes = snakes ?? Array.Empty<(int, int)>();
...
Instead of testing i < n_players + 1, you can test i <= n_players.
You can use modulo arithmetic to make the player index turn around:
count = (count + 1) % n_players;
You can use string interpolation:
Console.WriteLine($"Player {currentPlayer.NickName} won the game.");
is easier to read than
Console.WriteLine("Player {0} won the game.", currentPlayer.NickName);
You can use auto properties in some cases. They automatically create an invisible backing field.
private readonly string nickName;
public string NickName { get => nickName; }
Can be replaced by
public string NickName { get; }
Note that a getter-only property can be set in the constructor.
By using
// pressed is declared as char
pressed = Char.ToUpper(Console.ReadKey().KeyChar);
Console.WriteLine();
instead of
pressed = Console.ReadLine();
The user does not need to press Enter after entering R or A and you eliminate the need to do a complex comparison involving StringComparison.CurrentCultureIgnoreCase.
I suggested some changes using C# techniques that you might not be familiar with. You can just ignore them and keep your approach, if you prefer.
Here is a possible solution
class Board
{
public int[] GameBoard { get; }
public Board()
{
GameBoard = CreateBoard(100);
}
public Board(int altura, int largo,
(int, int)[] ladders = null, (int, int)[] snakes = null)
{
if (altura < 2 || largo < 2) {
throw new Exception("The height and length need to be at least greater than 1.");
}
// Ensure non-null arrays.
ladders = ladders ?? Array.Empty<(int, int)>();
snakes = snakes ?? Array.Empty<(int, int)>();
GameBoard = CreateBoard(altura * largo);
if (ladders.Length + snakes.Length > GameBoard.Length / 2) {
throw new Exception("The total sum of Snakes and Ladders cannot exceed 50% of the board.");
}
CreateSnakesOrLadders(ladders);
CreateSnakesOrLadders(snakes);
}
private int[] CreateBoard(int size)
{
int[] board = new int[size];
for (int i = 0; i < size; i++) {
board[i] = i;
}
return board;
}
private void CreateSnakesOrLadders((int, int)[] jumps)
{
foreach (var (from, to) in jumps) {
GameBoard[from - 1] = to - 1;
}
}
}
class Player
{
private static readonly Random _rnd = new Random();
private readonly int[] _board;
private int _position;
public int Position => _position + 1;
public int DiceResult { get; private set; }
public string NickName { get; }
public bool Winner { get; private set; }
public Player(string nickName, Board board)
{
NickName = nickName;
_board = board.GameBoard;
}
public void Roll()
{
DiceResult = _rnd.Next(1, 7);
}
public void Move()
{
if (_position + DiceResult < _board.Length) {
_position = _board[DiceResult + _position];
if (_position == _board.Length - 1) {
Winner = true;
}
}
}
}
class Program
{
public static void Main_()
{
(int, int)[] ladders = {
(2, 38), (7, 14), (8, 31), (16, 26), (21, 42),
(28, 84), (36, 44), (51, 67), (71, 91), (78, 98), (87, 94)
};
(int, int)[] snakes = {
(15, 5), (48, 10), (45, 24), (61, 18), (63, 59),
(73, 52), (88, 67), (91, 87), (94, 74), (98, 79)
};
var board = new Board(10, 10, ladders, snakes);
var players = new List<Player>();
int numPlayers;
do {
Console.Write("Enter the number of players: ");
numPlayers = Convert.ToInt32(Console.ReadLine());
if (numPlayers <= 1) {
Console.WriteLine("The total of players need to be 2 or more.");
}
} while (numPlayers <= 1);
for (int i = 1; i <= numPlayers; i++) {
Console.Write($"Enter the name for the player {i}: ");
string nickName = Console.ReadLine();
players.Add(new Player(nickName, board));
}
char pressed;
int count = 0;
do {
Player currentPlayer = players[count];
Console.WriteLine($"It's the player {currentPlayer.NickName}'s turn");
Console.WriteLine("Press R to Roll the dice or A to abort the game.");
pressed = Char.ToUpper(Console.ReadKey().KeyChar);
Console.WriteLine();
if (pressed == 'R') {
Console.WriteLine("--------------------------------");
currentPlayer.Roll();
Console.WriteLine($"Dice's result: {currentPlayer.DiceResult} ");
int previousPosition = currentPlayer.Position;
currentPlayer.Move();
Console.WriteLine($"{currentPlayer.NickName} moved from cell [{previousPosition}] ====> to cell [{currentPlayer.Position}]");
if (currentPlayer.Winner) {
Console.WriteLine($"Player {currentPlayer.NickName} won the game.");
break;
}
Console.WriteLine("--------------------------------");
count = (count + 1) % numPlayers;
}
} while (pressed != 'A');
}
}
Q & A
Also, just to be sure, what would be the appropriate scenarios where you would use auto properties?
Auto properties were introduced in C# 3.0 and were improved in later versions. They aim to make your life easier. Use them whenever you can, i.e., when there is no logic other than getting or setting the backing field. In Position we cannot use it because we are returning _position + 1.
instead of var in the foreach statement, what'd be its type? I tried with (int,int)[] but it didn't work. Happened the same with (int, int)
Tuples can be deconstructed, i.e., their components can be extracted into variables. The foreach loop does it with var (from, to) in jumps. This could also be written as (int from, int to) in jumps. This declares two new int variables from and to and assigns them the tuple elements.
Alternatively, we could keep the tuple as is and write var jump in jumps or explicitly (int, int) jump in jumps and then access its components through the loop variable jump with jump.Item1 and jump.Item2. We can also give custom names to the tuple items: (int from, int to) jump in jumps and then access them with jump.from and jump.to.
Note that we are looping through an array (int, int)[] jumps, i.e., an array consisting of elements of the tuple type (int, int). Therefore, the loop variable must be a (int, int).
As you can see, we have a lot of options here: deconstruct vs. keeping the tuple, implicit versus explicit type declarations, using standard vs. custom tuple item names.
public int Position => _position + 1; this line is acting like a getter? It means that I can omit the get statement in that way?
This is quite a new addition to the language and is called “expression bodied members”. This is only a simplified syntax variant having no special meaning. It can be used whenever a method or a getter consists of a single return statement or when a void method, a setter or a constructor contains a single expression (where an assignment is an expression). Examples:
string GetGreeting() { return “Hello”; } same as string GetGreeting() => “Hello”;.
string Greeting { get { return “Hello”; } } same as string Greeting => “Hello”;.
string Greeting { get { return g; } set { g = value; } } same as
string Greeting { get => g; set => g = value; }. | {
"domain": "codereview.stackexchange",
"id": 42295,
"tags": "c#, object-oriented"
} |
What is up and down conversion in photonics? | Question: I have heard the terms up and down conversion in photonics/photovoltaics articles. What do the terms mean?
Answer: I complete Gerard's answer: up-conversion is the reverse process, where two low frequency photons are converted to a single high-frequency photon.
So basically down- and up-conversion correspond to a frequency conversion of the photons through a nonlinear interaction. The up/down term correspond to the "direction" of the frequency change.
I have to add that the "parametric" term has nothing to do with entanglement, but with the fact that the χ⁽²⁾ nonlinearity used is analogue to a classical parametric oscillator http://en.wikipedia.org/wiki/Parametric_oscillator . | {
"domain": "physics.stackexchange",
"id": 96476,
"tags": "photons, terminology, solar-cells, photonics"
} |
Was the mitochondrion or chloroplast first? | Question: I still don't know if the mitochondrion or chloroplast was first? I've looked for it on the internet and in various books but haven't found anything. Does anyone have the answer and a theory which backs up this answer?
Answer: Mitochondria evolved before chloroplasts.
We know this because Mitochondria form a monophyletic group: e.g. all life with mitochondria traces back to a single common ancestor (source). Since the group with chloroplasts groups within this clade, it must be the case that either (a) chloroplasts were obtained by an organism that already had mitochondria or (b) chloroplasts were independently lost by multiple lineages within the Eukaryotic clade and then many of these lineages re-acquired chloroplasts by secondary endosymbiosis. Since (a) is a (much) more parsimonious explanation it is the one it makes sense to accept. | {
"domain": "biology.stackexchange",
"id": 9094,
"tags": "cell-biology, mitochondria, chloroplasts"
} |
Does stereo_proc undistort images? | Question:
I have tried to use the image_pipeline stereo_image_proc to get the rectified images.
The question is, when stereo_image_proc produces image_rect images, should they be undistorted? Have a look at my screenshot:
I have given the distortion matrix to the topic and I have tried to undistort the images myself and quite happy with it.
Originally posted by EdwardNur on ROS Answers with karma: 115 on 2019-03-16
Post score: 0
Answer:
Yes, it is written here. Quote :
stereo_image_proc performs the duties of image_proc for both cameras, undistorting and colorizing the raw images. Note that for properly calibrated stereo cameras, undistortion is actually combined with rectification, transforming the images so that their scanlines line up for fast stereo processing.
Originally posted by tuandl with karma: 358 on 2019-03-16
This answer was ACCEPTED on the original site
Post score: 0 | {
"domain": "robotics.stackexchange",
"id": 32657,
"tags": "ros, ros-melodic, image-pipeline"
} |
Is it true that we see the center of the milky-way for only half of the year? | Question: I'm not an astronomer, please excuse my non-formal language.
Since we are located in one of the arms of the Milky way, the center of the galaxy should be in one direction from our location, while in the other direction, we stare into the "other side".
So at nights as we are in the orbit area that is between the sun and the center of the galaxy we are able to see the center (the "milky-way"), while when in the other side of the orbit, at nights, we are not able to see it.
Does this mean we only see the milky way (meaning the milky-way's famous pattern) half of the year?
In the below diagram, the small circle is our year-orbit around our sun. Red dot is the earth location on day 0 of the year and the green dot is the earth location on day 180th of the year.
I know that as we are orbiting our sun, it also orbits the galaxy, but since it is so slow, it is negligible for this questions.
Is this reasonable?
I mean, if we look at the sky, we will see the center of the galaxy only during half of the year?
Answer: No, although there are times when it can't be seen, it isn't true that it is visible for 183 days of the year.
The general question could be "If I take an arbitrary location on the sky (say a randomly chosen star) will it be visible for half the year? The answer is "it depends on the star!" Polaris and many other stars are circumpolar for Northern hemisphere observers and are visible every night, contrariwise, Polaris is never visible for Southern Hemisphere observers.
Now, the galactic centre happens to be located in Sagittarius, so co-incidentally is quite close to the plane of the solar system. This means that for about two months of the year, the sun is too close to make observations easy, certainly in visible light from the Earth's surface. At other times it will be visible, perhaps briefly. Sagittarius and the galactic centre are also quite a few degrees South of the equator, so it is more easily visible from the southern hemisphere.
At 50 degrees North, The galactic centre sets at twilight at the start of October and doesn't rise until dawn at the middle of February, between February and October (about 7½ months) the galactic centre is above the horizon at some point during the night. (The exact number of days depends on your definition of "dawn" and "above the horizon") More southerly observers will see the galactic centre for longer. 50 degrees South, the galactic centre is above the horizon at some time during the night between January and November. In Antarctica, the galactic centre is circumpolar, and so is above the horizon every night. But as the summer sun in Antarctica doesn't set, there are days when you can't "see" it. The optimum location would be 30 degrees South (or in space) | {
"domain": "astronomy.stackexchange",
"id": 4616,
"tags": "solar-system, milky-way"
} |
tf problem. scan samples always plotted wrt origin | Question:
Hello,
I'm trying to run gmapping over a pionner AT using rosaria and a hokuyo laser. Everything seems to work fine and I'm able to move the robot. However, when the robot acquires the laser measurements they are always plotted in rviz with respect to the origin of coordinates instead of the base_link. I apply some dynamic basic transformations and check that the odometry is represented correctly.
The frame tree is the following: /laser -> base_laser -> base_link -> odom -> map as the basic tf tutorial explains
Do you have any idea about why the laser measurements are fixed with respect to the origin of coordinates while the base_link is moving? Are the transformations chained between them?
Originally posted by Ferherranz on ROS Answers with karma: 41 on 2013-01-29
Post score: 0
Original comments
Comment by Ben_S on 2013-01-29:
Please give us some more informations. Are the transformations base_link -> base_laser and base_laser -> laser static? And if not, who is responsible for publishing them? Where do your coordinatesystems appear in rviz if you turn them on? Maybe a screenshot of rviz and your tf-tree?
Answer:
I'm not sure what you mean by "plotted with respect to origin".
Are they absolute to the origin? Do they appear at the place in the map, where they actually belong (i.e. do they match the map)? In that case this is the correct and expected behaviour. If you want to see it differently, i.e. focused around the robot, set the Fixed Frame to /base_link.
Are all laser scans displayed at the origin while the robot moves (correctly)? In that case something is set up wrong, probably with tf. When setting the fixed frame to /laser the scans should stay centered (like in a pure laser viewer). With fixed frame to /odom, /map they should move. You can check how the tf information looks with the tf plugin.
Originally posted by dornhege with karma: 31395 on 2013-01-29
This answer was ACCEPTED on the original site
Post score: 0 | {
"domain": "robotics.stackexchange",
"id": 12618,
"tags": "ros"
} |
Temperature at different points in a metal rod during heat conduction | Question: $$ k = \frac{\frac{Q}{t}}{A(\frac{T_1 - T_2}{L})} $$
where k is thermal conductivity of the solid,
Q is total amount of heat transferred,
t is time taken for the heat transfer,
A is area of the cross section,
L is the length of the solid
and T1 and T2 are the temperatures at the hotter end and the colder end respectively.
According to this formula, when a metal rod is getting heated through conduction, the temperature gradient $$\frac{T_1 - T_2}{L}$$ decides what the temperature will be at different points down the length. However, physics also states that when heat transfer takes place, it goes on until the temperature of the hotter object and the cooler object becomes equal. So, how can heat conduction stop before temperature becomes the same throughout the length of the metal rod without contradicting the basic theory of heat transfer?
Answer:
"However, physics also states that when heat transfer takes place, it goes on until the temperature of the hotter object and the cooler object becomes equal."
That's only true in equilibrium.
In this case, we know the system is not in equilibrium because we have heat being added ($Q$). If $Q = 0$ after some amount of time, $\frac {T_1 - T_2}{L}$ will go to $0$.
I believe this even fits with the wording you chose:
"According to this formula, when a metal rod is getting heated through conduction, the temperature gradient decides what the temperature will be at different points down the length."
(emphasis mine) Basically, the gradient can only exist when there is heat being transferred through the rod. When there is no net heat transfer through the rod, the gradient becomes a flat line due to the bar having a uniform temperature.
Another thing to point out is that they aren't talking about the temperature gradient when convection stops. They are talking about the temperature gradient at some time during the heat transfer. This could either be a steady-state conduction or a snapshot of a transient process at one point in time. | {
"domain": "physics.stackexchange",
"id": 53693,
"tags": "thermodynamics, temperature, heat-conduction"
} |
Are there other properties besides lower boiling point that make isobutane a better refrigerant than butane? | Question: Asked differently, if -1C is low enough for the application is there any reason not to use butane rather than isobutane as the working fluid in a refrigeration system?
Answer: They have different pressure curves:
But to answer the question: yes, there are other properties that make isobutane preferred over n-butane. The most widely
used refrigerants in household appliances are isobutane and R134a according to "Evaluation of N-Butane as a Potential Refrigerant
for Household Compressors" by Preben Bjerre & Per Larsen (2006). See the following quote:
The theoretical evaluation shows at LBP CECOMAF that N-butane (R600) has:
• 2.8% higher theoretical COP compared to isobutane (R600a)
• About 70% of the capacity of isobutane
• 10% higher theoretical COP compared to R134a
The measurements show that the higher theoretical COP of N-butane compared to isobutane can not be found in the
measurements. This is caused by the higher sensitivity to clearance volume, suction gas pressure drop and heating,
compared to isobutane. However the COP level of N-butane in the present compressor design is at the same level as
isobutane, which today is the refrigerant giving the highest COP on the market.
Lifetime test show acceptable results. The wear tendency is comparable with isobutane.
N-butane is an option for reaching high COP levels in household appliances, but it does not offer significant
advantages to isobutane on existing isobutane stroke volumes. However it opens the possibility to extend the range
to lower capacity by using the existing compressor designs. The disadvantages are that the cost and size are
unchanged.
$\huge \text{Glossary:} $
LBP: Low Back Pressure. "The minimum evaporating temperature and the condensing temperature allows for the
identification of the compressor application (LBP, MBP, or HBP). Low Back Pressure
systems such as freezers have evaporator temperatures below -20ºC (-4ºF). "
CECOMAF: European Committee of Refrigeration Equipment Manufacturers. Or, in French Comité Européen des Constructeurs de Matériel Frigorifique.
COP: Coefficient of Performance. "For a refrigeration system a COP of 4 indicates that 1 kW of electricity is needed for a evaporator to extract 4 kW of heat."
R134a: refrigerant code name for 1,1,1,2-Tetrafluoroethane:
R600: refrigerant code name for n-butane:
R600a: refrigerant code name for isobutane: | {
"domain": "physics.stackexchange",
"id": 48663,
"tags": "thermodynamics, material-science, physical-chemistry, freezing, chemical-compounds"
} |
Reason behind formation of positive supercoils during DNA replication/ transcription | Question: When a twist is unwound without cutting the DNA strands or is removed by cutting the strand(s) and resealing, negative supercoils are introduced in the DNA.
From Cell and Molecular Biology -Karp
But strangely enough unwinding of DNA by helicase cause the DNA ahead of the primosome to form positive supercoils. Why is it so?
From: Molecular cell biology -Lodish
My speculations: Underwinding cause the adjacent DNA to overwind and as the DNA overwinds the associated stress developed cause the DNA to form positive supercoils (which would have happened naturally had new twists been introduced in the DNA (Time:1:54) ).
I think removal of twist creates negative supercoils initially but when the unwinding causes too much overwinding of adjacent DNA, the strain developed cause the already negatively supercoiled DNA to form positive supercoils.
Answer: It's hard to explain in text, so here's a video:
https://www.youtube.com/watch?v=J4YlcD59-yw
Imagine the shoelaces are two DNA strands in a double helix. They are topologically constrained at each end. As the pen (helicase) moves through the helix, it creates overwound DNA in front of it and underwound DNA behind it. | {
"domain": "biology.stackexchange",
"id": 6625,
"tags": "molecular-biology, dna, dna-replication, supercoiling"
} |
Expression of mass flow rate for steady flow | Question: According to Wikipedia the mass flow rate is given by:
$${\dot {m}}=\iint _{CS}\rho \mathbf {v} \cdot {\rm {d}}\mathbf {A}$$
And using the Reynolds Transport Theorem (if there is no sources):
$$\dfrac{dm_{sys}}{dt} =\iint_{CV}\dfrac{\partial \rho}{\partial t}\mathrm dV\llap{--} + \iint _{CS}\rho \mathbf {v} \cdot {\rm {d}}\mathbf {A} = 0$$
If the flow is steady then:
$$\dot m = 0$$
But as far as I know the mass flow rate for steady flow is also given by $$\dot m = \rho\cdot V \cdot A$$
And it will never be zero (if $\rho$, $V$, and $A$ are not zero)
What I am missing?
Answer: Working through the math:
$$
\dot{m} = \frac{dm}{dt} = \iiint_{CV} \frac{\partial \rho}{\partial t} dV + \iint_{CS} \rho \vec{V}\cdot dA
$$
If the flow is steady, all time derivatives are zero and you are left with:
$$
0 = \iint_{CS} \rho \vec{V} \cdot dA
$$
If we consider a 1D flow just as an example, this will give you:
$$
0 = -u_{\text{left}} \rho_{\text{left}} A_{\text{left}} + u_{\text{right}} \rho_{\text{right}} A_{\text{right}}
$$
or
$$
u_{\text{left}} \rho_{\text{left}} A_{\text{left}} = u_{\text{right}} \rho_{\text{right}} A_{\text{right}}
$$
which is an expression that is probably familiar. So, that all shows that $\dot{m} = 0$.
Now to your other equation, $\dot{m}_{\text{system}} = \rho V A$. That doesn't hold for steady flow. So the part you are missing is that your second statement isn't true, and there's no way to get this expression from the equation we started with if we assume the flow is steady. This expression will sometimes show up when specifying boundary conditions, as in $\dot{m}_{\text{in}} = \rho V A$ -- but if it is steady, that means that $\dot{m}_{\text{out}} = \rho V A$ also and the total $\dot{m}$ of the system is zero.
In other words, if you see something that says $\dot{m} = \rho V A$ and it also says the flow is steady, it means they are specifying the $\dot{m}$ across one of the control surfaces. Not all of the control surfaces. | {
"domain": "physics.stackexchange",
"id": 56405,
"tags": "fluid-dynamics, mass, conservation-laws, flow"
} |
Can genetically engineering the DNA of a human zygote, make it a twin of another human in entirety? | Question: My question is simple. If I wanted to make an exact twin of an individual, will genetically engineering the DNA of the zygote to match with the individual suffice?
Answer: The answer is no, and it is because the sequence of the genome is not all the information that is required for gene expression and development; there are also epigenetic factors.
A lot of patterns of epigenetic marks, such as most of DNA methylation and some histone modification patterns, are set during parental germline development, and these marks are transmitted to the zygote by gametes (except paternal histones, those are not transmitted because they are temporarily substituted for protamines in sperm for greater compaction). Therefore, the naked DNA inserted into the zygotic nucleus will not have access to the machinery that established the epigenome during development of the gametes.
And even though nucleosomes may assemble in the zygote on the naked DNA and histones may acquire post-translational modifications, and cytosines may get methylated, the zygote is most definitely going to have no means to correctly set distinct patterns of epigenetic marks at alleles that are normally imprinted, i.e. those alleles whose epigenetic state depends on the gender of the organism that produced the gamete.
Therefore, the twin created in this way will have the same DNA sequence, but it will be a subject to disorders associated with imprinting | {
"domain": "biology.stackexchange",
"id": 3501,
"tags": "genetics, reproduction, human-genetics"
} |
Question regarding the colour of Benzoic acid and Benzyl alchohol | Question: This question is primarily based on the Cannizzaro reaction, where a base induces a disproportionation reaction of non-enolizable aldehydes (benzaldehyde, formaldehyde, etc) to form an alcohol and a carboxylic acid.
In the video by Nile Red exploring the Cannizzaro reaction (https://www.youtube.com/watch?v=qEBC204WTKs), the product mixture contained both Benzoic acid and Benzyl alcohol and had an orange color. I am aware that the benzoic acid was in the form of sodium benzoate due to sodium hydroxide also reacting with the acid produced.
My question is, where does the colour come from?
I am guessing it is either from the benzyl alcohol or sodium benzoate, but I have learned that most organic compounds are colorless.
Moreover, if the aldehyde used was something like formaldehyde, will the orange color also form?
Thanks in advance for the clarifications.
Answer: None of the components you mentioned (benzoic acid, benzyl alcohol, sodium benzoate) has a yellow colour.
The colour in organic compounds is closely tied to their delocalized systems of pi-electrons and depends on the gap between the highest occupied molecular orbital (HOMO) and the lowest unoccupied molecular orbital (LUMO).
When the energy of this gap corresponds to a wavelength that is visible, the compound absorbs this wavelength and the colour of the compound is the complementary colour.
A good example is condensed aromatics. For benzene or naphthalene the gap between HOMO and LUMO is too big; therefore, the corresponding wavelength is in the region of UV light.
For anthracene, the absorbed wavelength is violet and therefore the colour of the compound is yellow.
Back to your question. Because the reaction happens under strongly basic conditions, high temperature and with many aromatic compounds, I think side reactions will occur where multiple aromatics condense together and therefore the solution turns yellow to orange.
"domain": "chemistry.stackexchange",
"id": 15295,
"tags": "organic-chemistry, redox, aromatic-compounds"
} |
Error using rqt with Xming 6.9.0.31 through PuTTY | Question:
I am using Ubuntu 12.04 64-bit server in a Virtual Machine on my Win7 x86-64 laptop, I have been using text-only terminals with the VM all the time. I just installed ROS "hydro" base version on the VM using apt tools.
[u@ubuntu:~/ros/catkin_ws]$ lsb_release -a
No LSB modules are available.
Distributor ID: Ubuntu
Description: Ubuntu 12.04.5 LTS
Release: 12.04
Codename: precise
By using Xming 6.9.0.31 X server on my Win7 host, I can use "turtlesim" app correctly and the turtle can be moved using keyboard app or rostopic commands.
But error occurs when I tried to use the "rqt" tool:
[u@ubuntu:~/ros/catkin_ws]$ rosrun rqt_graph rqt_graph
[xcb] Unknown sequence number while processing queue
[xcb] Most likely this is a multi-threaded client and XInitThreads has not been called
[xcb] Aborting, sorry about that.
python: ../../src/xcb_io.c:274: poll_for_event: Assertion `!xcb_xlib_threads_sequence_lost' failed.
Aborted (core dumped)
BTW, I was using Putty to forward the access to the DISPLAY server on my Win7 host from within the Ubuntu VM and the rviz seems can show up a Window on my Win7 host w/o issues.
Does anyone have any hints here?
Regards,
Yf
Originally posted by yf on ROS Answers with karma: 1 on 2015-05-04
Post score: 0
Answer:
This is quite a specific and unique setup you have there ;-)
So I can't easily reproduce this, but searching for your error message brought me to the Qt.AA_X11InitThreads flag, that can be set before the QApplication is constructed:
http://doc.qt.io/qt-4.8/qt.html
This flag is not being set currently by rqt, so you could try running rqt from source and edit the qt_gui/main.py file at about line 173:
https://github.com/ros-visualization/qt_gui_core/blob/groovy-devel/qt_gui/src/qt_gui/main.py#L173
Directly before the line constructing the QApplication instance add a new line setting the mentioned attribute:
QApplication.setAttribute(Qt.AA_X11InitThreads, True)
app = QApplication(argv)
If that solves your problem, you can file a pull request with this change, so it gets tested on other setups and can hopefully be added to the code.
Originally posted by Dorian Scholz with karma: 391 on 2015-06-24
This answer was ACCEPTED on the original site
Post score: 1
Original comments
Comment by gvdhoorn on 2016-06-06:
Update: this was added to rqt in ros-visualization/qt_gui_core#62. | {
"domain": "robotics.stackexchange",
"id": 21604,
"tags": "ros-hydro, rqt"
} |
Difficulty in proof of relationship between $Y$, $B$, $\sigma$ | Question: On internet, I was looking for the proof regarding the following relationship between $Y$ young's modulus,$\sigma$ poisson's ratio, and $B$ bulk modulus: $Y=3B(1-2\sigma)$, and I came across the following proof, which I have difficulty in understanding it.
Consider a cube subjected to three equal stresses S on the three faces
The total strain in one direction or along one face due to the application of
volumetric stress S is given as
$$\text{linear Strain} = S/Y – \sigma S/Y –\sigma S/Y = S/Y( 1-2\sigma)$$
So $\text{linear Strain} =(S/Y)( 1-2\sigma)$. Now $\text{Volumetric Strain} = 3 \cdot \text{Linear Strain}$.
So $\text{Volumetric Strain} = 3 (S/Y)( 1-2\sigma)$
Now by definition of bulk modulus of elasticity K. We have $K= \frac{\text{volumetric Stress}} {\text{volumetric Strain}}$.
$K= \frac{S}{{(3(S/Y)( 1-2\sigma)}}$ , So $Y=3K(1-2\sigma)$
I am having main doubt regarding linear strain equation, especially why $–\sigma S/Y$ is being added two times. Rest of the proof is understandable to me. So please help me there. Moreover I want to ask is there, any simpler proof for this equation?
Also, while finding/researching, I can't come up with proof for another similar equation: $Y=2\eta(1+\sigma)$, where $\eta$ is shear modulus. So I want help here too.
Answer: Here is a general set of relations:
Start from the isotropic stress-strain relation
$$
\sigma_{ij} = \lambda \delta_{ij} e_{kk} + 2\mu e_{ij}
$$
in terms of the Lame constants $\lambda$, $\mu$.
By considering particular
deformations, we can express the more directly
measurable bulk modulus, shear modulus,
Young's modulus and
Poisson's ratio in terms of $\lambda$ and $\mu$.
The bulk modulus $\kappa$ is defined by
$$
dP =-\kappa \frac {dV}{V},
$$
where an infinitesimal isotropic external pressure $dP$ causes a
change $V\to V+dV$ in the volume of the material.
This applied pressure
corresponds to a surface stress of
$\sigma_{ij}=-\delta_{ij}\,dP$.
An isotropic expansion away from the origin displaces points $x_i\to x_i+\eta_i$ in the material so
that
$$
\eta_i =\frac 13 \frac {dV}{V}x_i.
$$
The strains
$$
e_{ij}= \frac 12 (\partial_i\eta_j+\partial_j \eta_i)
$$
are therefore given by
$$
e_{ij} = \frac 13 \delta_{ij} \frac {dV}{V}.
$$
Inserting this strain into the stress-strain relation gives
$$
\sigma_{ij}= \delta_{ij}(\lambda +\frac 23 \mu)\frac
{dV}{V}= - \delta_{ij} dP.
$$
Thus
$$
\kappa= \lambda+\frac 23 \mu.
$$
To define the shear modulus, we assume a deformation $\eta_1 =
\theta x_2$, so $e_{12}=e_{21}=\theta/2$, with all other
$e_{ij}$ vanishing.
The
applied shear stress is $\sigma_{12}=\sigma_{21}$. The
shear modulus, is defined to be $\sigma_{12}/\theta$. Inserting the strain components into
the stress-strain relation gives
$$
\sigma_{12} =\mu\theta,
$$
and so the shear modulus is equal to the Lame constant $\mu$.
We can therefore write the generalized Hooke's law as
$$
\sigma_{ij} = 2\mu(e_{ij} -{\textstyle{\frac 13}} \delta_{ij} e_{kk})
+\kappa e_{kk} \delta_{ij},
$$
which reveals that the shear modulus is associated with the traceless
part of the strain tensor, and the bulk modulus with the
trace.
Young's modulus $Y$ is measured by stretching a wire of
initial length
$L$ and square cross section of side $W$ under a
tension $T=\sigma_{33}W^2$.
We define $Y$ so that
$$
\sigma_{33} = Y\frac {dL}{L}.
$$
At the same time as the wire stretches, its
width changes $W\to W+dW$. Poisson's ratio $\sigma$ is defined by
$$
\frac{dW}{W}=-\sigma \frac{dL}{L},
$$
so that $\sigma$ is positive if the wire gets thinner as it
gets longer. The displacements are
$$
\eta_3 = z \left(\frac {dL}{L}\right),\nonumber\\
\eta_{1}=x
\left(\frac{dW}{W}\right) = - \sigma x
\left(\frac{dL}{L}\right),\nonumber\\
\eta_{2} = y
\left(\frac{dW}{W}\right)= - \sigma y
\left(\frac{dL}{L}\right),
$$
so the
strain components are
$$
e_{33} = \frac {dL}{L},\quad e_{11}=e_{22} =
\frac{dW}{W}=-\sigma e_{33}.
$$
We therefore have
$$
\sigma_{33} = (\lambda(1-2\sigma) +2\mu)\left(\frac
{dL}{L}\right),
$$
leading to
$$
Y= \lambda(1-2\sigma)+2\mu.
$$
Now, the side of the wire is a free surface with no forces
acting on it, so
$$
0=\sigma_{22}=\sigma_{11} = (\lambda(1-2\sigma) -2\sigma \mu)\left(\frac
{dL}{L}\right).
$$
This tells us that
$$
\sigma =\frac 12 \frac{\lambda}{\lambda+\mu},
$$
and
$$
Y=
\mu\left(\frac{3\lambda+2\mu}{\lambda+\mu}\right).
$$
The desired relations
$$
Y = 3\kappa(1-2\sigma) = 2\mu(1+\sigma)
$$
now follow by simple algebra from those above. | {
"domain": "physics.stackexchange",
"id": 85337,
"tags": "newtonian-mechanics, elasticity, stress-strain"
} |
What criteria have to be met to term a carboxylic acid a 'fatty acid'? | Question: So we were doing the chapter Biomolecules in class the other day, and my teacher told us that every carboxylic acid that consists of an aliphatic chain and a total carbon atom count that's four or more, is a fatty acid.
Now owing to my teacher's (rather annoying) habit of grossly over-simplifying things, I decided to look up 'fatty acids' in my trusty Oxford Science Dictionary (printed in 2003). According to it,
Chain length ranges from one hydrogen atom (in methanoic acid) to nearly 30 carbon atoms.
But a quick Wikipedia search, gives me this:
Most naturally occurring fatty acids have an unbranched chain of an even number of carbon atoms, from 4 to 28.
So now I'm completely lost. Fine, chuck what my teacher said, but the other two sources seem to contradict each other. So I'd really appreciate it, if someone can help me get the following straight (I'd prefer a reliable source, if you're citing anything):
Q1: What is the minimum number of carbons in a carboxylic acid as a whole (includes the carboxyl group)? So is methanoic acid or butanoic acid that's the smallest fatty acid?
Q2: Is there an 'upper limit' on the total carbon atom count for fatty acids? As in; can I synthesize, say, an 80 carbon aliphatic carboxylic acid and still correctly call it a fatty acid?
Q3: Is there any other criterion that's necessary to classify something as a fatty acid that hasn't been mentioned by my teacher or the other sources? If so, what is it?
Answer: I suggest to consult the IUPAC Compendium of Chemical Terminology, aka the Gold Book for a definition of fatty acids!
In a narrow sense, fatty acids are defined as
aliphatic monocarboxylic acids derived from or contained in esterified form in an animal or vegetable fat, oil or wax.
The definition continues to mention that
natural fatty acids commonly have a chain of 4 to 28 carbons […]
The paragraph closes with a wider definition, namely:
By extension, the term is sometimes used to embrace all acyclic aliphatic carboxylic acids.
Personally, I have used the term fatty acids only for natural fatty acids. | {
"domain": "chemistry.stackexchange",
"id": 6772,
"tags": "organic-chemistry"
} |
Square of annihilation operator | Question: Sorry about this question, but due to my limited background in quantum physics I wasn't able to figure out why in ALL the refs I have searched they state it as iffor sure:
Why is the square of a creation/annihilation operator zero?
Given that this is for a Fock space system.
$$\hat{a}^\dagger~\propto~(\omega\hat{q} - i\hat{p}) \tag{2.14}$$
$$ \hat{a}^{\dagger 2} = 0 $$
The 2 means that the operator is in power two.
The same holds true for the annihilation operator. I found some phoney proof (based on the fact that this describes a harmonic oscillator) which I don't like and wish to get feedback from experienced people.
References:
C.C. Gerry & P.L. Knight, Introductory Quantum Optics, 2004; eq. (2.45).
Answer: There are two different types of creation and annihilation operators:
One is the bosonic set, which obeys the commutation relation $[a,a^\dagger]=1$ (and trivially $[a,a]=0=[a^\dagger,a^\dagger]$), and for which you can define quadratures via $q=\frac12(a+a^\dagger)$ and $p=\frac{1}{2i}(a-a^\dagger)$ which obey $[q,p]=i$, i.e. $a=q+ip$ and $a^\dagger=q-ip$ can be seen as the ladder operators for a harmonic oscillator with hamiltonian $H=\frac12(p^2+q^2)$.
The other is the fermionic set, which obeys the anticommutation relations $\{c,c^\dagger\}=1$, $\{c,c\}=0=\{c^\dagger,c^\dagger\}$, from which follows that $c^2=0=(c^\dagger)^2$.
The question incorrectly conflates properties of the two sets, i.e. the question as written asserts that 'the' creation operator obeys both $(a^\dagger)^2=0$ and $a^\dagger = q-ip$ where $q$ and $p$ are reasonably-behaved quadratures. There is no such operator: it's either fermionic, with the first property, or bosonic, with the second property.
However, OP's reference for that first property makes a much weaker claim: Gerry & Knight's eq. (2.45) does not require that $(a^\dagger)^2=0$ or that $a^2=0$; instead, it only requires that their expectation values over Fock states vanish, i.e.
$$
⟨n|a^2|n⟩=0=⟨n|(a^\dagger)^2|n⟩.
$$
These two properties are complex conjugates of each other, and they can be rigorously proved from the facts that
$$
a|n⟩=\sqrt{n}|n-1⟩
$$
so therefore
$$
a^2|n⟩=\sqrt{n(n-1)}|n-2⟩
$$
and thus
$$
⟨n|a^2|n⟩=\sqrt{n(n-1)}⟨n|n-2⟩=0.
$$
However, just because the expectation values of an operator vanish in the Fock basis (like they do for $a$ and $a^\dagger$ themselves) does not mean that the operator is identically zero, so e.g. $⟨0|a^2|2⟩ = \sqrt{2}\neq 0$ as a simple example. | {
"domain": "physics.stackexchange",
"id": 44321,
"tags": "quantum-mechanics, quantum-field-theory, operators, hilbert-space"
} |
Free Optics Simulation Programs | Question: I'm having an extremely difficult time finding an optics program that is easy to use and offers accurate physics simulations. I'm not asking for much, I just want to be able to simulate a laser going through a beam splitter and then be able to drag and drop mirrors and angle them to be able to see where the laser beams end up. I want to intersect two laser beams that underwent beam splitting and redirect the beams...
Does anyone know of any free software that can do this?
Answer: For the record: It looks like this topic really interests some people: http://markmail.org/message/nic7xrgf5uzed5c4
Newport was obviously thinking in the same direction:
They offer an option to use SketchUp and provide 3D models of their mechanics and lenses—at least they used to, since this page does not exist anymore and the picture above was kindly sent to me by Newport to answer the request in the comment.
Having background in both optical engineering and experimental optics, I can say that real experiments and setups are usually designed with a piece of paper if they are simple or with the professional software if they are not. Real systems very quickly stop being a bunch of mirrors. This is, probably, why nobody is seriously considering creation of such a tool.
In labs, we are usually trying to align all beams parallel or under 90 degrees to each other---not only for ease of work but also because otherwise polarisation effects start being a problem. | {
"domain": "physics.stackexchange",
"id": 76110,
"tags": "optics, resource-recommendations, laser, simulations, software"
} |
Why can we use just one angular velocity vector to describe the rotation of a whole non-inertial reference frame? | Question: The other day in class the professor was explaining non-inertial reference frames. We were working out how to find the acceleration of a point as measured from the non-inertial reference frame, and one problem was finding the time derivatives of the unit vectors as measured from the non-inertial reference frame.
To explain how to do it, he drew a nice diagram that showed that, if $\mathbf{\Omega}$ is the vector representing angular velocity of the rotation of $\hat{\imath}$ (that is, its direction is the axis of rotation), then $\frac{\mathrm{d}\hat{\imath}}{\mathrm{d}t} = \mathbf{\Omega} \times \hat{\imath}$. This makes sense. Next, he said that we can use the same argument for the other unit vectors: $\frac{\mathrm{d}\hat{\jmath}}{\mathrm{d}t} = \mathbf{\Omega} \times \hat{\jmath}$ and $\frac{\mathrm{d}\hat{k}}{\mathrm{d}t} = \mathbf{\Omega} \times \hat{k}$. I don't understand why we use the same $\mathbf{\Omega}$ for all of them. Since their axes of rotation are different, shouldn't we use different angular velocity vectors?
Answer: As Ron pointed out in the comment, it is important to know that there is only one axis of rotation. In a rotating frame, every vector rotates around the same axis, in the same direction, with the same speed.
Now consider this: what makes $\hat{i}$ special in the first place? The answer, of course, is "nothing" - when you set up your coordinate system, you're picking $\hat{i}$ to lie in an arbitrary direction. But you could just as well have set up a different coordinate system in which $\hat{j}$ (or $\hat{k}$) points in that direction. So whatever formula you come up with for the rate of change of $\hat{i}$ is going to work just as well for any other vector. In a sense, the formula doesn't "know" what vector it's being applied to.
Alternatively, of course, you can just do the math to compute the rates of change from scratch, and you'll find out that $\Omega\times\vec{r}$ works for any vector $\vec{r}$. | {
"domain": "physics.stackexchange",
"id": 2939,
"tags": "vectors, inertial-frames, rotation, reference-frames"
} |
Breadth-first and depth-first searches | Question: I searched the net for some bfs and dfs code but rarely found some as reference. I wrote this for an assignment, I am getting the results fine. I want to make my code more compact and remove unwanted stuff or need suggestions to make the code better. I used stack and queue data structures for the implementation
#include <stdio.h>
#include<stdlib.h>
#include <stdbool.h>
#include <string.h>
int F=-1,R=-1;char q[10];
char v[10],check[10];int a[100][100];char stack[10];int top=-1;
char vertex;int pos[10];int s;char Dfsv[10]; int u=9;
int n=0;
int dequeue(){
if(F==-1)
{
printf("underflow\n");
}
else{
vertex=q[F];
printf("%c",vertex);
F++;
if(F<10)
{ return pos[F];}
else{ return -1;}
}
}
void insert(int j){
if(R==9)
{
printf("overflow\n");
}
else if(F==-1&&R==-1)
{ F++;R++;
check[j]='v';
vertex=v[j];
q[R]=vertex;
}
else{ if(check[j]!='v')
{ check[j]='v';R++;
vertex=v[j];pos[R]=j;
q[R]=vertex;}
}
}
void BFS(int s){
int k=s;
while(k!=-1)
{
for(int j=0;j<n;j++)
{
if(a[k][j]==1)
{
insert(j);
}
}
k=dequeue();
}
}
void PUSH(int j){
if(top==9)
{
printf("overflow\n");
}
else{ if(check[j]!='v')
{ check[j]='v';top++;
vertex=v[j];pos[top]=j;
stack[top]=vertex;
}
}}
int POP(){
if (top==-1)
{
printf("underflow\n");
}
else{ Dfsv[u]=stack[top];
top--;u--;
if(top!=-1)
{ return pos[top+1];}
else{ return -1;}
}
}
void DFS(int s){
int k=s;
while(k!=-1)
{
for(int j=0;j<n;j++)
{
if(a[k][j]==1)
{
PUSH(j);
}
}
k=POP();
}
}
void BDFST(){
printf("enter source number\n");
scanf("%d",&s );
vertex=v[s];insert(s);
printf("BFS: ");BFS(s);printf("\nDFS: ");
memset(check, 0, 10);
PUSH(s);
DFS(s);
for(int u=0;u<10;u++)
{
printf("%c",Dfsv[u] );
}
}
void print(int a[100][100],int n,char v[])
{ int j=0;
for(int i=0;i<n;i++)
{
for(j=0;j<n;j++)
{ printf("%c%c[%d][%d]: ",v[i],v[j],i,j);
printf("%d\t",a[i][j]);
}
printf("\n");
}
}
void main(){
int i=0,j=0;
printf("enter number of vertex\n");
scanf("%d",&n);
printf("enter vertex's\n");
scanf("%s",&v);
printf("enter the edges\n");
for( i=0;i<n;i++)
{ printf("enter 1 for edges of %c\n",v[i]);
for(j=0;j<n;j++)
{
printf("%c%c[%d][%d]: ",v[i],v[j],i,j);
scanf("%d",&a[i][j] );
printf("\n");
}
printf("\n");
}
print(a,n,v);
BDFST();
}
```
Answer: Welcome to the Code Review Community.
General Observations
The use of small functions is very good and makes the code more readable, but overall the readability can be improved.
The code does seem to follow the concept of the single responsibility principle.
I'm going to address the code in 2 phases, one is dealing with improving the use of C programming and the second is coding style in general.
The use of structures to represent the queues and the stacks might have decreased the complexity of the code.
For the future it might be good if you provided an example input and expected output.
C Programming Improvements
Check for Warnings
When you initially compile with C use the -Wall compiler flag to list any warnings as well as any errors. In many cases the warnings can indicate possible bugs in the code.
While this code compiles it does have the following warnings using the -Wall flag:
gcc -Wall dfsbfs.c
dfsbfs.c:133:6: warning: return type of ‘main’ is not ‘int’ [-Wmain]
133 | void main(){
| ^~~~
dfsbfs.c: In function ‘main’:
dfsbfs.c:138:14: warning: format ‘%s’ expects argument of type ‘char *’, but argument 2 has type ‘char (*)[10]’ [-Wformat=]
138 | scanf("%s",&v);
| ~^ ~~
| | |
| | char (*)[10]
| char *
dfsbfs.c: In function ‘dequeue’:
dfsbfs.c:22:1: warning: control reaches end of non-void function [-Wreturn-type]
22 | }
| ^
dfsbfs.c: In function ‘POP’:
dfsbfs.c:87:1: warning: control reaches end of non-void function [-Wreturn-type]
87 | }
| ^
The warning about control reaches end of non-void function indicates that the code is missing a return statement if F == -1 in the function dequeue().
The warning about main () indicates that this program is not following the C programming standards about the main() function which should always return an integer value indicating the success or failure of the program to the operating system.
Avoid Global Variables
It is very difficult to read, write, debug and maintain programs that use global variables. Global variables can be modified by any function within the program and therefore require each function to be examined before making changes in the code. In C and C++ global variables impact the namespace and they can cause linking errors if they are defined in multiple files. The answers in this stackoverflow question provide a fuller explanation.
Magic Numbers
There are many Magic Numbers in the global declarations (-1, 10, 100 and 9), it might be better to create symbolic constants for them to make the code more readable and easier to maintain. These numbers may be used in many places and being able to change them by editing only one line makes maintenance easier.
Numeric constants in code are sometimes referred to as Magic Numbers, because there is no obvious meaning for them. There is a discussion of this on stackoverflow.
Readability and Coding Style
Code Consistency
The indentation of the code is inconsistent. This makes the code much harder to read and maintain. For instance it would be better if the function dequeue() looked something like this:
int dequeue(){
if (F == -1)
{
printf("underflow\n");
}
else{
vertex = q[F];
printf("%c", vertex);
F++;
if (F < 10)
{
return pos[F];
}
else
{
return -1;
}
}
}
Some other inconsistencies are that variables are both upper and lower case characters and in most cases are single characters while in some other cases meaningful names. In the C programming language some common practices are that variable and function names are all lower case, and that variable and functions names with multiple words are snake_case. Symbolic constants, which are not used in this program are all upper case.
Horizontal and Vertical Spacing
In most programming languages it is common to improve readability by using horizontal spacing between operators and operands as shown above in the if statements and the printf statements.
The code might be slightly more readable of the open braces { lined up with the closing braces '}', however, this would be determined by local coding standards. | {
"domain": "codereview.stackexchange",
"id": 40601,
"tags": "c, array"
} |
Conditions for Complete hydrolysis of salt | Question: My text book says that the cations (or anions) which are stronger than hydronium ion(or OH-) and their conjugate base (or acid) being very much weaker than water show complete hydrolysis.
While the cations(or anions) which are weaker than hydronium (or OH-)and their conjugate base(or acid) being stronger than water but weaker than hydroxide(or H3O+) exhibit hydrolysis to Limited extent.
I don't quite understand why is this so?
Answer: The question is, in fact, a good one, but the concept of weaker/stronger is confused - and confusing!
A way to describe the situation, using the terms in the question is this: strong acids and strong bases undergo complete hydrolysis in water. The energy gained from hydrogen bonding with a water molecule is great enough to effect complete ionization in water. Examples would be HCl and NaOH.
Water lies in the middle here, being able to be protonated by strong acids and deprotonated by strong bases. Hydrolysis is a measure of how much the water is protonated to H3O+ or deprotonated to OH-.
There are also materials which ionize in water, but not so extensively, e.g., acetic acid and amines such as ammonia. They dissolve, but do not react so much and thus hydrolyze to a limited extent. Confusing, perhaps, that we call these materials weak acids and bases but they have strong internal bonds which prevent their total ionization. These strong internal bonds mean that if you did form the conjugate acid or the conjugate base (in a salt), they would react strongly with a water molecule to form a considerable amount of the corresponding acid or base. Thus the salts would ionize and hydrolyze where the original acid or base would not be said to hydrolyze (much).
The "strength" of the cation may perhaps be better described as a polarity, which, if it is a "strong", favors hydrolysis, i.e., complete ionization, meaning it is strongly attracted to the oxygens of water and less so to its own anion. By this terminology, Na+ is a strong acid because it is strongly attracted to water (oxygens) at the same time OH- would be a strong base.
A "weak" cation, like NH4+, would be one which will not protonate water to a great extent. A solution of 1 M NH4Cl will give complete ionization to NH4+ and Cl-, but we do not call that hydrolysis. The ammonium ion is stable (strong internal bonds) and only weakly protonates the H2O molecules around it, so we call it a weak base, with limited hydrolysis. | {
"domain": "chemistry.stackexchange",
"id": 13543,
"tags": "equilibrium, ionic-compounds, hydrolysis"
} |
A hotel greeter named Alfred | Question: Is there any way of improving this code? (Here's a JSFiddle with an example and comments)
Example: A hotel greeter can greet guests and accepts tips, but it's impossible to see the total of tips a greeter has received.
<!DOCTYPE html>
<html>
<head>
</head>
<body>
<pre>
[]___
/ /\ O <span id="alfred-says"><!-- the view --></span>
/____/__\ /|\
|[][]|||| / \
ALFRED
</pre>
<button onclick="javascript:alfred('greet')">Arrive at Hotel</button>
<button onclick="javascript:alfred('acceptTip', 10)">Tip Alfred $10</button>
<script>
var HOTEL, alfred;
HOTEL = {}; // Namespace
(function (HOTEL) {
// Reuseable controller
var controllerPrototype = {
greet: function () {
this.model.view.innerHTML = '"' + this.model.greeting + '"';
},
acceptTip: function (tip) {
this.model.tips += tip;
this.model.view.innerHTML = '"' + this.model.thankyou + '"';
}
};
// Create a new greeter
HOTEL.newGreeter = function (view) {
var controller = Object.create(controllerPrototype);
controller.facet = function () {
var args, method;
method = arguments[0];
args = 2 <= arguments.length ? [].slice.call(arguments, 1) : [];
if (method === "facet" || method === "model" || (controller[method] == null)) {
return undefined;
}
// Model, View and real Controller are never exposed
return controller[method].apply(controller, args);
};
controller.model = {
greeting: "Hello",
thankyou: "Thank you",
tips: 0,
view: view
};
return controller.facet;
};
}(HOTEL));
// Alfred only exposes the controller facet. His tips are a secret...
alfred = HOTEL.newGreeter(document.getElementById("alfred-says"));
</script>
</body>
</html>
Goals:
Keep it DRY
Keep it MVC
Make sure anyone with access to a "greeter" can never manipulate the view or model directly.
Limitations:
To avoid clutter, avoid type checking to avoid simple errors unless it is necessary to guarantee integrity.
Some specific thoughts:
Is there a better way of passing model and view into the controller?
Is there a better way of communicating with the controller?
Is there a good way of making a more general view?
Any other improvements?
Answer: From the comments, I guess you would like a theoretical review ;)
This:
<button onclick="javascript:alfred('greet')">Arrive at Hotel</button>
<button onclick="javascript:alfred('acceptTip', 10)">Tip Alfred $10</button>
in my mind should be wired by the controller, that is, the linking of UI elements to data and UI changes
This:
greet: function () {
this.model.view.innerHTML = '"' + this.model.greeting + '"';
},
has your controller access data straight in your view, updating the DOM. This is wrong in my mind. The view should have functions that the controller can call with either the model as a parameter or already passed in advance.
Object.create(controllerPrototype); <- Any reason you are eschewing new and prototype ?
Not that it truly matters but, method === "model" will not catch new String('model')
args = [].slice.call(arguments, 1); works as well as the ternary
All in all, I think this could use some more polishing. | {
"domain": "codereview.stackexchange",
"id": 8769,
"tags": "javascript, design-patterns, mvc"
} |
Linked-list in C without typedefs and globals? | Question: I took RosettaCode's implementation and tried to create something useable (editing their wiki as I go to give-back):
#include <stdlib.h>
#include <stdio.h>
struct elem {
long data;
struct elem *next;
};
Insert
struct elem *addToList(struct elem *node, long num) {
struct elem *iter, *temp;
if (node == NULL) {
node = (struct elem *)malloc(sizeof(struct elem *));
node->data = num;
node->next = NULL;
} else {
iter = node;
while (iter->next != NULL) {
iter = iter->next;
}
temp = (struct elem *)malloc(sizeof(struct elem *));
temp->data = num;
temp->next = NULL;
iter->next = temp;
}
return node;
}
Delete
struct elem *deleteFromList(struct elem *node, size_t pos) {
size_t i = 1;
struct elem *temp, *iter;
if (node != NULL) {
iter = node;
if (pos == 1) {
node = node->next;
iter->next = NULL;
free(iter);
}
else {
while (i++ != pos - 1)
iter = iter->next;
temp = iter->next;
iter->next = temp->next;
temp->next = NULL;
free(temp);
}
}
return node;
}
Print
void printList(struct elem *node) {
struct elem *iter;
puts("List contains following elements : \n");
for (iter = node; iter != NULL; iter = iter->next) {
printf("%ld ", iter->data);
// access data, e.g. with iter->data
}
}
ASan crashes at node->next = NULL of addToList with heap-buffer-overflow.
What do you think of this implementation? - How can it be improved?
Answer:
Special cases are bad. Eliminate them if possible.
Specifically, an empty list is not all that special.
Is an empty list any more special than an unexpectedly short list?
Extract useful functions if possible. Appending consists of finding the end, and prepending to it, two useful functions.
Do you write C or C++?
In C, casting the result of malloc() is heavily frowned upon, as it means useless repetition.
Avoid sizeof(TYPE).
One easily gets it wrong (as you do!!), it haphazardly sprinkles unchecked duplicate information (the type) about which has to be manually verified, and it impedes refactoring.
Use sizeof *target instead, which properly couples size and use.
Unless you are restricted to strict C90 or earlier, you can mix declarations and instructions. Doing so limits scope and simplifies code.
A pointer has a truth-value, no need to compare to a null pointer constant.
Use concise but precise names.
If you add to a list, where do you add it?
To every element.
Prepended.
Appended.
Behind a given node / index / value.
Randomly.
Arbitrarily.
...
C does not have namespaces, thus the use of prefixes to avoid collisions. Consider an appropriate one.
Dynamic allocation can fail. Deal with it.
Applying it to the first function:
struct elem **list_end(struct elem **p) {
assert(p);
while (*p)
p = &p[0]->next;
return p;
}
struct elem **list_prepend(struct elem **p, long v) {
assert(p);
struct elem *x = malloc(sizeof *x);
if (!x)
return NULL; /* or abort(); */
x->data = v;
x->next = *p;
*p = x;
return &x->next;
}
struct elem **list_append(struct elem **p, long v) {
return list_prepend(list_end(p), v);
} | {
"domain": "codereview.stackexchange",
"id": 43198,
"tags": "c, linked-list"
} |
'Solvers' in Machine Learning | Question: What role do 'Solvers' play in optimization problems? Surprisingly, I could not find any definition for 'Solvers' online. All the sources I've referred to just explain the types of solvers & the conditions under which each one is supposed to be used.
Examples of Solvers - ['Newton-cg', 'lbfgs', 'liblinear', 'sag,' 'saga']
Answer: In other words, you would like to have a clear comparison between all the ML optimisation algorithms you've mentioned?
Here is a quite extended list of different ML optimisation methods, including the ones you've mentioned:
Source: https://www.semanticscholar.org/paper/A-Survey-of-Optimization-Methods-From-a-Machine-Sun-Cao/3119ea9c7ad7a5e044dc7c267329a4bbf00d0158 | {
"domain": "datascience.stackexchange",
"id": 10854,
"tags": "optimization, gradient-descent"
} |
Analogues to Hamilton's equations in Infinitesimal Canonical Transformations | Question: This is from chapter 4 of David Tong's notes on Classical Dynamics (Hamiltonian Formalism).
Let's say you make an infinitesimal canonical transformation (with $\alpha$ as the infinitesimal parameter) as follows
$$q_i\rightarrow Q_i=q_i+\alpha F_i(q,p) \qquad p_i\rightarrow P_i=p_i+\alpha E_i(q,p) \qquad \qquad(1)$$
By some mumbo jumbo of using the Jacobian, he finds out that the functions $F_i(q,p)$ and $E_i(q,p)$ are generated by a "generating function" $G(q,p)$ such that
$$F_i=\frac{\partial {G}}{\partial p_i} \qquad E_i=-\frac{\partial {G}}{\partial q_i} \qquad\qquad(2)$$
He then goes on to conclude that from the above two equations, I quote him "the tangent
vectors to these lines are given by"
$$\frac{ dq_i}{d\alpha}=\frac{\partial {G}}{\partial p_i} \qquad \frac{dp_i}{d\alpha}=-\frac{\partial {G}}{\partial q_i}$$
And he claims that these look just like Hamilton’s equations, with the Hamiltonian replaced by the function $G$ and time replaced by the parameter $\alpha$. So every one-parameter family of canonical transformations can be thought of as “Hamiltonian flow” on phase space for an appropriately chosen “Hamiltonian” $G$.
Now I can't understand how he deduced the above results, especially the last one from the equations (1) and (2). Also, I have no clue how we can conclude the result of (2) without involving the Jacobian. I am completely clueless about this. Any sort of help is appreciated.
Answer: I am not entirely sure of what objections you have, but I will try to explain what Tong is getting at from a different angle, and perhaps then the route he takes through his notes will make more sense. The idea he's trying to get across is roughly that one can view time evolution as being a canonical transformation which is generated by the Hamiltonian function - however, different functions (substituted in place of $H$) will generate different canonical transformations, which can have very deep implications.
1. Hamiltonian Vector Fields
To any smooth function $F(q_i,p_i)$ on the phase space, we can associate the Hamiltonian vector field (not to be confused with the Hamiltonian function) $\mathbf X_F$ given by
$$\mathbf X_F = \pmatrix{\frac{\partial F}{\partial p_1}\\ \vdots \\ \frac{\partial F}{\partial p_N}\\ -\frac{\partial F}{\partial q_1}\\ \vdots \\-\frac{\partial F}{\partial q_N}} = J\nabla F$$
where $J= \pmatrix{0& 1_{N}\\-1_N&0}$ is called the symplectic form, $1_N$ is the $N\times N$ identity matrix, and $\nabla$ is the phase space gradient operator. Don't worry too much about the motivation for this - just note that we can follow this procedure to convert a smooth function into a vector field, which looks like the gradient except (i) the $q$'s and $p$'s are switched, and (ii) the $q$'s get a minus sign.
For a 2D phase space, which I'll use for the remainder of this answer, this takes the form
$$X_F = \pmatrix{\frac{\partial F}{\partial p}\\-\frac{\partial F}{\partial q}} = J\nabla F$$
where $J=\pmatrix{0&1\\-1&0}$.
2. Integral Curves
An integral curve of a vector field $\mathbf X$ is what you get when you basically follow the arrows of $\mathbf X$ at every point to define a path. More precisely, $\mathbf r(\lambda) = \big(q(\lambda),p(\lambda)\big)$ is called an integral curve of a vector field $\mathbf X$ if the tangent vector to the curve at a particular point is equal to the vector of $\mathbf X$ attached to that point:
$$\mathbf r'(\lambda) = \pmatrix{q'(\lambda)\\p'(\lambda)} =\pmatrix{X^1\big(q,p\big)\\X^2\big(q,p\big)}$$
An example is in order. Consider the vector field $\mathbf X(q,p) = \pmatrix{-q\\p}$. We will construct an integral curve from the starting point $(q_0,p_0)$. We seek some $\mathbf r(\lambda)=\pmatrix{q(\lambda)\\p(\lambda)}$ such that
$$\mathbf r'(\lambda) = \pmatrix{q'(\lambda)\\p'(\lambda)}=\pmatrix{-q\\p}$$
So we have two equations:
$$q'(\lambda) = -q \implies q(\lambda)=c_1 e^{-\lambda}$$
$$p'(\lambda) = p \implies p(\lambda) = c_2e^{\lambda}$$
Applying the initial condition $\mathbf r(0) =\pmatrix{q_0\\p_0}$ yields the integral curve
$$\mathbf r(\lambda) = \pmatrix{q_0e^{-\lambda}\\p_0e^{\lambda}}$$
I've plotted the integral curves of this vector field for several choices of initial point.
3. Flows
With this idea of integral curves in mind, we can define a map which takes points in the phase space and "pushes" them along the flow to new points in phase space. The flow $\Phi_{\mathbf X}^\lambda$ is a function which eats a starting point $(q_0,p_0)$ and sends it to the point $\big(q(\lambda),p(\lambda)\big)$ which you get by following the integral curve of $\mathbf X$ for a distance $\lambda$. Explicitly, using the vector field from the last example,
$$\Phi_\mathbf{X}^\lambda (q_0,p_0) = (q_0 e^{-\lambda},p_0e^{\lambda})$$
4. Hamiltonian Flows
Now we can chain those two ideas together. Given a smooth function $F(q,p)$, we can obtain the Hamiltonian vector field
$$\mathbf X_F = \pmatrix{\frac{\partial F}{\partial p}\\-\frac{\partial F}{\partial q}}$$
We can then use the integral curves of this vector field to define the associated Hamiltonian flow $\Phi_F^\lambda$. We say that the function $F$ generates the flow. The flow we obtained in part 3 is an example of a Hamiltonian flow generated by the smooth function $F(q,p)=-qp$, as you can quickly check.
The rate of change of some other function $K$ as you follow the flow is seen to be
$$\frac{d}{d\lambda} K\big(q(\lambda),p(\lambda)\big) = \frac{\partial K}{\partial q} q'(\lambda) + \frac{\partial K}{\partial p}p'(\lambda)$$
but, since the flow is generated by the function $F$, we have
$$\pmatrix{q'(\lambda)\\p'(\lambda)} = \pmatrix{\frac{\partial F}{\partial p}\\-\frac{\partial F}{\partial q}}$$
so
$$\frac{d}{d\lambda} K\big(q(\lambda),p(\lambda)\big) = \frac{\partial K}{\partial q}\frac{\partial F}{\partial p} - \frac{\partial K}{\partial p} \frac{\partial F}{\partial q} \equiv \{K,F\}$$
where $\{\bullet,\bullet\}$ is the Poisson bracket.
5. The Punchline
The point that Tong is making is that every smooth function $F$ on phase space generates a Hamiltonian flow $\Phi_F^\lambda$, and that these Hamiltonian flows are, in fact, canonical transformations for every choice of $\lambda$. Therefore, smooth functions generate entire families of canonical transformations which you get by following the flow for some chosen distance.
Though it may not seem like it, this is a deep and beautiful idea which provides the entire structure of Hamiltonian mechanics. First and foremost, note that the flow generated by the Hamiltonian function itself pushes points in phase space forward in time (that is, the parameter $\lambda$ should be interpreted as the time). So right off the bat, we see that once we have defined this structure, we can say simply that $H(q,p)$ generates the flow corresponding to time evolution.
There is more to the story, however. Note that if we choose the function $P(q,p)=p$ rather than the Hamiltonian, the resulting flow gives (exercise for the reader) $\big(q(\lambda),p(\lambda)\big) = (q_0+\lambda,p_0)$. We therefore say that the momentum function $P$ generates spatial translations. It is a good exercise to show that $L_z$, the $z$-component of the angular momentum, generates spatial rotations about the $z$-axis.
If you've taken a quantum mechanics course, this should sound familiar. The standard formulation of quantum mechanics uses precisely the same structure (the Hamiltonian generating time evolution, momentum operators generating spatial translation, angular momentum operators generating rotations, etc). | {
"domain": "physics.stackexchange",
"id": 68646,
"tags": "classical-mechanics, coordinate-systems, hamiltonian-formalism, vector-fields, phase-space"
} |
Quantum error correction using bit-flip code for the amplitude damping channel | Question: I do not understand the error correction process that uses quantum codes for amplitude damping channel. I will take three bit-flip code for example.
The logical state of a three bit-flip code is
$|0\rangle_L=|000\rangle$,$|1\rangle_L=|111\rangle$ with stabilizers $Z_1Z_2, Z_2Z_3$.
The amplitude damping channel on three qubits $\mathcal{E}^{\otimes 3}$ has Kraus operators $\{E_0^{\otimes 3},E_0E_1^{\otimes 2},E_0E_1E_0,E_0^{\otimes 2}E_1,E_1^{\otimes 3},E_1E_0^{\otimes 2},E_1E_0E_1,E_1^{\otimes 2}E_0\}$,
where
\begin{matrix}
E_0=\begin{pmatrix}
1 & 0 \\
0 & \sqrt{1-r}
\end{pmatrix},
E_1=\begin{pmatrix}
1 & \sqrt{r} \\
0 & 0
\end{pmatrix}.
\end{matrix}
Suppose I have a initial state $|\psi\rangle=|111\rangle$ and error $E_1E_0^2$ will transform $|\psi\rangle$ to $\sqrt{r}(1-r)|011\rangle$. By measuring the stabilizers $Z_1Z_2, Z_2Z_3$, I can find the first qubit is flipped. So I use $X_1$ to recover.
My question is that
the state could only be corrected back to $\sqrt{r}(1-r)|111\rangle$ after the recovery, can $\sqrt{r}(1-r)$ be recognized as the fidelity?
if I write the initial state in density matrix $\rho=|111\rangle\langle 111|$ and
\begin{equation}
\begin{aligned}
\mathcal{E}^{\otimes 3}(\rho)&=E_0^3 \rho (E_0^3)^\dagger+E_1E_0^2 \rho (E_1E_0^2)^\dagger+\cdots \\&= r^3|000\rangle\langle 000|+r(1-r)^2|011\rangle\langle 011|+\cdots.
\end{aligned}
\end{equation}
How to perform projective measurements to obtain error syndromes and correct?
$[5,1,3]$ code can correct one arbitrary error but amplitude damping errors will occur on all $5$ qubits, does that mean $[5,1,3]$ code will have bad performance for amplitude damping errors?
Answer: Before starting, I should probably emphasise that, although useful for the practice of working through the maths of quantum error correction on a relatively simple case, amplitude damping combined with the repetition code is a really bad thing to be thinking about. This is because, if there's an error, and they you apply a syndrome measurement, so that you detect there's one bit that is different from the other two, then you know that your initial state was $|111\rangle$ and not $|000\rangle$ because under amplitude damping noise, $|000\rangle$ never changes. This means that your syndrome measurement, if it detects an error, constitutes a measurement on your encoded qubit. It destroys any of the coherence you were trying to protect!
Because of the issue described above, this question doesn't make the sort of sense you'd like it to. However, remember that there are measurements going on. When you measure, you get a probability of getting a given outcome, and the output state is renormalised. At this point, it is usually the probability of success that is evaluated (sum of the probabilities for measurement outcomes which are successful in correcting the error) rather than a fidelity.
How you perform the measurement has nothing to do with the state. Perhaps you feel unfamiliar with the formalism because it's applied to a density matrix rather than a pure state. Still, it's a sequence of applying unitaries ($\rho\mapsto U\rho U^\dagger$) and projections (probability $p_i=\text{Tr}(P_i\rho)$ and output state $P_i\rho P_i/p_i$).
It's not the issue that the errors could apply to all 5 qubits that's the problem. You should still think about these errors as an independent probability $r$ of the amplitude damping error acting on each qubit (if it's in the $|1\rangle$ state) and $(1-r)$ of not happening. So, there's a case of no error with probability $(1-r)^5$ and 5 cases of just one error happening, each of probability $r(1-r)^4$. All of these are cases that can be corrected by the 5-qubit code. So, the idea is that provided $(1-r)^5+5r(1-r)^4>(1-r)$, it is less likely that there's a error on the logically encoded qubit than there would have been on a single qubit with no encoding. That makes it a worthwhile procedure. | {
"domain": "quantumcomputing.stackexchange",
"id": 1143,
"tags": "error-correction, stabilizer-code, projection-operator"
} |
Does there exist a resource for vetting banned words for chatbots? | Question: So, Tay the racist tweeter bot... one thing that could have prevented this would have been to have a list of watchwords to not respond to, with some logic similar to foreach (word in msg) {if (banned_words.has(word)) disregard()}.
Even if that wouldn't, what I'm getting at is obvious: I am building a chatterbot that must be kid-friendly. For my sake and for the sake of whoever finds this question, is there a resource consisting of a .csv or .txt of such words that one might want to handle? I remember once using a site-blocking productivity extension that had visible its list of banned words; not just sexually charged words, but racial slurs, too.
Answer: I have not found one other than scraping a few pages from Urban Dictionary, I built my list via crowdsourced style and got a number of interesting words I had not considered.
Start with the worst words you can think of, then try slang and accidental or on purpose misspellings of them | {
"domain": "ai.stackexchange",
"id": 1828,
"tags": "chat-bots, resource-request"
} |
Question about bundle adjustment | Question: When doing bundle adjustment do landmarks gets optimized as stand alone variables or are they viewed as a function of the robots first position when they are detected?
Answer: The points are assumed to be static in the global frame and optimized in the global frame. Each time the point is detected within the camera frame a measurement is taken relative to the camera. The process of reconciling the successive measurements of the same point across multiple frames by minimizing the reproduction error in each frame is what refines the location of the points in 3D space.
For more see: https://arxiv.org/pdf/1912.03858.pdf#:~:text=Bundle%20adjustment%20describes%20the%20sum,world%20frame)%20and%20camera%20parameters. | {
"domain": "robotics.stackexchange",
"id": 2619,
"tags": "slam, computer-vision, visual-odometry"
} |
Is there any way to carry out voltammetry without a potentiostat? | Question: I'm trying to do a voltammetry experiment, but my high school doesn't want to have to buy a potentiostat (understandably, I know they are very expensive). Is there any way to carry out a voltammetry experiment, either linear sweep or cyclic, without a potentiostat? Or am I doomed to have to go back to the drawing board...
I should mention that the physics lab has some equipment like variable power sources, but not much--my school kind of under-appreciates science--and I was planning on creating a cruddy makeshift three-electrode cell.
Answer: This is a valid question. This project is entirely possible, but I would not advise it for a high school research project based on my experience building these. Much better projects are available.
A nonreactive electrode, for example gold or platinum, is required for good results.
Knowledge of op-amp design, or some other feedback control system, which is taught in advanced electronics courses is needed. The control system fixes the potential between the the reference and the working electrode by drawing current from the third electrode. The current drawn is measured to give some information about the system.
The Wikipedia article shows the circuit need, https://en.wikipedia.org/wiki/Potentiostat.
The open source cheapstat is available: http://web.chem.ucsb.edu/~kwp/cheapstat/
Integrated versions of the circuit are available. http://www.ti.com/product/LMP91000. Evaluation kit ~$100 http://www.ti.com/tool/LMP91000EVM.
Information on making electrodes: https://publiclab.org/notes/JSummers/01-09-2014/potentiostat-notes-5-how-to-make-low-cost-electrodes
It is possible to build a potentiostat without advanced knowledge of electronics; however, this runs into costs. Getting experiments done with the equipment, resources, and funding available is something every scientists faces. | {
"domain": "chemistry.stackexchange",
"id": 5106,
"tags": "electrochemistry"
} |
In the context of image binary classification, is it necessary to divide dataset into true positive, false positive, true negative, false negative? | Question: I am working through this course. It seems that the professor is not dividing the dataset into true positive, false positive, true negative, and false negative.
In the context of image binary classification, is it necessary to divide the dataset into true positive, false positive, true negative, and false negative?
Answer: No it's not. The numbers of true positives, false positives, true negatives, false negatives, are something you can check from a confusion matrix when you evaluate the performance of your trained model on a test set. It's useful to know where your model is making more mistakes.
It's not something you can do before training. | {
"domain": "datascience.stackexchange",
"id": 5322,
"tags": "machine-learning, deep-learning, logistic-regression"
} |
Prove complement a^nb^nc^n is contextfree | Question: So the complement of L1 = $\{a^{n}b^{n}c^{n} \mid n \geq 1\}$ would be L2 = $\{a,b,c\}^* \setminus \{a^{n}b^{n}c^{n} \mid n \geq 1\}$.
In other words, any combinations of a, b and c where we don't have an equal number of all three letters, and w = $\varepsilon$ is also legit.
However, while I'm certain that there should be a contextfree grammar for L2, I can't seem to find a grammer that allows you to generate the terminals freely without allowing $a^{n}b^{n}c^{n}$ with n $\geq$ 1.
My attempt was to make a starting Rule S -> $Q_{_{a}}$ ; $Q_{_{b}}$ ; $Q_{_{c}}$ ; $\varepsilon$
so that the individual Q rules would make it possible for two of the terminals a, b and c to have an equal number, but not for the third. (in $Q_{_{a}}$, a is restricted by max(b,c), in $Q_{_{b}}$, b is restricted by max(a,c), in $Q_{_{c}}$, c is restricted by max(a,b) so that the restricted terminal can never show up as often as the unrestricted terminal with the highest count)
The rules I set up though, only allow to have unlimited numbers of unrestricted terminals in any order. I'm not sure how to implement a rule for the restricted terminal, without allowing it to have as high a count as the unrestricted ones, if the unrestricted ones have the same count.
here's my P for G$_{_{L2}}$ so far
S $\rightarrow$ $Q_{_{a}}$ ; $Q_{_{b}}$ ; $Q_{_{c}}$ ; $\varepsilon$
$Q_{_{a}}$ $\rightarrow$ $Q_{_{a}}$b $Q_{_{a}}$ ; $Q_{_{a}}$c $Q_{_{a}}$ ; $\varepsilon$
$Q_{_{b}}$ $\rightarrow$ $Q_{_{b}}$a $Q_{_{b}}$ ; $Q_{_{b}}$c $Q_{_{b}}$ ; $\varepsilon$
$Q_{_{c}}$ $\rightarrow$ $Q_{_{c}}$a $Q_{_{c}}$ ; $Q_{_{c}}$b $Q_{_{c}}$ ; $\varepsilon$
These are still missing the rules for the individual restricted variable.
In what fashion can I add those, without breaking the [ $a^{n}b^{n}c^{n}$ | n = 0 ] rule?
Edit: it occured to me that I could refine the restricting rules, such that the restricted terminal can be derived from a rule if, and only if one (and only one) of the unrestricted ones is always created with it. For example:
S $\rightarrow$ $Q_{_{a}}$ ; $Q_{_{b}}$ ; $Q_{_{c}}$ ; $\varepsilon$
$Q_{_{a}}$ $\rightarrow$ $Q_{_{ab}}$b $Q_{_{ab}}$ ; $Q_{_{ac}}$c $Q_{_{ac}}$
$Q_{_{ab}}$ $\rightarrow$ $Q_{_{ab}}$b $Q_{_{ab}}$ ; $Q_{_{ab}}$c $Q_{_{ab}}$ ; $Q_{_{ab}}$ a $Q_{_{ab}}$ b $Q_{_{ab}}$ ; $Q_{_{ab}}$ b $Q_{_{ab}}$ a $Q_{_{ab}}$ ; $\varepsilon$
$Q_{_{ac}}$ $\rightarrow$ $Q_{_{ac}}$b $Q_{_{ac}}$ ; $Q_{_{ac}}$c $Q_{_{ac}}$ ; $Q_{_{ac}}$ a $Q_{_{ac}}$ c $Q_{_{ac}}$ ; $Q_{_{ac}}$ c $Q_{_{ac}}$ a $Q_{_{ac}}$ ; $\varepsilon$
At this point my brain starts running in circles. Would this set me on the right path?
Answer: You're overcomplicating. There is no need for the grammar to be deterministic, so it's OK for cases to overlap.
Say $\omega \in \overline{L_1}$. At least one of the following statements must be true:
$\omega = a^ib^j\nu \text{ where } i \ge 1, j\ge 0 \text{ and } i \ne j, \text{ and } \nu \text{ does not start with } b$
$\omega = a^ib^jc^k \text{ where } i, j, k \ge 1, \text{ and } j \ne k$ (I don't require $i = j$, although if $i \ne j$ then the first condition also applies.)
$\omega \text{ does not start with } a$
$\omega \text{ contains } ba, ca \text{ or }cb$
These all have reasonably simple grammars.
Just in case it's not obvious, there are a few ways to do $a^jb^k | j\ne k$. One simple one:
$$\begin{align}
S &\to A \mid B \\
A &\to aC \mid aA \\
B &\to Cb \mid Bb \\
C &\to \epsilon \mid aCb
\end{align} $$
$C$ is a balanced sequence of $a$ and $b$, $A$ has more $a$s, $B$ has more $b$s, and $S$ has either more $a$s or more $b$s. | {
"domain": "cs.stackexchange",
"id": 19080,
"tags": "context-free, formal-grammars"
} |
DOM building using DocumentFragment | Question: I want to build the markup below by iterating on an array of objects. The structure of the object is also listed below. I need some tips on how to make it clean.
Markup I want to build:
<!--the markup I want to build-->
<div class="pull-left col-xs-4">
<div class="thumbnail">
<div class="caption">
<h5>Crowne Plaza Hotel</h5>
</div>
<img class="img-responsive" src="img/crown-plaza.jpg" alt="Crowne Plaza Hotel">
<div class="caption">
<h3 class="pull-right">
<small>
<s>S$ 275</s>
</small>S$ 275</h3>
<div class="clearfix"></div>
</div>
</div>
</div>
JavaScript:
//the object structure of each data
{
name: "Hotel A",
loc: "Prontera Rd",
dist: 9,
img: "img/rendezvous-hotel.jpg",
price: 350,
deal_price: 350,
currency: "S$",
star_rating: 4
}
function constructDOMFragment(data) {
var fragment = document.createDocumentFragment();
for (var i = data.length - 1; i >= 0; i--)(function(obj) {
var div_container = document.createElement("div");
div_container.className = "pull-left col-xs-4";
var div_thumbnail = document.createElement("div");
div_thumbnail.className = "thumbnail";
div_container.appendChild(div_thumbnail);
var div_caption_top = document.createElement("div");
div_caption_top.className = "caption";
var h5 = document.createElement("h5");
h5["innerHTML" || "textContent"] = obj.name;
div_caption_top.appendChild(h5);
div_thumbnail.appendChild(div_caption_top);
var img = document.createElement("img");
img.className = "img-responsive";
img.src = obj.img;
img.alt = obj.name;
div_thumbnail.appendChild(img);
var div_caption_bottom = document.createElement("div");
div_caption_bottom.className = "caption";
var h3_deal_price = document.createElement("h3");
h3_deal_price.className = "pull-right";
if (obj.price !== undefined && obj.price && obj.price != obj.deal_price) {
var price = document.createElement("small");
var strikethrough = document.createElement("s");
strikethrough["innerHTML" || "textContent"] = obj.currency + " " + obj.price;
price.appendChild(strikethrough);
h3_deal_price.appendChild(price);
}
h3_deal_price.appendChild(document.createTextNode(" " + obj.currency + " " + obj.price));
div_caption_bottom.appendChild(h3_deal_price);
var clearfix = document.createElement("div");
clearfix.className = "clearfix";
div_caption_bottom.appendChild(clearfix);
div_thumbnail.appendChild(div_caption_bottom);
fragment.appendChild(div_container);
})(data[i]);
var clearfix = document.createElement("div");
clearfix.className = "clearfix";
fragment.appendChild(clearfix);
return fragment;
};
Answer: Interesting question,
As @rotora mentioned, h5["innerHTML" || "textContent"] cannot work, you are not testing cross browser, you would have caught this otherwise
Naming, div_caption_bottom is an unfortunate name for 3 reasons
This should be lowerCamelCase, so divCaptionBottom
The tag type div is an implementation detail, are you going to rename this to span CaptionBottom is you starting using spans? I would rather see CaptionBottom
I was lying, bottomCaption ( adjectiveNoun ) reads much better
From a 'Dont Repeat Yourself' perspective, you are mostly doing
Creating elements with a tag type
Assigning class names
Adding elements to other elements
If you were to use a function like this (untested, but you should grasp where this is going):
function createStyledElement( tag , className ){
var element = document.createElement( tag );
element.className = className;
element.addStyledElement = createStyledElement;
if( this instanceof HTMLElement )
{
this.appendChild( element );
}
return element;
}
Then your code would look more like this:
var container = createStyledElement( 'div' , 'pull-left col-xs-4' );
var thumbnail = container.addStyledElement( 'div' , 'thumbnail' );
var captionTop = thumbnail.addStyledElement( 'div' , 'caption' );
var header5 = captionTop.addStyledElement( 'h5' , 'caption' );
header5.innerHTML = obj.name;
var img = thumbnail.addStyledElement( 'img' , 'img-responsive' );
img.src = obj.img;
img.alt = obj.name;
var captionBottom = thumbnail.addStyledElement( 'div' , 'caption' );
From a performance perspective, the speed of building HTML through templates or strings changes over time and is different per browser. You should check out templating libraries, but possibly your code is the right way to go. It's hard to tell.
Also from a performance perspective, creating functions in a loop, is a no-no
Finally, jshint.com found almost nothing to complain about besides some missing semicolons.
Minor update on templating
From your comment, you might be overestimating the effort templating takes.
If you define your template like this in your HTML:
<div id="hotelTemplate" style="display:none">
<!--the markup I want to build-->
<div class="pull-left col-xs-4">
<div class="thumbnail">
<div class="caption">
<h5>~</h5>
</div>
<img class="img-responsive" src="~" alt="~">
<div class="caption">
<h3 class="pull-right">
<small>
<s>~</s>
</small>~</h3>
<div class="clearfix"></div>
</div>
</div>
</div>
</div>
And then use for example this simple template filler
function fillTemplate( s )
{ //Replace ~ with further provided arguments
for( var i = 1, a = s.split('~'), s = '' ; i < arguments.length ; i++ )
s = s + a.shift() + arguments[i];
return s + a.join("");
}
Then you can do something like this:
var template = document.getElementById('hotelTemplate').innerHTML;
var html = fillTemplate( template , obj.name , obj.img , obj.name , obj.deal_price , obj.price );
var div = document.createElement( 'div' );
div.innerHTML = html;
fragment.appendChild( div ); | {
"domain": "codereview.stackexchange",
"id": 8317,
"tags": "javascript, design-patterns, html, dom"
} |
What's the name of the point where you are closest or furthest from the ecliptic? | Question: Every 24 hour day, there is a point as the earth rotates about its axis, where a location on earth is the closest to the ecliptic and where it's the furthest. Do those two points have a name?
What I mean is on the plane of the ecliptic, as the earth rotates around its axis, a point on the earth at 45 deg north is going to hit a low-point perpendicular to that plane, and will hit a high point perpendicular to that plane.
Answer: I don't know of a standard term for that quantity, but I'd call it the ecliptic latitude of the zenith.
With north positive and south negative, this would reach a minimum at 6:00 and a maximum at 18:00 local sidereal time.
These correspond to local solar noon or midnight at the solstices and occur 2 hours earlier per month.
The vectors perpendicular to the ecliptic are called the ecliptic poles. The zenith only coincides with one of these on the Arctic or Antarctic circle, at sunrise (winter-spring) or sunset (summer-fall). | {
"domain": "astronomy.stackexchange",
"id": 2046,
"tags": "ecliptic"
} |
Game where two players take turns picking numbers until a target sum (part 2) | Question: Here is a continued discussion (Game where two players take turns picking numbers until a target sum) from here since my previous code has bug, I fixed it with totally new algorithm implementation.
Working on the problem of a sum game. Suppose we have consecutive numbers from 1 to n, and an expected sum number. There are two players, and each player tries to select one number in turn (once a number is selected by one player, the other player cannot select it again), and the selected numbers are summed together. The player who first gets a sum which is >= the expected sum wins the game. The question is trying to find if the first player has a force-win solution (meaning no matter what numbers the 2nd player chooses each time, the first player will always win), and also try to find all possible force-win solutions of player 1.
For example, if we have numbers from 1 (inclusive) to 5 (inclusive) and expected sum is 7, if the first player select 1, no matter what the 2nd player select, in the final the first player will always win.
Areas which I want to improve, but do not know how in my below code, asking for advice,
I think the use of first_player_initial_choice is a bit ugly, but I do not know how to remove it;
I check len(path) == 1 to see if it is first choice by player 1, so that I can record when player 1 force win, what is the first selected number -- which could be used for further filter purpose, it seems a bit ugly as well;
I tried to find all possible winning solution of player 1, when try to filter (by using if result[0] in first_player_initial_choice to see if winning soluton of player 1 is actually a player 1 force win solution.) It seems a bit ugly as well, but I cannot figure out a way which pick-up only player 1 force win solution only in an efficient recursive solution way.
My code,
results = []
first_player_initial_choice = set()
def force_win(numbers, flags, current_value, expected_value, path):
current_win = False
for i in range(len(flags)):
if not flags[i]:
if numbers[i] + current_value >= expected_value:
path.append(numbers[i])
if len(path) % 2 != 0:
results.append(path[:])
if len(path) == 1:
first_player_initial_choice.add(numbers[i])
path.pop(-1)
current_win = True
else:
flags[i] = True
path.append(numbers[i])
opponent = force_win(numbers, flags, current_value+numbers[i], expected_value, path)
if len(path) == 1 and not opponent:
first_player_initial_choice.add(numbers[i])
path.pop(-1)
flags[i] = False
if not opponent:
current_win = True
# for
return current_win
if __name__ == "__main__":
win = force_win([1,2,3,4,5],[False]*5, 0, 8, [])
print win
if win:
final_result = []
print results
print first_player_initial_choice
for result in results:
if result[0] in first_player_initial_choice:
final_result.append(result)
print final_result
Answer: About the algorithm
If you want to check for a force win on the first turn, you just need to check what happens when player one picks the lowest number and player two picks the remaining highest number.
If you want to check for a force win on the other turns you can repeat the check by removing numbers from the list and check the remaining highest and lowest numbers.
About the code
It took me a while to get how your code worked, I think it's overly complicated. You have flags to determine if a number has been picked, but if you really want to keep a copy of the original numbers (which you don't actually need) you can just, well, copy it.
I still don't really get the path variable. You use that to determine whose turn it is, but you don't need to know that, you only need to know if player one has a forced win.
This is how I'd go about it for a solution. Keep in mind that I didn't test every corner case, but it should give you an idea about what I mean:
def force_win(numbers, current_sum, expected_value):
# Numbers are assumed to be positive integers
# For simplicity I assume there are no duplicates
# Otherwise be careful if min_value == max_value
min_value = min(numbers)
max_value = max(numbers)
current_sum += min_value + max_value
if (current_sum >= expected_value):
return False
numbers.remove(min_value)
if (max_value in numbers):
numbers.remove(max_value)
if (len(numbers) == 0):
return False
if (min_value + max_value < expected_value):
if ((current_sum + min_value) >= expected_value):
return True
force_win(numbers, current_sum, expected_value)
return False
if __name__ == "__main__":
print force_win([1,2,3,4,5], 0, 7) | {
"domain": "codereview.stackexchange",
"id": 22984,
"tags": "python, algorithm, python-2.x"
} |
Why did my glass pot explode when I boiled sugar in it and added cold milk? | Question: I decided to try some cooking with sugar and put a glass bowl on the flame with half a cup of sugar in it and some water. After bubbles appeared I stirred the sugar for about a minute and then I decided to add milk, and as soon as I poured the milk, everything exploded and the glass pot shattered into little pieces rapidly.
What just happened? Why did my glass pot explode when I boiled sugar in it and added cold milk? And does this also happen with laboratory equipment?
Answer: Assuming initially your glassware contained a cup of sugar and a little of water, you basically prepared caramel. Depending on the sugar employed, temperatures needed to trigger caramelization vary, but the $\pu{160 ^\circ{}C}$ ($\pu{320 ^\circ F}$) mentioned for sucrose (which is the normal household sugar), as an example, is quite high. Most materials, including glass, dilate upon heating, which is fine as long as this is evenly done.
Now if you add cold milk, say of $\pu{20 ^\circ{}C}$, the glass aims to contract back to its initial state. Because of the quantity of milk added, and the large heat capacity of water as a major constituent of milk, the milk momentarily served as a considerable heat sink. Now taking into consideration that the glass ware's walls are unevenly heated -- on the outside still heated by gas or stove to more than $\pu{160 ^\circ{}C}$, and on the inside suddenly at $\pu{20 ^\circ{}C}$ -- this suddenly generates a lot of mechanical strain and stress on the material. The thicker the walls of your glass ware, the more easily these may then crack and shatter into pieces just by the sudden temperature change. (By the same token, you place hot glass ware on a plank of wood to allow slow cooling to room temperature.)
In addition, if the glass ware were closed tightly just after addition of the cold milk, the hot caramel (at $\pu{160 ^\circ{}C}$) is able to boil off the water in the milk, too; generating steam that likes to expand, or -- if confined in volume -- will build up pressure. Under normal circumstances, for each litre of (liquid) water, up to $\pu{1.7 m^3}$ of steam may be generated (at normal pressure). This represents an additional stress for the material, and standard kitchen glass ware is not designed to withstand such pressures. | {
"domain": "chemistry.stackexchange",
"id": 12456,
"tags": "equipment"
} |
Time-reversal and delay commutativity and resulting signals | Question: I'm reading Schaum's DSP book, and in Fig 1-3 they demonstrate why shifting and reversal are order-dependent, showing a couple of simple systems, 1) delay followed by reversal, and 2) reversal followed by delay.
The equations they present as follows:
1) (Time delay -> Reversal)
$x(n) \rightarrow x(n-n_0) \rightarrow x(-n-n_0)$
2) (Reversal -> Time delay)
$x(n) \rightarrow x(-n) \rightarrow x(-n+n_0)$
I'm fine with shifting and reversal not being commutative due to the different results above, but shouldn't the results be opposite? I.e.:
1) (Time delay -> Reversal)
$x(n) \rightarrow x(n-n_0) \rightarrow x(-(n-n_0)) = x(-n + n_0)$
2) (Reversal -> Time delay)
$x(n) \rightarrow x(-n) \rightarrow x(-n - n_0)$
I didn't find this in the errata for the book which is why I'm thoroughly confused. Which is correct?
Answer: Your counterexample to the book's assertion
is confusing between two different uses for $n$. There was a question earlier
in which some user (endolith? datageist?) gave an answer containing
a detailed description of what exactly
this confusion is and how to interpret the results correctly. My cursory search
has not found this great answer, and so I will just exhibit
a simple example.
Suppose that
$x[n] = \begin{cases}1, &n=0,\\2, & n = 1,\\0, &\text{otherwise,}\end{cases}$
Delaying $x[n]$ by one unit gives
$x_d[n] = \begin{cases}1, &n=1,\\2, & n = 2,\\0, &\text{otherwise,}\end{cases}$
and time-reversing this gives
$x_{d,r}[n] = \begin{cases}1, &n=-1,\\2, & n = -2,\\0, &\text{otherwise,}\end{cases}$ which equals $x[-n-1]$ exactly as the book says it does. Don't believe this?
Here is a table of values for the two functions:
$$\begin{array}{|l|c|c|c|c|c|c|c|c|c|}
\hline\\n&\cdots&-3&-2&-1&0&1&2&3&\cdots\\
\hline\\
x[n]&0&0&0&0&1&2&0&0&0\\
\hline\\x_{d,r}[n]&0&0&2&1&0&0&0&0&0\\
\hline\\
-n-1&\cdots&2&1&0&-1&-2&-3&-4&\cdots\\
\hline\\
x[-n-1]&\cdots&0&2&1&0&0&0&0&\cdots\\
\hline\\
\end{array}$$
OK, thus far? On the other hand,
Time-reversing $x[n]$ gives
$x_r[n] = \begin{cases}1, &n=0,\\2, & n = -1,\\0, &\text{otherwise,}\end{cases}$
and delaying this by one time unit gives
$x_{r,d}[n] = \begin{cases}2, & n = 0,\\1, &n=1,\\0, &\text{otherwise,}\end{cases}$
which equals $x[-n+1]$ exactly as the book says it does. Don't believe this either? Here
is the table for it.
$$\begin{array}{|l|c|c|c|c|c|c|c|c|c|}
\hline\\n&\cdots&-3&-2&-1&0&1&2&3&\cdots\\
\hline\\
x[n]&0&0&0&0&1&2&0&0&0\\
\hline\\x_{r,d}[n]&0&0&0&0&2&1&0&0&0\\
\hline\\
-n+1&\cdots&4&3&2&1&0&1&2&\cdots\\
\hline\\
x[-n+1]&0&0&0&0&2&1&0&0&0\\
\hline\\
\end{array}$$
So how can we establish these results without setting up tables?
Well, $x_d$ is a sequence whose $n$-th term $x_d[n]$ equals $x[n-n_0]$
for all choices of $n$; in other words, take whatever is inside the square brackets
after $x_d$, subtract $n_0$ from it, and stick it in as the argument/index
for $x$. Now, for
any sequence $y$, its time-reversal $y_r$ is a sequence for which
$y_r[k] = y[-k]$ for all choices of $k$ (negate the argument of $y_r$ and
stick it in as the argument for $y$. Thus, the time-reversal of
$x_d$ is given by
$$x_{d,r}[k] = (x_d)_r[k] = x_d[-k] = x[(-k)-n_0] = x[-k-n_0]$$
as the book says, and not $x[-k+n_0]$ as the OP claims it should be.
Similarly, $x_r[n] = x[-n]$ and so
$$x_{r,d}[k] = (x_r)_d[k] = x_r[k-n_0] = x[-k+n_0]$$
as the book says it should be, and not $x[-k-n_0]$ as the OP
claims it should be. | {
"domain": "dsp.stackexchange",
"id": 10749,
"tags": "discrete-signals, dsp-core"
} |
Why do the units of entropy include energy and temperature? | Question: I am trying to learn what entropy actually is, and I read this answer about how entropy is the information needed to specify a full quantum state. However, if entropy is just information, why does it have units related to energy and temperature? So I was wondering if someone could explain how information can have these units.
Answer: Historically, entropy was defined in terms of heat transfer and temperature. The connection between disorder and entropy came much later. You can, in fact, define entropy to be a dimensionless quantity, and many people do, but the historical units are still generally used in thermodynamic calculations.
The First Law of Thermodynamics, which is just conservation of energy, states that the internal energy change of a system is related to the mechanical work done and the heat exchanged: $dU=dW+dQ$. The work can be easily understood in terms of the Work-Energy Theorem, with $dW=-p\,dV$. However, the heat is much more mysterious. The existence of heat has been known forever, but its relationship to mechanical energy was very tricky to work out. Joule got his name attached to the SI unit of energy by demonstrating what the First Law states—that heat and work are ultimately equivalent sources of energy.
Despite the heat transfer being written "$dQ$", there is no state function $Q$, just as there is no state function $W$ that means work. $dW$ and $dQ$ are only meaningful as energy transfers, not as internal energy contributions themselves. However, we know that by dividing $dW$ by the pressure, we get the differential of something (the volume) that really is just a function of a system's state: $-dV=dW/p$. Integrating this gives the change in volume during a process as $\Delta V=-\int dW/p$. Analogously to this, it was realized that there was another state function $S$, whose change was the integral of $dQ/T$. The quantity $\Delta S=\int dQ/T$ was named the entropy, and it was found, empirically (using the equations of state for fluids and other substances), that $S$ was a state function. According to its definition, $S$ has units of energy divided by temperature (J/K in SI). Even more important than the fact that $S$ was a state function [meaning that its value depended only on the current state of a system, not its history; it doesn't matter how heat and work were added to a system to bring it to volume $V$ and temperature $T$, its entropy is $S(V,T)$ regardless], $S$ was found to be related to reversibility. It was realized that the Second Law of Thermodynamics was equivalent to the statements that, for a complete, closed system, $\Delta S=0$ for a reversible process and $\Delta S>0$ for an irreversible process. (For no process does a closed system have negative $\Delta S$.)
In the analyses leading to these steps, $S$ was considered to be a strictly abstract quantity. Unlike pressure, volume, and temperature, it was not something that could be directly measured about a system. However, it was incredibly useful, because of its connection to the First, Second (and Third, which I haven't mentioned) Laws of Thermodynamics. The notion of entropy had been developing for several decades before its precise definition was set down in the 1850s, and it was not until the 1870s that Boltzmann provided a microscopic notion that entropy was related to the number of microscopic states of a system that were consistent with the observed macroscopic state. However (and this was one of the many objections to Boltzmann's ideas), there were problems with the units in his statistical definition of entropy. Only in the twentieth century, with the development of quantum mechanics and the work of Shannon on information theory, was it possible to give a precise relation between the number of possible quantum states of a system and its entropy.
By that time, the units of entropy used in thermodynamics were firmly established. People doing practical calculations with fluids and other mechanical systems continue to use the original units of entropy. Other people, who are interested in information content or quantum statistical mechanics, often use a definition of entropy that makes $S$ dimensionless. (For example, if the probability of a system being in each possible state $i$ is $p_{i}$, the most common normalization of the entropy is $S=-\sum_{i}p_{i}\log_{2}p_{i}$.) It is possible to interconvert the definitions using constant scaling factors. For example, some definitions use the natural logarithm instead of the base-2 logarithm, which just changes the normalization of $S$ by an overall factor, since $\ln p_{i}=\log_{2}p_{i}/\log_{2}e$. To rescale $S$ to have its traditional units of energy per temperature, you just insert a factor of Boltzmann's constant $k_{B}=1.380649\times10^{−23}$ J$\cdot$K$^{−1}$. | {
"domain": "physics.stackexchange",
"id": 92098,
"tags": "thermodynamics, entropy"
} |
Leaf Topics rqt_graph | Question:
Moreover, I would like to know what does it mean LEAF TOPIC in ROS because in the rqt_graph user interface there is the option HIDE LEAF TOPICS.
Could someone help me? Thanks a lot.
p.s. When I launch rqt_graph I have the feeling that it doesn't update properly and previous nodes are showed. Of Course I click on refresh time by time. Is there something else I can do?
Originally posted by fabbro on ROS Answers with karma: 115 on 2014-12-10
Post score: 1
Original comments
Comment by bvbdort on 2014-12-10:
Leaf topics which has zero subscribers.
Answer:
As bvbdort mentioned, a leaf topic is a topic which has no subscribers. It is called that in reference to a tree structure which has branches and leaves. Elements without children are called "leaf nodes" or "end-nodes".
Since nothing is subscribing to a leaf topic, you usually are not interested in viewing it in the graph because its data is not in use. However, sometimes you need to know about a leaf topic, for example if it isn't supposed to be a leaf topic and you actually want something to be subscribed to that data! So the option to display those topics can be toggled, mainly for debugging purposes.
The problem you mention at the end of your question may be happening if you close rqt_graph, but do not close roscore, and then restart rqt_graph. There could be things still running that are showing up on the graph that you don't expect. To start fresh you can restart roscore when you restart rqt_graph. Or use the rosnode kill command to kill the nodes you don't want running. What I actually do is use rqt instead of rqt_graph, that way you can see the node graph and process monitor at the same time and it is easy to kill nodes you don't want.
Originally posted by Airuno2L with karma: 3460 on 2014-12-10
This answer was ACCEPTED on the original site
Post score: 1 | {
"domain": "robotics.stackexchange",
"id": 20298,
"tags": "ros, rqt-graph, topics"
} |
A pseudo force problem | Question: I got the following question in an app, while I was preparing for the exams.
A wedge is placed on a smooth horizontal surface. Wedge contains a circular quadrant of radius 25 cm as shown. An insect crawls on the circular path with a constant speed 1/2 m/s. A force F is applied on the wedge, so that it does not move. Find the value of F in Newton when radial line of position of insect makes an angle 60° with the horizontal.
My attempt:
I have tried to resolve the forces so far. But, the problem comes with normal reaction and coefficent of friction, both of which are adjustable quantities and I can't figure out as much equations as there are variables.
!
Please correct me if I'm wrong somewhere. I don't want a solution, just a hint so that I can solve it myself.
Answer: Just balance the forces in horizontal direction.
Forces on the insect are the friction which is equal to the tangential component of weight, the normal and the normal component of the weight. The normal components cause the centripetal accn. You can get the normal and the friction from the fbd of the insect. Now look at the wedge. Resolve the contact forces applied by the insect horizontally. Applied force F balanced that. | {
"domain": "physics.stackexchange",
"id": 57655,
"tags": "homework-and-exercises, newtonian-mechanics, forces, reference-frames"
} |
Denticity of bridging ligands | Question: How would we assign denticity to bridging ligands?
For eg, consider $\ce{NH^-_2}$, it has 2 lone pairs and can hence act as a bridging ligand, making 2 metal-ligand bonds. So I thought it should be bidentate. However, my professor says it is monodentate, why so?
Answer: It is important to clearly separate the terms denticity, hapticity and bridging.
An atom or a ligand is bridging if one single atom bonds to two different acceptors.
A ligand (but never a single atom) can be multidentate or multihaptic if two atoms of that ligand can bond to the same acceptor.
A ligand such as the amide anion cannot be multidentate or multihaptic as it only has one coordinating atom: nitrogen. This nitrogen can coordinate to two different metals (in a μ-amido complex) but it still remains a monodentate ligand.
A ligand such as ethylenediamine or en for short ($\ce{H2N-CH2-CH2-NH2}$) can coordinate to the same central atom via each of the two nitrogen atoms. This is a bidentate ligand as it has two ‘teeth’ in the central atom.
A ligand such as ethene ($\ce{C2H4}$) can coordinate to a single central atom in a bihaptic (η2) fashion. Drawing shortest lines from nucleus to nucleus, both carbon atoms would be the same distance from the acceptor atom (in reality, it is better thought of the π system donating). Hapticity is reserved for such cases in which a continuous set of atoms coordinate to a central metal.
A ligand can be both multihaptic and multidentate. An example would be dibenzylideneacetone or dba ($\ce{Ph-CH=CH-C(=O)-CH=CH-Ph}$), in which both of the $\ce{C=C}$ double bonds act as a bihaptic ligand. (Due to geometry constraints, dba usually coordinates to two different acceptors rather than to a single one in a chelate fashion.)
Finally, it is possible for a ligand such as en to be both bidentate and bridging, if either of the nitrogen atoms coordinates to two different metals. This requires deprotonation of at least one nitrogen.
The bottom line is: bridging ligands can be monodentate or multidentate; they can be monohaptic or multihaptic. Only bridging ligands based on a single donor atom must be monodentate. | {
"domain": "chemistry.stackexchange",
"id": 14492,
"tags": "coordination-compounds, ligand-field-theory"
} |
Photometric surveys vs. Spectroscopic surveys | Question: I consistently read about certain astronomical surveys which are either described as "photometric surveys" or "redshift surveys". I'm still unclear as to how these two methods differ.
Photometry uses the total light (or flux or brightness) received of EM radiation. Different filters are used to measure certain wavelengths of light.
By contrast, spectroscopy spreads light out into different wavelengths, thus matching absorption/emission lines shift in different objects' spectra. The absorption/emission lines are identified and wavelengths are measured.
That is relatively clear, but I'm unsure what the difference when discussing instruments. Let's say we are measuring the redshifts of galaxies. Now, are the devices used entirely different, or is the analysis of these redshifts measured different? Supposedly photometric measurements are based on CCDs.
What exactly is the difference of information we receive by analyzing the photometric redshift data vs. the spectroscopic redshift data?
Answer: Spectroscopy: you pass the light through (or reflect from) a dispersive element (a prism or diffraction grating) and then you record the dispersed light. You have a record of the intensity of the light as a function of wavelength. Advantage: potentially you can record the light of one or more objects over a very wide wavelength range and have excellent fine discrimination between different wavelengths and can look for individual spectral features. Disadvantage: You spread the light from the source thinly over the detector and it can be difficult to obtain spectra of many sources in one go (maybe 10s to 100s if you use multi-slit or multi-fibre spectrographs).
Photometry: you record images of your sources where the light is allowed to pass through coloured filters. The only wavelength information you have is the intensity of the light admitted through the filter (and also modified by the detector response). Advantages: The light from a source over your filter band is concentrated onto a spot on your detector - giving better signal-to-noise ratios.
You also get data for as many sources are in the image - potentially thousands.
Disadvantage: The effective wavelength resolution is only as good as how narrow the filter bandpasses are. You lose the ability to see individual spectral features.
These days the detector used for both techniques is usually a CCD camera. So what differs is what is in the instrument prior to the CCD. For spectroscopy it is a dispersive element, for photometry a coloured filter.
Both techniques can be used in extragalactic astronomy to estimate redshifts. Taking spectra is far more accurate, but a rule of thumb is that photometric techniques, though comparatively uncertain, can be applied to objects a couple of magnitudes (a factor of 5) fainter. This rule of thumb applies to objects with a broad spectrum and strong continuum - most stars and galaxies. For certain types of sources with very strong emission lines, where the power of the source is concentrated into wavelength ranges much narrower than photometric filters, it can be the case that spectroscopy is more sensitive even in the same observing time (e.g. some types of AGN and quasars).
To get a redshift from a spectrum is a fairly obvious procedure. You try to match the positions of known spectral features and measure the wavelength shift from the restframe. Estimating a redshift from photometry needs images taken through several different filters and the "spectral energy distributions" (the crude intensity vs wavelength relation defined by the few photometric brightness measurements) are matched with those predicted from a library of model galaxies redshifted by different amounts. | {
"domain": "physics.stackexchange",
"id": 20358,
"tags": "astronomy, spectroscopy, observational-astronomy, photometry"
} |
error management when parsing multiple floats | Question: I am writing a handler to render a GIF of a Lorentz attractor. I need to parse floating point numbers from the some querystrings attached to the GIF path. If any of them are bad, I need to log an error.
func lorenzHandler(w http.ResponseWriter, r *http.Request) {
var e error
rho := atof(r.FormValue("r"), &e)
sigma := atof(r.FormValue("s"), &e)
b := atof(r.FormValue("b"), &e)
if e != nil {
log.Printf("bad parameters: r=%s; s=%s; b=%s;\n", r.FormValue("r"), r.FormValue("s"), r.FormValue("b"))
} else {
// draw lorenz GIF with parameters rho, sigma and b
// but until then...
log.Println("yay!", rho, sigma, b)
}
}
func atof(s string, e *error) float32 {
if *e == nil {
var x float64
x, *e = strconv.ParseFloat(s, 32)
return float32(x)
}
return 0
}
Is passing around an error, then cleaning it up, correct?
Answer: I miss the reason you are working with a pointer of an error and passing it in different functions.
Why not doing in the simple way?
func lorenzHandler(w http.ResponseWriter, r *http.Request) {
rho, e1 := strconv.ParseFloat(r.FormValue("r"), 32)
sigma, e2 := strconv.ParseFloat(r.FormValue("s"), 32)
b, e3 := strconv.ParseFloat(r.FormValue("b"), 32)
if e1 != nil || e2 != nil || e3 != nil {
log.Printf("bad parameters: r=%s; s=%s; b=%s;\n", r.FormValue("r"), r.FormValue("s"), r.FormValue("b"))
w.WriteHeader(http.StatusBadRequest)
return
}
// draw lorenz GIF with parameters rho, sigma and b
// but until then...
log.Println("yay!", rho, sigma, b)
}
If the problem is about performance, and you want to stop after the first error, without waiting all three executions, you can do
func lorenzHandler(w http.ResponseWriter, r *http.Request) {
rho, err := strconv.ParseFloat(r.FormValue("r"), 32)
if err != nil {
log.Printf("bad parameters: r=%s\n", r.FormValue("r"))
w.WriteHeader(http.StatusBadRequest)
return
}
sigma, err := strconv.ParseFloat(r.FormValue("s"), 32)
if err != nil {
log.Printf("bad parameters: s=%s\n", r.FormValue("s"))
w.WriteHeader(http.StatusBadRequest)
return
}
b, err := strconv.ParseFloat(r.FormValue("b"), 32)
if err != nil {
log.Printf("bad parameters: b=%s\n", r.FormValue("b"))
w.WriteHeader(http.StatusBadRequest)
return
}
// draw lorenz GIF with parameters rho, sigma and b
// but until then...
log.Println("yay!", rho, sigma, b)
} | {
"domain": "codereview.stackexchange",
"id": 26877,
"tags": "parsing, error-handling, http, go, floating-point"
} |
What causes increment in volume when compressive forces are applied on an object | Question: Doubt
When compressive forces are applied on a body, what causes increment in volume. According to me the volume should remain constant since the mass and density of body are constant.
Answer: Neither of them should remain constant. Have you seen any type of press? There are plenty of them — take a look at garbage-collecting trucks: they have a standard garbage compressor integrated into the machine. So this means that we can compress an object's volume into a smaller one. Of course, not all objects have the same compressibility level. Some (such as water) are considered to be incompressible. But this depends on the amount of pressure you can apply to the object. Under extraordinary pressure conditions, such as inside a black hole, water would be compressed too. Other objects compress easily, such as most gases at room temperature.
Compactifying volume means that we increase object's density, because density is inversely proportional to objects volume: $\rho = \frac mV$. So neither density stays constant.
If you have A LOT of compression power, then atoms of object can become so close to each other that electron capture reaction can begin :
$$ p + e^- \to n + \nu_e $$
Proton captures electron and converts into neutron emitting neutrino in the process. If you do the math correctly you will see that
$$ m_n - (m_p + m_e) = 0.7823 \,\,\left[\frac{\text{MeV}}{c^2}\right] $$
So in the end matter becomes more massive in the process of neutron accumulation. By rough analogies of mass-energy equivalence, $E=mc^2$, this means that you are accumulating/converting your compression energy into object mass, so the object becomes heavier in the process. Thus mass, too, isn't constant under high compression. This process can be seen in neutron star formation. On Earth we don't have such huge pressure conditions, but astronomers can observe them in high-density star formation. Only in that case, the role of pressure is taken by gravitational forces. | {
"domain": "physics.stackexchange",
"id": 67442,
"tags": "pressure, material-science, elasticity, stress-strain, volume"
} |
Quadratic equation solver in JavaScript | Question: The task is to implement a solveEquation function, which solves the Quadratic equation. Each equality has exact 2 integer solutions. Return those numbers as an ordered array.
Example:
const solutions = solveEquation('2 * x^2 - 10 * x + 12');
console.log(solutions); // [2, 3]
Solution:
function solveEquation(equation) {
// Your solution here
let a, b, c;
// Using ES6 destructuring
[a, b, c] = getValues(equation);
return getXList(a, b, c);
}
// Determine a,b,c and return in an array
function getValues(equation) {
// ax^2 + bx + c
let a = equation.split(' * x^2')[0];
let aSign = equation.split(a + ' * x^2')[0];
// Assigns +- a
a = parseInt(aSign + a);
let bSign = equation.split(a + ' * x^2 ')[1].charAt(0);
let b = equation.split(' * x^2 ' + bSign + ' ')[1].split(' * x ')[0];
// Assigns +- b
b = parseInt(bSign + b);
// Assigns +- b
let cSign = equation.split(' * x ')[1].charAt(0);
let c = equation.split(' * x ' + cSign + ' ')[1];
// Assigns +- c
c = parseInt(cSign + c);
return [a, b, c];
}
// Returns ordered Array
function getXList(a, b, c) {
let list = [];
list.push(getX('+', a, b, c));
list.push(getX('-', a, b, c));
list.sort((a, b) => a - b);
return list;
}
// Returns X
function getX(sign, a, b, c) {
// represent x = (- b +- √b^2-4ac)/2a as x = (b1 +- c1)/a1
let x, b1, a1, c1;
b1 = parseInt(- + b);
a1 = 2 * a;
c1 = Math.sqrt((b * b) - 4 * a * c);
return x = Math.round((sign === '+') ? ((b1 + c1) / a1) : ((b1 - c1) / a1));
}
console.log(solveEquation('2 * x^2 - 10 * x + 12'));// [2, 3]
console.log(solveEquation('-20 * x^2 - 108797540 * x - 130011773690520'));// [-3667291, -1772586]
console.log(solveEquation('294 * x^2 - 141195558 * x - 1600964090384736'));// [-2105744, 2586001]
console.log(solveEquation('-267 * x^2 + 296412186 * x + 4722715166392080'));// [-3687112, 4797270]
console.log(solveEquation('-381 * x^2 + 2871374115 * x - 5385906614046864'));// [3516911, 4019504]
console.log(solveEquation('-154 * x^2 + 645219652 * x - 602658645850800'));// [1405588, 2784150]
The main reason I post this program is to get critical feedback and perhaps improve my coding style.
Answer: Buggy as.
Unusable I made a single change and then could not get it to work (turned out I added an extra space)
Bugs
Any spaces in the wrong spot, and deviations from the simplified equation, any fractions and more will cause the code to throw a variety of errors.
Expectation
When you create a function people that use it have idiomatic expectations. You must attempt to conform at least to common variations on these expectations, and or ensure that your code does not throw errors if the input is not as expected.
One would expect a equation solver to be robust and handle all the standard input variations, like be
invariant to white spaces,
invariant to equation layout (1 * x ^ 2 + 1 * x ^ 2) === ( 2 * x ^ 2)
invariant to form of power (x * x) === (x ^ 2)
invariant to implied constants (x + 0) === (x), (1*x) === (x) and (2*x^2+0*x) === (2*x^2)
invariant to equation form (x + 1) ^ 2 === ((x + 1) * (x + 1)) === (x ^ 2 + 2 * x + 1)
I invariant to implied multiplier (2x === 2 * x) Note the number (or variable) if before x and has no operator, the multiplication * is always implied.
Other problems.
Not all quadratics have solutions.
A quadratic can have zero solutions as it is above of below the x axis. Eg x ^ 2 + 1
A quadratic can have one solution if it just touches the x axis eg x ^ 2
A quadratic can have two solution eg x ^ 2 - 1 * x
Quadratics can also include fractions 1/2 or 0.5. You should also be able to add the equations strings together, two or more 2nd order polynomials added together (joined with a "+" or "-") should equate into a valid quadratic.
Naming
The function naming is rather bad. To name a few examples
getValues maybe parseEquation
getXList mabe solveQuadratic
list maybe coefficients
Constants
You should use const for constants. Some examples...
const [a, b, c] = getValues(equation);
const list = [];
const cSign = equation.split(' * x ')[1].charAt(0);
And most likely more problems but as the code is so hard to use I have not looked any deeper into your algorithm.
Conclusion.
I would say "Try again." as it is not at all what I would expect from a function that solves quadratics. | {
"domain": "codereview.stackexchange",
"id": 30255,
"tags": "javascript, parsing"
} |
Centripetal Acceleration as a Cross Product | Question: Is it fine to express the centripetal acceleration as a cross product?
a=v X w (where a is centripetal acceleration, v is magnitude of velocity, w is angular velocity)
And is it v X w or w X v?
What I think:
Since centripetal acceleration requires tangential (perpendicular) velocity, I start thinking about cross products, and was able to express the acceleration vector as 2 other vectors.
Fiddling around with my right hand, I think that a=v X w and not a=w X v.
Where the convention is
-angular velocity towards me implies positive anticlockwise movement
-centripetal acceleration upwards is taken as positive
-velocity has to move in a way to cause anticlockwise movement
Thing is, I've been searching this up on the Internet but couldn't find any resources for confirmation.
Is it true that centripetal acceleration can be represented as the cross product of velocity and angular velocity? v X w
Answer: $$\vec{a}=\vec{\omega} \times \vec{v}$$
This delivers the goods for a point moving at speed $v$ in a circular path with angular velocity $\vec\omega$ about the centre of the circle. Because the circle lies in one plane, the direction of $\vec\omega,$ as well as its magnitude, is constant.
We can indeed derive the centripetal acceleration formula rather neatly starting with
$$\vec{v}=\vec{\omega} \times \vec{r}$$
So$$\frac{d\vec{v}}{dt}=\frac{d\vec{\omega}}{dt} \times \vec r + \vec\omega \times \frac{d\vec r}{dt}$$
The first term on the right disappears because $\vec\omega$ is a constant for a body moving in a circle at constant speed, so you're left with$$\vec a=\vec\omega \times \vec v$$ | {
"domain": "physics.stackexchange",
"id": 55189,
"tags": "velocity, vectors, centripetal-force, angular-velocity"
} |
Explaining the difference between computer science and computer literacy | Question: What is a good metaphor or example to explain to an English major the difference between classical computer science and "being good with using MS-Windows"
computer science
computer programming
using computers
3 profoundly different things. Most people have no idea what Computer Science even is. They just see the word "computer". Hence, "he is a Computer Science major" can be interpreted as "He can hook up my printer". Or that he's "good with computers". Even fewer people know the difference between computer programming and Computer Science.
Computer Science is computing theory. CS can be learned without actual computers. CPU micro architecture. How to sort numbers, how to traverse lists, etc. State machines. Algorithms, big(Oh), etc. How to design a programming language or compiler.
Programming is writing code and creating applications in a language and compiler created by a computer scientist.
Lastly, there is using a computer (using a GUI, mouse, and keyboard. Internet, MS-Office, etc)
Yet all three of these are used interchangeably by laymen.
What is a good metaphor or example to explain to an English major the difference between classical computer science and "being good with using MS-Windows" Or simply, a pithy example of how real computer science has nothing to do with using MS-Windows.
Answer: How about an automotive analogy?
uses computers and maybe "is good with computers" :: a driver (can drive and refuel safely) and maybe a car enthusiast (can jump start a car; is familiar with many makes and models; knows techniques like using windshield treatment to keep rain from reducing visibility).
programmer :: an automotive mechanic or technician. Knows how cars work. Can repair and modify cars and even build kit cars. Ought to know how to debug/diagnose problems by using the scientific method. Might not be aware of relevant theory and thus might write O(n2) loops.
software engineer :: an automotive engineer. Designs cars, engines, and other components that you can entrust your life with, and does it within schedule, cost, manufacturability, and other constraints. Knows how to apply the relevant theory/math such as finite element analysis.
computer scientist :: an automotive scientist. Researches new ideas in vehicles, human-machine interfaces, and propulsion. Does computational crash test modeling. Adds to the body of theory and experimental results.
So for people who equate all “computing” with “proficient in using some software package,” that's like equating driving proficiency with the ability to design antilock brakes that we trust lives to, that are manufacturable with consistent high quality and low cost, and work for years in extreme weather. Or equating driving proficiency with researching what kind of radar-triggered braking features will avoid collisions without freaking the driver into swerving into another lane.
Perhaps lay people confuse these terms because "computer science" classes teach computer use skills, programming, theory, or engineering. All that stuff (arguably not the first part) fits in the curriculum of computer science. None of it is the end-all "content" of computer science, just as English classes are learning on the way to an English major (a fuzzier concept). | {
"domain": "cs.stackexchange",
"id": 2795,
"tags": "terminology, education"
} |
What do Wild Silverfish Eat? | Question: Silverfish are famous for eating wallpaper paste.
The paste is made of starch mixed with water. Wikipedia says silverfish love starch
They consume matter that contains polysaccharides, such as starches and dextrin in adhesives.[4] These include book bindings, carpet, clothing, coffee, dandruff, glue, hair, some paints, paper, photos, plaster, and sugar.
But all of these are artificial substances. I wonder what do they eat in the wild?
I suspected they eat wood but a cursory search suggests they do not. Or maybe the articles I found are talking about them not eating wooden furniture. Perhaps they prefer to eat damp wood such as rotting logs in the wild.
Most of the obvious info on Silverfish is about what they do inside your house, rather than in the wild. Does anyone know what they eat in the wild?
Answer: Silverfish are shredders. Shredders are...
...responsible for processing coarse particulate organic
matter (CPOM). CPOM is greater than 1 mm in size and typically derived
from allochthonous sources, such as woody debris, leaves, and other
vegetation [..].
Their diet includes sources of starch and protein, including grains, vegetables, fibers, sugars and fabrics. Silverfish can actually digest cellulose, a feat not shared by many animals. They are able to do so thanks to cellulase activity in microorganisms that live in their gut (Lasker & Giese, 1957).
Reference
- Lasker & Giese, J Exp Biol (1957) 33(3): 54–553 | {
"domain": "biology.stackexchange",
"id": 12057,
"tags": "zoology, entomology, behaviour, invertebrates, diet"
} |
Is there anything we are aware of that can't be slowed by time dilation? | Question: While thinking about the ambitions of this post: Can radioactivity be slowed through time dilation?
I was asking myself, is there even anything that is dependent on time and can't be slowed through time dilation?
Answer: No. It affects time on a fundamental level. All processes are affected.
Since the processes are occuring normally in the object’s own rest frame, and it is only the observer that perceives a different measure of time, clearly it must apply to everything without exception.
Consider two points in 4D spacetime. They represent an experimental box where different processes are performed: chemicals react, a spring unwinds, radioisotope decays, etc. Everything you can think of is represented. One point is “before” and the other point is “after”. In the experiment’s own frame there is no change in position and the interval between the two points is only in the time direction, of measure t1.
Some other observer will measure different values for the spatial separation and time t2. Now does it even make sense to say that some processes will be measured at a time other than t2? That would mean they are no longer on that point: Observer 2 sees the spring unwind before the acid reacts, as opposed to observer 1 who sees them take the same time? That is not a different view of spacetime but a different reality.
The box (observer 1) is sitting there at rest minding its own business. How can the presence of observer 2 (moving at relativistic speed relative to the box) somehow make the different processes act differently? | {
"domain": "physics.stackexchange",
"id": 34933,
"tags": "reference-frames, relativity, time-dilation, observers"
} |
Got controller gains for six-axis manipulator? | Question:
Do you have working PID gains for the effort_controllers/JointTrajectoryController of a six-axis manipulator that I could use as a starting point for tuning the gains on a UR10? The universal_robot package uses the PositionJointInterface class for getting the joints to their desired positions but this mechanism causes unrealistic interactions between the manipulator and the Gazebo environment. For that reason I've changed the package locally to use the EffortJointInterface following this example. The author notes that the gains there are not working. Please share with me any advice on tuning a 6-R 'bot or working gains for a manipulator.
Originally posted by raequin on ROS Answers with karma: 368 on 2018-09-16
Post score: 1
Original comments
Comment by pmuthu2s on 2019-03-20:
Do you have the values which worked for you?
Comment by raequin on 2019-03-20:
The values at the link in the accepted answer worked well.
linear_arm_actuator_joint: {p: 10000, d: 500, i: 0, i_clamp: 1}
shoulder_pan_joint: {p: 10000, d: 150, i: 0, i_clamp: 1}
shoulder_lift_joint: {p: 50000, d: 150, i: 10, i_clamp: 50}
elbow_joint: {p: 50000, d: 150, i: 1, i_clamp: 25}
wrist_1_joint: {p: 100, d: 5, i: 0, i_clamp: 1}
wrist_2_joint: {p: 75, d: 2, i: 0, i_clamp: 1}
wrist_3_joint: {p: 25, d: 1, i: 0, i_clamp: 1}
Answer:
I don't know whether they "work", but you could take a look at the values that OSRF used for the ARIAC 2017 competition: osrf/ariac/src/osrf_gear/launch/ur10/arm_controller_ur10_custom.yaml.
Originally posted by gvdhoorn with karma: 86574 on 2018-09-17
This answer was ACCEPTED on the original site
Post score: 2
Original comments
Comment by raequin on 2018-09-17:
Those values perform better for me than the original ones in the link included in the question. The ones you referred to are a good starting point for tuning :)
Comment by gvdhoorn on 2018-09-17:
If you manage to improve the behaviour by using different values, it would be nice if you could post here to let us know which values you used.
Comment by raequin on 2018-09-24:
The first time I used the posted gains was with a voluminous tool on the robot and the manipulator had a lot of overshoot. Since then, my simulations have used more conventional tooling and performance has been fine. That is, these gains seem okay to me :)
Comment by gvdhoorn on 2018-09-24:
Nice.
We might add them as defaults to the improved version of the universal_robot packages. | {
"domain": "robotics.stackexchange",
"id": 31781,
"tags": "pid, ros-kinetic, ur10, gazebo-ros, universal-robot"
} |
Calculation about two-point correlation function in Fourier space | Question: In the book Cosmology by Daniel Baumann, when talking about the two-point correlation function of the density fluctuation $\delta(\vec{x})=\delta(\vec{x},t)$ for a fixed time $t$, the author states that this correlation function in Fourier space is the following:
\begin{aligned}
\left\langle\delta(\mathbf{k}) \delta^{*}\left(\mathbf{k}^{\prime}\right)\right\rangle &=\int \mathrm{d}^{3} x \mathrm{~d}^{3} x^{\prime} e^{-i \mathbf{k} \cdot \mathbf{x}} e^{i \mathbf{k}^{\prime} \cdot \mathbf{x}^{\prime}}\left\langle\delta(\mathbf{x}) \delta\left(\mathbf{x}^{\prime}\right)\right\rangle \\
&=\int \mathrm{d}^{3} r \mathrm{~d}^{3} x^{\prime} e^{-i \mathbf{k} \cdot \mathbf{r}} e^{-i\left(\mathbf{k}-\mathbf{k}^{\prime}\right) \cdot \mathbf{x}^{\prime}} \xi(r) \\
&=(2 \pi)^{3} \delta_{\mathrm{D}}\left(\mathbf{k}-\mathbf{k}^{\prime}\right) \int \mathrm{d}^{3} r e^{-i \mathbf{k} \cdot \mathbf{r}} \xi(r) \\
& \equiv(2 \pi)^{3} \delta_{\mathrm{D}}\left(\mathbf{k}-\mathbf{k}^{\prime}\right) \mathcal{P}(k),
\end{aligned}
where $\mathbf{r}=\mathbf{x}-\mathbf{x'}$. I don't know why this correlation function needs to include the complex conjugate of $\delta(\mathbf{k'})$ instead of just being $\left\langle\delta(\mathbf{k}),\delta(\mathbf{k'})\right\rangle$, but apart from that, I understand the first and second lines. My question arises in the step between the second and third lines. I know that the Fourier transform of the Dirac delta $\delta_D$ is:
$$\mathcal{F}[\delta(\mathbf{x}-\mathbf{x_0})]=\int_{\mathbb{R}^3}d^3xe^{-i\mathbf{k}\cdot\mathbf{x}}\delta(\mathbf{x}-\mathbf{x_0})=e^{-i\mathbf{k}\cdot\mathbf{x_0}}$$
Therefore, if I use the inverse Fourier transform on this result, I get:
$$\delta(\mathbf{x}-\mathbf{x_0})=\mathcal{F}^{-1}(e^{-i\mathbf{k}\cdot\mathbf{x_0}})=\int\dfrac{d^3k}{(2\pi)^3}e^{i\mathbf{k}\cdot\mathbf{x}}e^{-i\mathbf{k}\cdot\mathbf{x_0}}=\int\dfrac{d^3k}{(2\pi)^3}e^{i\mathbf{k}\cdot(\mathbf{x}-\mathbf{x_0})}$$
This means that, renaming $\mathbf{k}$ as $\mathbf{x'}$, $\mathbf{x}$ as $\mathbf{k}$ and $\mathbf{x_0}$ as $\mathbf{k'}$, we have:
$$(2\pi)^3\delta(\mathbf{k}-\mathbf{k'})=\int d^3x'\ e^{i(\mathbf{k}-\mathbf{k'})\cdot\mathbf{x'}}$$
But what the book seems to have used to get the third line from the second one is this formula instead:
$$(2\pi)^3\delta(\mathbf{k}-\mathbf{k'})=\int d^3x'\ e^{-i(\mathbf{k}-\mathbf{k'})\cdot\mathbf{x'}}$$
So, where does the minus in the exponent come from? Or is there an errata in the book?
Answer: The delta function is an even distribution: i.e. $\delta(x)=\delta(-x)$ in general.
Also note that $\langle \delta(k) \delta^*(k') \rangle$ is an expectation value here, $\bf{not}$ an inner product (although this could be written differently using an inner product). | {
"domain": "physics.stackexchange",
"id": 95317,
"tags": "cosmology, fourier-transform, correlation-functions"
} |
Could you produce electricity from the alpha rays emitting from Americium-241? | Question: I was just wondering if you could take a small amount of Am241 and use the gamma rays coming off of it to generate electricity. I don't really know how this would work, so that's why I'm asking it here. Thanks in advance!
Answer: Sure, you can just absorb the radiation as heat and use it to generate electricity thermoelectrically. This is how the Voyager space probes maintain power, for example. This is also mentioned on the wikipedia page for Am241. | {
"domain": "physics.stackexchange",
"id": 100434,
"tags": "nuclear-physics, radiation"
} |
Divide list into n parts in scala | Question: The problem is to divide a list into n parts according to a predicate. I found recursive solution but I am sure that a more elegant solution also exists.
def repeatedSpan[T](col: Iterable[T], pred: T => Boolean): List[Iterable[T]] = {
if (col.isEmpty) Nil
else {
val (a, b) = col.span(pred)
if (a.isEmpty) b.take(1) :: repeatedSpan(b.tail, pred)
else a :: repeatedSpan(b, pred)
}
}
scala> repeatedSpan(List(1, 2, 3, 1, 4, 5, 6, 1, 7), (a: Int) => a != 1)
res9: List[Iterable[Int]] = List(List(1), List(2, 3), List(1), List(4, 5, 6), List(1), List(7))
Answer: I found about 5 things that can be improved, or maybe it's more accurate to say that I did 5 passes over the code in my efforts to improve it. Here's the changes I would recommend, in the order that I encountered them.
Better Names
The first change was mostly cosmetic, but it was very helpful laying the groundwork for more changes later.
def repeatedSpan[T](iterable: Iterable[T], pred: T => Boolean): List[Iterable[T]] = {
if (iterable.isEmpty) Nil
else {
val (matches, rest) = iterable.span(pred)
if (matches.isEmpty) rest.take(1) :: repeatedSpan(rest.tail, pred)
else matches :: repeatedSpan(rest, pred)
}
}
Naming is hard, particularly as rest is really the element that failed the predicate, and everything past that point.
Multiple Parameter Lists
This change was partially a help for the compiler, and partially to enable a bit of syntax that I'm partial to.
def repeatedSpan[T](col: Iterable[T])(pred: T => Boolean): List[Iterable[T]] = {
if (col.isEmpty) Nil
else {
val (a, b) = col.span(pred)
if (a.isEmpty) b.take(1) :: repeatedSpan(b.tail)(pred)
else a :: repeatedSpan(b)(pred)
}
}
By switching to a signature with multiple parameters, the compiler can infer types more easily, and as an additional bonus users can now use the shortened function literal syntax.
repeatedSpan(basicList, (a:Int) => a != 1) # Not bad to type
repeatedSpan(basicList)(_ != 1) # Much nicer!
Pattern Matching
The next change moved from if () else to pattern matching. I did this for two reasons. The first is that it allowed better naming that rest and generally made the logic a bit more clear. The second was that it paves the way for one of the other changes that I had my eye on.
def repeatedSpan[T](iterable: Iterable[T])(pred: T => Boolean): List[Iterable[T]] =
iterable.span(pred) match {
case (Nil, doesNotMatch :: unTested) =>
List(doesNotMatch) :: repeatedSpan(unTested)(pred)
case (matchingPrefix, doesNotMatch :: unTested) =>
matchingPrefix :: List(doesNotMatch) :: repeatedSpan(unTested)(pred)
case (matchingPrefix, Nil) => List(matchingPrefix)
}
This had the added bonus of eliminating a level of nesting, but the big gain was being able to easily decompose the results into matchingPrefix, doesNotMatch, and unTested.
Tail Recursion
One of the big remaining issues is that this will blow the stack on large lists. So the next step, facilitated by the simplification we gained by switching to pattern matching, was to rework it into a tail recursive version.
def repeatedSpan[T](iterable: Iterable[T])(pred: T => Boolean): List[Iterable[T]] = {
@tailrec
def loop(accum: List[Iterable[T]], rest: Iterable[T]): List[Iterable[T]] =
rest.span(pred) match {
case (Nil, doesNotMatch :: unTested) =>
loop(List(doesNotMatch) :: accum, unTested)
case (matchingPrefix, doesNotMatch :: unTested) =>
loop(List(doesNotMatch) :: matchingPrefix :: accum, unTested)
case (matchingPrefix, Nil) => List(matchingPrefix)
matchingPrefix :: accum
}
loop(Nil, iterable).reverse
}
This has a major weakness. Tail recursion lets us work with large Iterables, but a List is a singly linked list, and they do not append well. To fix this, I created the list in reverse, then at the end reversed the result. I was not particularly happy with adding this complexity, but it was necessary to gain access to tail-call optimization and avoid blowing the stack with larger lists.
Program to the Interface
The best way to remove the unnecessary complexity introduced in the last step was to switch data structures. This would be more difficult if this had a bunch of code that called it, but it was simple to modify the signature and use a Vector instead, which has excellent append performance.
def repeatedSpan[T](iterable: Iterable[T])(pred: T => Boolean): Iterable[Iterable[T]] = {
@tailrec
def loop(accum: Vector[Iterable[T]], rest: Iterable[T]): Iterable[Iterable[T]] =
rest.span(pred) match {
case (Nil, doesNotMatch :: unTested) => loop(accum :+ List(doesNotMatch), unTested)
case (matches, doesNotMatch :: unTested) => loop(accum :+ matches :+ List(doesNotMatch), unTested)
case (matches, Nil) => accum :+ matches
}
loop(Vector(), iterable)
}
Now that we are using a more appropriate data structure, we have regained the simplicity we had before, and don't run out of stack space. | {
"domain": "codereview.stackexchange",
"id": 17727,
"tags": "scala"
} |
Einstein's postulates $\leftrightarrow$ Minkowski space for a Layman | Question: What's the cleanest/quickest way to go between Einstein's postulates [1] of
Relativity: Physical laws are the same in all inertial reference frames.
Constant speed of light: "... light is always propagated in empty space with a definite speed $c$ which is independent of the state of motion of the emitting body."
to Minkowski's idea [2] that space and time are united into a 4D spacetime with the indefinite metric
$ds^2 = \vec{dx^2} - c^2 dt^2$.
Related to the question of what is the best derivation of the correspondence are:
Is the correspondence 1:1? (Does the correspondence go both ways?)
and are there any hidden/extra assumptions?
Edit
Marek's answer is really good (I suggest you read it and its references now!), but not quite what I was thinking of.
I'm looking for an answer (or a reference) that shows the correspondence using only/mainly simple algebra and geometry. An argument that a smart high school graduate would be able to understand.
Answer: I will first describe the naive correspondence that is assumed in usual literature and then I will say why it's wrong (addressing your last question about hidden assumptions) :)
The postulate of relativity would be completely empty if the inertial frames weren't somehow specified. So here there is already hidden an implicit assumption that we are talking only about rotations and translations (which imply that the universe is isotropic and homogenous), boosts and combinations of these. From classical physics we know there are two possible groups that could accomodate these symmetries: the Gallilean group and the Poincaré group (there is a catch here I mentioned; I'll describe it at the end of the post). Constancy of speed of light then implies that the group of automorphisms must be the Poincaré group and consequently, the geometry must be Minkowskian.
[Sidenote: how to obtain geometry from a group? You look at its biggest normal subgroup and factor by it; what you're left with is a homogeneous space that is acted upon by the original group. Examples: $E(2)$ (symmetries of the Euclidean plane) has the group of (improper) rotations $O(2)$ as the normal subgroup and $E(2) / O(2)$ gives ${\mathbb R}^2$. Similarly $O(1,3) \ltimes {\mathbb R}^4 / O(1,3)$ gives us Minkowski space.]
The converse direction is trivial because it's easy to check that the Minkowski space satisfies both of Einstein postulates.
Now to address the catch: there are actually not two but eight kinematical groups that describe isotropic and uniform universes and are also consistent with quantum mechanics. They have been classified by Bacry and Lévy-Leblond. The relations among them are described in Dyson's Missed opportunities (p. 9). E.g., there is a group that has absolute space (instead of absolute time that we have in classical physics) but this is ruled out by the postulate of constant speed of light. In fact, only two groups remain after Einstein's postulates have been taken into account: besides the Poincaré group, we have the group of symmetries of the de Sitter space (and in terms of the above geometric program it is $O(1,4) / O(1,3)$).
Actually, one could also drop the above mentioned restriction to groups that make sense in quantum mechanics and then we could also have an anti de Sitter space ($O(2,3) / O(1,3)$). In fact, this shouldn't be surprising as general relativity is a natural generalization of the special relativity so that the Einstein's postulates are actually weak enough that they describe maximally symmetric Lorentzian manifolds (which probably wasn't what Einstein intented originally). | {
"domain": "physics.stackexchange",
"id": 14452,
"tags": "special-relativity, spacetime"
} |
Change pitch of a wav file without changing the Sample rate? | Question: I'm creating a Python library I call PAL, (Python Audio Library) and I need some help. What ways are there to change the pitch of a wav file without changing the sampling rate? I have complete access to the data contained in said file as numbers so if there is an equation or method that would work please let me know. To recap
Change the pitch
Don't touch the sampling rate
I can mathematically change the samples freely
Answer: If you're looking for a library solution, there's a few, for instance librubberband, binaries available in many Linux distros.
If you need a wheel to reinvent, well, approaches may vary, from granular stretching (more applicable to time stretch, but these two effects always come hand in hand), to FFT uppitch (probably somewhat similar in the frequency domain), I doubt there's a standard consensus on how to implement this effect. | {
"domain": "dsp.stackexchange",
"id": 6401,
"tags": "pitch"
} |
Stable Sort in C# | Question: I wrote a stable sort algorithm in C#. It is just a simple iteration, and is probably not very optimal:
static void StableSort(ref ObservableCollection<string> Vals, ref ObservableCollection<int> Weight)
{
for (int i = 0; i < Vals.Count; i++)
{
int LargestVal = -1;
int LargestValIndex = 0;
for (int j = i; j < Vals.Count; j++)
{
if (Weight[j] > LargestVal)
{
LargestVal = Weight[j];
LargestValIndex = j;
}
}
if (LargestValIndex != i)
{
Weight.Insert(i, Weight[LargestValIndex]);
Weight.RemoveAt(LargestValIndex + 1);
Vals.Insert(i, Vals[LargestValIndex]);
Vals.RemoveAt(LargestValIndex + 1);
}
}
}
You can see how it works by running it with this code (this isn't what I want a review on, but if you see something really wrong, please tell me):
static void Main(string[] args)
{
ObservableCollection<int> i = new ObservableCollection<int>() { 1, 2, 4, 6, 7, 2, 4, 7, 3, 3, 7 };
ObservableCollection<string> s = new ObservableCollection<string>() { "1", "2a", "4a", "6", "7a", "2b", "4b", "7b", "3a", "3b", "7c" };
foreach (int item in i)
{
Console.Write(item + ", ");
}
Console.WriteLine();
foreach (string item in s)
{
Console.Write(item + ", ");
}
Console.WriteLine();
StableSort(ref s, ref i);
Console.WriteLine();
foreach (int item in i)
{
Console.Write(item + ", ");
}
Console.WriteLine();
foreach (string item in s)
{
Console.Write(item + ", ");
}
Console.WriteLine();
}
This is going to be integrated into a larger project that requires that I use ObservableCollections. At the most, there will probably be 20-50 values being sorted, but this is going to be run on phones and other devices without a lot of computing power, so I would also like to know if this should be optimized or if a different sorting method should be used to improve performance. A stable sort isn't absolutely required, but I would much prefer it. Thanks for any tips ahead of time!
Answer: Possible problems
you don't check for Vals == null nor for Weigth == null
you don't check if Vals.Count == Weight.Count
a object which subscribed the CollectionChanged Event of the collection, will get much work
Guard condition
If we replace
if (LargestValIndex != i)
{
//do the swapping
}
by
if (LargestValIndex == i) { continue; }
// do the swapping
we will save a level of indention.
Naming
Based on the naming guidelines parameters and local variables should be named using camelCase casing.
Because an ObservableCollection will by its nature contain multiple items, a parameter representing one should be named using the plural form.
Refactoring
Implementing all above will lead to
static void StableSort(ref ObservableCollection<string> values, ref ObservableCollection<int> weights)
{
if (values == null) { throw new ArgumentNullException("values"); }
if (weights == null) { throw new ArgumentNullException("weights"); }
if (values.Count != weights.Count) { throw new ArgumentOutOfRangeException("collections count not equal", (Exception)null); }
IList<string> localValues = new List<string>(values);
IList<int> localWeights = new List<int>(weights);
for (int i = 0; i < values.Count; i++)
{
int largestWeight = -1;
int largestWeightIndex = 0;
for (int j = i; j < values.Count; j++)
{
if (localWeights[j] > largestWeight)
{
largestWeight = localWeights[j];
largestWeightIndex = j;
}
}
if (largestWeightIndex == i) { continue; }
localWeights.Insert(i, localWeights[largestWeightIndex]);
localWeights.RemoveAt(largestWeightIndex + 1);
localValues.Insert(i, localValues[largestWeightIndex]);
localValues.RemoveAt(largestWeightIndex + 1);
}
values = new ObservableCollection<string>(localValues);
weights = new ObservableCollection<int>(localWeights);
}
now that it is a clean implementation we are finished.
But wait, we can do better.
We can use the OrderByDescending() extension method together with Select() extension method and anonymous types.
static void StableSort(ref ObservableCollection<string> values, ref ObservableCollection<int> weights)
{
if (values == null) { throw new ArgumentNullException("values"); }
if (weights == null) { throw new ArgumentNullException("weights"); }
if (values.Count != weights.Count) { throw new ArgumentOutOfRangeException("collections count not equal", (Exception)null); }
IList<string> localValues = new List<string>();
IList<int> localWeights = new List<int>();
int index = -1;
var weightsWithIndex = weights.Select(p => new { Value = p, Index = ++index }).OrderByDescending(p => p.Value);
foreach (var w in weightsWithIndex)
{
localWeights.Add(w.Value);
localValues.Add(values[w.Index]);
}
values = new ObservableCollection<string>(localValues);
weights = new ObservableCollection<int>(localWeights);
}
So, what is this fancy, cool stuff
weights.Select(p => new { Value = p, Index = ++index })
??
We create for each item in weights a anonymous type where we create a property Value and assign the item of weights and before we assign the index to the Index property we increment it. So basically this is just like creating a Tuple but a way cooler.
Some related information
by definition anonymous types are immutable.
the order of properties matters
var p1 = new { X=1, Y=2 };
var p2 = new { X=1, Y=2 };
var p3 = new { Y=2, X=1 };
here p1.Equals(p2) == true but p1.Equals(p3) == false
An extract from a good reading about this subject
In short, an anonymous type is a reference type that derives directly
from object and is defined by its set of properties base on their
names, number, types, and order given at initialization. In addition
to just holding these properties, it is also given appropriate
overridden implementations for Equals() and GetHashCode() that take
into account all of the properties to correctly perform property
comparisons and hashing. Also overridden is an implementation of
ToString() which makes it easy to display the contents of an anonymous
type instance in a fairly concise manner. | {
"domain": "codereview.stackexchange",
"id": 11192,
"tags": "c#, sorting"
} |
How to use IK library inside Python SMACH state (Python)? | Question:
I want to create create complex behaviors using the SMACH library (e.g. grasping an object, bipod walking, etc).
In order to maintain flexibility, inside my SMACH state machines, I've used parameterized states (e.g. I have a MoveAlongPath state which I instantiate with a list of joint waypoints to produce different movements: lift leg, plant leg, etc).
I want to use an inverse kinematics library inside a SMACH state (e.g. Calculate IK) to generate a list of waypoints (which will be passed on to instances of the Movement state).
I am interested in the HEBI Robotics API and the trac_ik library - both of which are in C++.
My question is:
What's the recommended ROS way to make calls to a C++ IK library from inside a Python SMACH state?
... Boost Python, C++ SimpleActionServer?
edit:
trac_ik has a pull request for a python wrapper but it hasn't been merged in yet.
edit:
As of 2018/29/01
trac_ik_python has been released.
HEBI has announced a Python API for their motors/kinematics library
Originally posted by josephcoombe on ROS Answers with karma: 697 on 2017-11-27
Post score: 0
Original comments
Comment by gvdhoorn on 2018-01-30:
Why delete the question? Have you solved it? If so, then please post an answer and accept your own answer.
Comment by josephcoombe on 2018-01-30:
I didn't really solve the question as stated, but trac_ik_python has been released - eliminating the original need.
I have modified the question title, added my own answer, and undeleted the question as recommended.
Answer:
As of 2018-01-04, trac_ik has swig based python wrapper trac_ik_python
As of ~2018-01-31, this will be available in the ROS kinetic repository.
$ sudo apt-get install ros-kinetic-trac-ik
or from source at https://bitbucket.org/traclabs/trac_ik
Additionally, HEBI is developing a Python API and a ROS API for their motors/kinematics library
Originally posted by josephcoombe with karma: 697 on 2018-01-30
This answer was ACCEPTED on the original site
Post score: 0 | {
"domain": "robotics.stackexchange",
"id": 29460,
"tags": "ros, inverse-kinematics, smach, trac-ik"
} |
How to start prediction from dataset? | Question: I have event dataset in a factless table. It has list of events
timestamp-> event name -> node ( In network)
There is always a sequence of event happening. So how do I start predicting future event based on past one and discover list of nodes that will be affected from past experience.
I am programmer without knowledge of machine learning. I have installed spark,R and have dataset in oracle database.Is there any tutorial/algorithm that I can use to get started. I taught myself scala/R but have no idea on getting started. My dataset are huge i.e. more than 9billion rows for 3 months.
Node Eventtime alarmname
192.168.1.112 6/14/2016 19:41 Main power supply has a fault alarm
192.168.1.113 6/14/2016 19:23 Association path broken
192.168.1.113 6/14/2016 19:23 NA
192.168.1.113 6/14/2016 19:23 Association broken
192.168.1.112 6/14/2016 19:23 Mains Failure
192.168.1.112 6/14/2016 19:23 Mains Failure
Additional Information:
I have 98 nodes. I would like to predict:
i. No of node that has alarm or goes down when a single node goes down
e.g. if node A has alarm if in 1 month list of node that has alarm at the same period
ii. The sequence of event occurrence i.e. if one node has mains failure then the next event would be node down.
Answer: The problem you are facing is a time series problem.
Your events are categorial which is a specific case (so most common techniques like arima and Fourier transform are irrelevant).
Before getting into the analysis, try to find out whether the events among nodes are independent. If they are independent, you can break them into sequences per node and analyze them. If they are not (e.g., "Main power supply has a fault alarm" on node x indicates the same event on node y) you should the combined sequence. Sometime even when the the sequence are dependent you can gain from using the per node sequence as extra data.
You data set is quite large, which means that computation will take time. You data is probably noisy, so you will probably have some mistakes. Therefore, I recommend advancing in small steps from simple models to more complex ones.
Start with descriptive statistics, just to explore the data. How many events do you have? How common are they? What is the probability of the events that you try to predict? Can you remove some of the events as meaningless (e.g., by using domain knowledge)?
In case you have domain knowledge that indicates that recent events are the important ones, I would have try predicting based on the n last events. Start with 1 and grow slowly since the number of combinations will grow very fast and the number of samples you will have for each combination will become small and might introduce errors.
Incase that the important event are not recent, try to condition on these events in the past.
In most cases such simple model will help you get a bit above the baseline but not too much. Then you will need more complex models. I recommend using association rules that fit your case and have plenty of implementations.
You can further advance more but try these technique first.
The techniques mentioned before will give you a model the will predict the probability that a node will be down, answering your question (ii). Running it on the sequence of the nodes will enable you to predict the number of nodes that will fail answering question (i). | {
"domain": "datascience.stackexchange",
"id": 903,
"tags": "machine-learning, predictive-modeling, apache-spark"
} |
Using common code inside a Higher Order Component | Question: I have a common List in mind which looks something like this:
it has one remove button
one input in which user can enter the name of element and on clicking insert the element will be added in the list.
For this i have added HOC like this:
function ListHOC(Component, data, listName, placeholder) {
return class extends React.Component {
constructor() {
super();
this.state = { data, element: "" };
}
add = item => {
const { data } = this.state;
data.push(item);
this.setState({ data });
};
remove = keyToRemove => {
const { data } = this.state;
const newData = data.filter(({ key }) => keyToRemove !== key);
this.setState({ data: newData });
};
render() {
const { data, element } = this.state;
const updatedList = data.map(({ name, key }) => (
<div style={{ display: "flex" }} key={key}>
<div>{name}</div>
<button onClick={() => this.remove(key)}>remove</button>
</div>
));
return (
<>
<div>{listName}: </div>
<Component data={updatedList} {...this.props} />
<input
placeholder={placeholder}
onChange={e => this.setState({ element: e.target.value })}
/>
<button
onClick={() => this.add({ name: element, key: data.length + 1 })}
>
insert
</button>
</>
);
}
};
}
one thing i am not sure about is weather to use the input and button and listname inside HOC or not
link to codepen: https://codepen.io/saxenanihal95/pen/NWKVJOx?editors=1010
Answer: There's no reason to use a HOC for this, it can be done more simply and clearly with a component:
class List extends React.Component {
state = { data: this.props.initialData, element: "" };
add = item => {
this.setState(prev => ({ data: prev.data.concat(item) }));
};
remove = keyToRemove => {
this.setState(prev => ({
data: prev.data.filter(({ key }) => keyToRemove !== key)
}));
};
render() {
const { data, element } = this.state;
const { placeholder, listName } = this.props;
return (
<>
<div>{listName}: </div>
{data.map(({ name, key }) => (
<div style={{ display: "flex" }} key={key}>
<div>{name}</div>
<button onClick={() => this.remove(key)}>remove</button>
</div>
))}
<input
placeholder={placeholder}
onChange={e => this.setState({ element: e.target.value })}
/>
<button
onClick={() => this.add({ name: element, key: data.length + 1 })}
>
insert
</button>
</>
);
}
}
const Users = () => (
<List
initialData={[
{ name: "a", key: 1 },
{ name: "b", key: 2 }
]}
listName="Users"
placeholder="insert user"
/>
);
const Comments = () => (
<List initialData={[]} listName="Comments" placeholder="insert comment" />
);
const AnotherList = () => <Users />;
function App() {
return (
<div>
<Users />
<Comments />
<AnotherList />
</div>
);
}
ReactDOM.render(<App />, document.getElementById("app"));
HOCs are generally better for cross-cutting concerns, or behavior (not presentation) which you want to add to any component. for example logging:
const withLogging = Component => props => {
console.log('Props:', props);
return <Component {...props} />;
}
const List = ({ name, data }) => ...
const ListWithLogging = withLogging(List); // <-- This will log all props | {
"domain": "codereview.stackexchange",
"id": 37504,
"tags": "javascript, react.js"
} |
What exactly is Symbolic Model Checking? | Question: I know that Symbolic Model Checking is state space traversal based on representations of states sets and transition relations as formulas like in CTL using models like Kripke Model. I know the theory. But I'm finding it hard to understand the actual application. Where exactly is it used? What exactly does it do and how does it work?
Can someone explain with a real example and relate theory to practice?
Answer: Symbolic Model Checking is Model Checking that works on symbolic states. That is, they encode the states into symbolic representations, typically Ordered Binary Decision Diagrams (OBDDs).
The question is what do they do and how do they work.
You first have your source code for some application. You then transform your source code into some state-transition graph like a Kripke Structure. The states are filled with atomic propositions which describe what is true in that particular state. In Symbolic Model Checking the atomic propositions are encoded as OBDDs to save on space and improve performance.
The Model Checker then starts at some initial state, and explores the states, looking for errors in the state-transition graph. If it finds an error it will often generate a test case demonstrating the error. It uses the symbolic OBDDs to somewhat optimally navigate the state space. Wish I could explain more there but still learning.
But that's basically it. You have a program converted into a formal model (state-transition graph), and then you use symbolic optimizations to navigate the state space to look for errors (by comparing it against an LTL/CTL specification). And if an error is found, the Model Checker gives you some stuff to help document and solve it. | {
"domain": "cs.stackexchange",
"id": 11648,
"tags": "formal-methods, model-checking, software-verification"
} |
Segmentation fault with opencv function | Question:
I'm trying to detect a blob on a video feed with cvBlob lib. I think, I made a mistake with pointers, but I can't figure out where.
Moreover, do I have to free some of this variable?
The problem appears in row: unsigned int result=cvb::cvLabel(src_g, labelImg, blobs);
Mat& corridorProces(Mat& resultImg)
{
Mat srcMat=resultImg.clone();
cvtColor( resultImg, resultImg, CV_RGB2GRAY );
IplImage src= resultImg.clone();
IplImage *src_g= new IplImage(src);
IplImage *src_g_inv=new IplImage(src);
cvThreshold(src_g, src_g_inv,35,255, CV_THRESH_BINARY_INV);
cvThreshold(src_g, src_g,40,255, CV_THRESH_BINARY);
IplImage *labelImg=cvCreateImage(cvGetSize(src_g), IPL_DEPTH_LABEL, 1);
cvb::CvBlobs blobs;
unsigned int result=cvb::cvLabel(src_g, labelImg, blobs);
...
}
Originally posted by zweistein on ROS Answers with karma: 231 on 2013-03-06
Post score: 0
Original comments
Comment by Miquel Massot on 2013-03-06:
This question is not related to ROS. It's better suited in cvBlob project mailing list or similar, not here.
Comment by phbou72 on 2013-03-06:
Simply comment out all the code inside the function, and uncomment line by line until you find the one that give you a segmentation fault. It's not something related to ROS. It's a programming issue.
Comment by zweistein on 2013-03-06:
The weird thing is that the algorithm implenented in Opencv works for 1 image, but when I try it in ROS, where the cv_brigde returns a cv::MAT() and I convert into Iplimage, I receive segmentation fault error.
Answer:
I changed to: IplImage *src_g=cvCloneImage(&src); and now its working.
I dont understand why I got some times empty packages but now there is no more segmentation fault.
Originally posted by zweistein with karma: 231 on 2013-03-07
This answer was ACCEPTED on the original site
Post score: 0 | {
"domain": "robotics.stackexchange",
"id": 13229,
"tags": "opencv"
} |
Conversion of elastic moduli for plane stress case | Question: For isotropic material an elastic modulus, like Young's modulus and in combination with a Poisson ratio can be used to convert to any other elastic modulus, like for example the bulk modulus. Tables for these conversions exist, for example, on the wiki page of bulk modulus.
I'd like to use the conversions for FEM material models that are plane strain, plane stress or axisymmetric. What confuses me on the wiki page is that there is a mention of 3D material and 2D materials and these have different conversion formulae. It's unclear to me what 2D material means in this context. My questions:
Is it correct to use the 3D material conversion formulae for the plane stress case? My current thinking is that the conversions are valid for the plane strain and axisymmetric case as they model objects with 3D extend. But the plane stress case a models a thin object. Which formula should be used in that case?
Can anyone share a reference to where these conversions are derived, possibly including the 2D material conversions.
Answer: Simplified 2D analyses of 3D situations
Plain stress, plain strain and axisymmetric allow describing a domain in 2D, but it still represents a full 3D object. Therefore the material properties like Young's modulus and Poisson ratio are still the same as in 3D.
These 2D simplifications are "coded" in stress and strain tensors, which have some components equal to zero (or constant).
Relation between $E$, $\nu$ and $K$
Relation between Young's modulus $E$, Poisson ratio $\nu$ and bulk modulus $K$ in 3D comes from Hooke's law:
$$\epsilon_x = \frac{1}{E}\left(\sigma_x-\nu\sigma_y-\nu\sigma_z\right)$$
$$\epsilon_y = \frac{1}{E}\left(-\nu\sigma_x+\sigma_y-\nu\sigma_z\right)$$
$$\epsilon_z = \frac{1}{E}\left(-\nu\sigma_x-\nu\sigma_y+\sigma_z\right)$$
Now the volumetric change per volume unit $dV/V$ for small deformations is just a sum of normal strains, which may be simplified using average normal stress $\bar{\sigma}$ (~pressure):
$$\frac{dV}{V} = \epsilon_x+\epsilon_y+\epsilon_z = \left(\sigma_x+\sigma_y+\sigma_z\right)\cdot\frac{1-2\nu}{E} = 3\bar{\sigma}\frac{1-2\nu}{E}$$
Lastly, the bulk modulus as a ratio between pressure and volumetric change per unit is:
$$K = \frac{\bar{\sigma}}{dV/V} = \frac{E}{3\left(1-2\nu\right)}$$
2D materials
"2D material" relations from table at the end of wiki/Bulk_modulus may be useful for very special applications like single atom layers, where tangential stresses should not cause contraction in normal direction to the layer. | {
"domain": "engineering.stackexchange",
"id": 5030,
"tags": "materials, finite-element-method, solid-mechanics, material-science"
} |
Wait for TF listern queue to not be empty | Question:
I am noticing an issue with TF LIsteren in Electric that doesn't seem to be covered by other questions. Mainly, if I create a TF listener tf_, then immediately go into a callback that calls tf_.waitForTransform(...), I get:
"terminate called after throwing an instance of 'tf::ExtrapolationException'
what(): Unable to lookup transform, cache is empty, when looking up transform from frame ...."
What it seems is that the listener hasn't been active long enough to get any TF information before waitForTransform is called, and when you call waitForTransform on a listener with an empty cache/queue, is throws this exception deep down (I can't catch it at the level of calling waitForTransform) and the process is ended.
I'd like to be able to first look to see if the cache is not empty, or be able to catch this at the high level.
Any suggestions?
BTW, the only way I've found around this so far is to sleep for a couple of seconds after creating the listener and before calling waitForTransform().
Originally posted by pbeeson on ROS Answers with karma: 276 on 2012-02-15
Post score: 1
Answer:
The problem was that when running Gazebo (and it being VERY slow due to having walls -- I have a separate bug Trac on that that hasn't been answered), TF comes in much slower than the program executes, so waitForTransform bombs out (cannot catch exception because something catches it first and quits).
I can get around this by using canTransform() and throwing out the scan is that fails, or catching the TransformException on the transformPoint() call directly. That is easiest.
Originally posted by pbeeson with karma: 276 on 2012-02-15
This answer was ACCEPTED on the original site
Post score: 1 | {
"domain": "robotics.stackexchange",
"id": 8251,
"tags": "transform"
} |
Quantum Harmonic Oscillator eigenfunction | Question: I'm trying to understand why in quantum harmonic oscillator when finding ground state eigenfunction we don't use $a^\dagger$.
For a simple harmonic oscillator the Hamiltonian is given by $$H=\hbar\omega_0\left(\hat{a}\hat{a}^\dagger-1/2\right),$$ where the operators $\hat{a}$ and $\hat{a}^\dagger$ may be expressed as $$\hat{a}=\frac{1}{\sqrt{2}}\left(\xi+\frac{\partial}{\partial\xi}\right),$$ $$\hat{a}^\dagger=\frac{1}{\sqrt{2}}\left(\xi-\frac{\partial}{\partial\xi}\right),$$ where $\xi=\beta x$. You are required to find the expression for the normalized ground state eigenfunction.
This question is from my exam I just want to understand why when solution came we didn't use $a^\dagger$ its solution was given as:
Answer: You could have equally well used
$$
\hat{a}^\dagger= \frac{1}{\sqrt{2}}\int\!\!d\xi ~ |\xi\rangle \left(\xi-\frac{\partial}{\partial\xi}\right)\langle \xi|,
$$
instead, since the hermitian conjugate of your starting expression is
$$
\langle 0| \hat{a}^\dagger=0.
$$
You then have
$$
0=\langle 0| \hat{a}^\dagger = \frac{1}{\sqrt{2}}\int\!\!d\xi ~ \langle 0|\xi\rangle \left(\xi-\frac{\partial}{\partial\xi}\right)\langle \xi| \\=
\frac{1}{\sqrt{2}}\int\!\!d\xi ~ \left(\xi \psi^*_0(\xi) +\frac{\partial \psi^*_0(\xi) }{\partial\xi}\right)\langle \xi| ~~,
$$
the last step involving integration by parts and use of the definition
$\langle 0|\xi\rangle\equiv \psi^*_0(\xi) $.
Consequently, you get the same equation you had before,
$$
\xi \psi^*_0(\xi) +\frac{\partial \psi^*_0(\xi) }{\partial\xi}=0,
$$
with real solution,
$$
\psi^*_0(\xi) \propto e^{-\xi^2/2} ~,
$$
so you may complex conjugate and suitably normalize, etc.
It should be evident that the crucial step connecting states to functions is identical. | {
"domain": "physics.stackexchange",
"id": 85451,
"tags": "quantum-mechanics, hilbert-space, operators, harmonic-oscillator, ground-state"
} |
Why are grams usually only expressed as milligrams, grams or kilograms? | Question: I'm a physics (and electronics and astronomy, etc.) enthusiast. As I learn and research topics, I notice that many SI units are often expressed using a variety of prefixes, such as in electronics where we use microvolts, millivolts, volts, kilovolts, and sometimes megavolts. In computer storage, the prefixes kilo, mega, giga, and tera are very familiar.
In physics, however, for large quantities of mass, I usually just see kilograms used with scientific notation:
$$2\times10^6 kg$$
This could also be expressed as 2 gigagrams, but I've never heard anyone use that particular unit (which might be why it sounds silly). I understand that it is impractical to use a prefix for something as large as the mass of the sun, $2\times10^{30}$ kg, but wouldn't it be more appropriate to use grams, as in $2\times10^{33}$g?
Is this simply out of convention, or is there a more logical reason?
Answer: It's a weird quirk of the SI system that the base unit of mass is the kilogram, not the gram. So you'll see a lot of things expressed in kilograms.
Of course, scientists in a given field tend to standardize on certain choices of units without any regard to the SI recommendations. And this makes sense; the units you use should be the ones that make your values most understandable for the intended audience. SI is only intended as a fallback to enable unambiguous communication between groups that don't otherwise have a shared convention (especially between experimentalists and theorists). So sometimes you'll see quantities expressed in grams or tons or solar masses or whatever because that is the standard in the context you're looking at. | {
"domain": "physics.stackexchange",
"id": 18907,
"tags": "soft-question, conventions, si-units"
} |
What is the structure of iodide of Millon's base? | Question: In qualitative analysis, $\ce{NH4+}$ ions are confirmed using Nessler's reagent:-
$$\ce{NH4+ + 2[HgI4]^{2−} + 4OH− → HgO·Hg(NH2)I ↓ + 7I^{−} + 3H2O}$$
The brown precipitate formed is called iodide of Millon's base. It is also written as $\ce{3HgO.Hg(NH3)2I2}$ and as $\ce{NH2.Hg2I3}$. (wikipedia).
But its structure is present differently in different sites:-
Source 1 - ionic form
Source 2 - Here the reaction is given differently:-
$$\ce{([K2]HgI4 <=> 2KI + HgI2) * 2}$$
$$\ce{HgI2 + NH3 -> Hg[NH2]I + NH4I}$$
$$\ce{Hg[NH2]I + HgI2 -> NH2Hg2I3 }$$
$$\ce{[K2]HgI4 + 2NH3 -> NH2Hg2I3 + 4KI + KH4I }$$
structure -
Source 3- non ionic form
Source 4 - Hydrate form; Here also the reaction is different:-
$$\ce{[K2]HgI4 -> 2KI + HgI2}$$
$$\ce{HgI2 + NH3 -> Hg[NH2]I + HI}$$
This site also says that its structure is still uncertain. Various other structure has been proposed:-
So, what is the correct reaction, correct formula and correct structure of iodide of Millon's base?
Answer: First, ignore any monomeric formula. The compound is definitely polymeric.
The literature contains hints for existence of two separate lines of compounds. The first one is salts of polyamidomercury: $\ce{[Hg(NH2)]_{n}A_{n}}${1} with mercury-amide chains in the structure, and $\ce{(Hg2N)_{n}A_{n}}$ with structure based on crystoballite {2}{3}
It seems thatn the lines are not distinguished in earlier literature, but recent researchers use the words "Millon's base salts" for $\ce{(Hg2N)_{n}A_{n}}$ only. | {
"domain": "chemistry.stackexchange",
"id": 4997,
"tags": "inorganic-chemistry, experimental-chemistry, structural-formula, halides"
} |
Radioactive decay - What mechanism decides when an unstable nucleus decays? | Question: My first question on Stackexchange (if it is formatted wrong or something please tell me so I know in future) - here it is:
Given an unstable nucleus (exactly which nucleus is not particularly pertinent) - what decides precisely when it will decay?
I am somewhat familiar with the concept- a nucleus becomes 'unstable' when the mutual repulsion between the protons exceeds the binding force of the strong nuclear force. That being said, if we were to theoretically isolate a single unstable atom, there are a few possibilities (correct me if I am wrong):
atom immediately decays
atom decays x seconds later
atom doesn't decay
There is an unpredictable nature (which I assume arises from something to do with quantum uncertainty). But getting back to my question, what is it that suddenly makes an 'unstable nucleus' decay?
Any further reading would be appreciated, although nothing too complicated (university level is my limit I think).
UPDATE
Thanks all, my first time using this forum and I was not let down :)
With regards to the question itself, I'm a bit disappointed that there is no precise mechanism, but I guess Einstein was wrong when he quoted: "God does not play dice"
Answer: As far as we know, nuclear decay is truly random, that is, random in the quantum mechanical sense. That is, when you observe the system, there is a probability that you will see the decay products rather than the original nucleus, because the wave function of the system is a superposition of the parent nucleus state and the daughter nucleus state (+alpha particles or whatever). As time goes on, the coefficients of the superposition evolve so that the probability of observing the parent nucleus approaches zero while the probability of observing the daughter nucleus (or further decay products) approaches one.
Thus, there is nothing immediately happening that causes the nucleus to decay; rather, as the parent nucleus is unstable, time evolution gradually eliminates it from the system in a continuous manner! In the Copenhagen interpretation, observing the nucleus causes it to collapse into a state where you can definitely ascertain whether it has decayed or not. The longer you wait, the more likely. | {
"domain": "physics.stackexchange",
"id": 25743,
"tags": "nuclear-physics, radiation, radioactivity, determinism, strong-force"
} |
How does ammonium chloride increase the solubility of magnesium hydroxide in water? | Question: In my textbook it is written that $\ce{Mg(OH)2}$ is slightly soluble in water, and it forms a milky solution which is called "milk of magnesia".
Its solubility can be increased by the addition of $\ce{NH4Cl}.$ But how? There seems to be no reason.
Answer: There is a strong reason. But it is the chemical reaction, not just a better dissolution.
$\ce{Mg(OH)2}$ is a base with the limited solubility, defined by $K_\mathrm{sp}=[\ce{Mg^2+}][\ce{OH-}]^2$
$\ce{NH4+}$ ion, created by $\ce{NH4Cl}$ dissolution, acts as a weak acid:
$$\ce{NH4+ + H2O <<=> NH3 + H3O+}$$
with $\mathrm{p}K_\mathrm{a}=9.25$
$\ce{OH-}$ ions formed by dissolution of $\ce{Mg(OH)2}$ are eliminated by recombination
$$\ce{OH- + H3O+ <=>> 2 H2O}$$
what supports the dissolution by keeping the product of ion concentrations below the $K_\mathrm{sp}$.
Effectively, there is ongoing equilibrium:
$$\ce{Mg(OH)2 v + 2 NH4+ <=> Mg^2+ + 2 NH3 + 2 H2O}$$ | {
"domain": "chemistry.stackexchange",
"id": 13047,
"tags": "inorganic-chemistry, acid-base, solubility"
} |
Expected distance between tree nodes | Question: I have been given a tree with n nodes and n-1 edges with it's weight.
There are two people A and B.
I have been given a list of nodes of size k.
A will pick a random node x from this list and B will independently pick a random node y from this list.
I have to find expected distance between these two nodes.
My way of solving it was to find the distance between all the (k*(k-1)/2)nodes of the list and dividing it by number of nodes in the list.
for ex:
n=6,k=6 list=[1,2,3,4,5,6]
Node--------> 1
\(1)<-----------Weight
\
3
(3) / \(2)
/ \
4 2
(4) / \ (5)
/ \
5 6
My answer was coming out to be 87/6 but the actual answer was 29/6.Please help me find whatever i am doing wrong here.
Answer: Let $d(i,j)$ denote the distance between $i$ and $j$. Calculation shows that
$$
\sum_{i<j} d(i,j) = 87.
$$
Hence the average distance is
$$
\frac{1}{6^2} \sum_{i,j=1}^6 d(i,j) = \frac{1}{36} \left(\sum_{i<j} d(i,j) + \sum_{i>j} d(i,j) + \sum_{i=j} d(i,j)\right) = \frac{2\cdot 87}{36} = \frac{29}{6}.
$$
A simple way to do the calculation is as follows. Suppose that there are $n$ nodes, and that edge $e$ of weight $w_e$ cuts the tree into parts of size $k_e,n-k_e$. Then the average distance is
$$
\frac{2}{n^2} \sum_e w_e k_e (n-k_e).
$$
For example, in our case we get
$$
\frac{2}{36} (1 \cdot 1 \cdot 5 + 2 \cdot 1 \cdot 5 + 3 \cdot 3 \cdot 3 + 4 \cdot 1 \cdot 5 + 5 \cdot 1 \cdot 5) = \frac{29}{6}.
$$ | {
"domain": "cs.stackexchange",
"id": 7667,
"tags": "algorithms, graphs, trees"
} |
Mirror Transportation | Question: Why do people transport mirrors in inclined position? I frequently find people carrying mirrors in slant position on mini trucks.I guess it has some advantage of carrying them without breaking,and I am now so curious to know the reason behind it
Answer: "The angle of inclination or lean of the glass should be 3° from the vertical on
static racks. For transportable racks, pallets and stillages, an angle of 5° – 6° is recommended. If the angle is increased above 6°, it will tend to put extra load
on the back sheets of the stack and may cause breakage."
Source: Glass and Glazing Federation, Code of Practice for Glass Handling and Storage. http://www.ggf.org.uk/assets/GGF%20Code%20of%20Practice%20for%20Glass%20Handling%20and%20Storage-4d4a73cca02f6.pdf
So basically, as far as I understand, it is a tradeoff between workers' safety (higher inclination reduces risk of accidents by falling glass) and stability of the glass (smaller inclination equals lower risk of breaking). | {
"domain": "physics.stackexchange",
"id": 12417,
"tags": "everyday-life"
} |
contact force sensing | Question:
It seems that there is a bump/force sensor that works for contact on a link. However, does this scale or is there another implemented sensor that would allow me to get contact forces all over a robot arm? Essentially, I'd like to simulate tactile sensing. I have implemented and used something similar in ODE directly, but wondered if the contact joint API was exposed in Gazebo. Or if it would be possible to write a plugin to get the forces. Any help would be much appreciated as I'd really rather use Gazebo.
thanks.
Originally posted by mkillpack on Gazebo Answers with karma: 23 on 2012-11-27
Post score: 2
Answer:
You should use a ContactSensor. Attach the sensor to a link, and it will report all the contacts on that link. This includes each contact's position, normal, depth, and forces.
Note: Some parts of the contact sensor will only work with the upcoming Gazebo 1.3 release.
Originally posted by nkoenig with karma: 7676 on 2012-11-27
This answer was ACCEPTED on the original site
Post score: 1 | {
"domain": "robotics.stackexchange",
"id": 2828,
"tags": "gazebo-sensor, contact"
} |
Why are Shack-Hartmann sensors so expensive (4k+ USD)? | Question: Recently I am searching for a Shack-Hartmann wavefront sensor for university laboratory usage. I was expecting my target sensor to be cheap, which is:
Low spatial wavefront resolution (50 x 50) for beam measurement;
Low frame rate;
Portable design, and easy to use.
Most quotations are beyond my expectation. On ThorLabs it costs 4k USD (which is, "cheaper" compared to most competitors):
https://www.thorlabs.com/thorproduct.cfm?partnumber=WFS30-5C
Following reasons may to blame for this high price:
Expensive industrial sensors. But for https://www.flir.com/products/grasshopper3-usb3/?model=GS3-U3-14S5M-C it takes only 1.7k USD for a high performance sensor.
Microlens array (MLA) fabrication?
MLA-sensor alignment?
Software development? (SDK & GUI)
But I am not convinced by myself.
Any ideas why a Shack-Hartmann sensor is so expensive?
Answer: I have been deeply involved in both Shack-Hartmann and lateral-shear polarization interferometers. Now I want something simple and slow for hobby projects and had the same question. I don’t think such a project is as daunting as might be imagined. One would like 12 bits in the camera, the Sony CMOS cameras are so good, the last bit is nearly noise free.
The camera need not be expensive: Any amateur telescope web store will have dozens for less than $400. You want monochrome, preferably USB 3, with the largest diagonal focal plane size you can afford. A Global shutter is nice if you have a dynamic situation. Rolling shutters are generally lower noise.
A lens array will be expensive. Thorlabs, Seuss, and RPC Photonics, and others sell them for \$400-\$600. The Thorlabs arrays are mounted. In the end you want the focal spot to be at least 2 pixels across. Choose your micro lens F/# accordingly, usually quite slow, F/20 or more. Likely you want the closest pitch you can find as this sets the spatial resolution. A 4 x 5 mm focal plane will view 40 x 50 micro lens spots. Good enough.
You will need to craft a mount that mates the camera and the lenslet array at the micro lens focus distance. They are slow, so the depth of field should be forgiving.
The real crux is the software. There is public domain software that is close (git has AOtools) and there may be more. You will need to take a dark exposure for the shutter time used (I typically take 100 frames and average them). Then the reaL GOTCHA issue: What to use as a reference? You can use a shear plate to focus a collimated light source (a single mode VCSEL works well), but you are unlikely to have a shear plate about. Place the VCSEL or other point source bright enough to see (an LED?) as far away in a dark room as you can manage. Don’t use a mirror or lenses, turn off heating and cooling, etc. This gives you a known curvature. for 100 μm lenslets at F/20 the focal length is 2mm so a tilt of 1/2 a subaperture (to keep things easy) or 50 μm is only 25 milliradians (mr), something over a degree. The edge of the camera must see the source as no larger than this angle. So if you have 50 subapertures across your camera the edge is 2.5 mm from the middle. Thus 2.5e-3 meters / d = 2.5e-3 radians so the source needs to be more than a meter away. Easy! With the distance from the source to the focal plane known you can now easily calculate the tilt distribution (should look spherical).
Use openCV or your favorite utility to get images into your computer and a) subtract background b) make sure the camera does not saturate (adjust exposure time, retake the dark background and try again) c) calculate the peak intensity of each spot and which pixel coordinates that is in d) Draw a box around each focal spot which is several spot diameters across each spot e) calculate the centroid of each spot using only pixels in the box (the rule is add no pixels that do not contribute to your signal, the centroid). You can play with the box size. An F/20 blue-green 500 nm wavelength spot from a 100 μm lenslet will be about lambda F/# across (just 20 wavelengths, about 10 μm) which should be 3 to 4 pixels so a box 16 x 16 pixels should be a good place to start. The centroids should be good to a small fraction of a pixel. I have done better than 1/100 pixel with a really good focal plane array and 1/20 pixel for standard cameras such as the above. Now you can calculate the centroid location within the box, and reference that to the whole array. A plot across a slice will give you a linear function which is somewhat less than the lenslet pitch. Do this in both X and Y. You may have a rotation in the spot coordinates. You can physically correct this, or use math. The slope in X and Y should be the same. If not, the lens array is tilted. If you move the light further away, the spots should move to be more directly behind the lenslets.
In passing I would note that I found the accuracy of the lithography on the imager, and used to make the lenslet array was better than my best tilt calibration! Thus for a 3.5 μm pixel and 100 μm lenslet the spots should be 100/3.5 pixels apart. This will be independent of focus etc. I also found trying to uniformly illuminate the array to correct for the variation in responsivity pixel by pixel generated more noise, not less. Silicon is awfully good.
The last calibration needed is to define where zero tilt is. This should be close to normal to the focal plane array, so pick the middle lenslet and the pixel you declare is on boresight. Now you have the subaperture tilts calibrated.
The last messy task is to convert from wavefront subaperture tilt to wavefront. There are books written on the subject. Notionally if you start at your zero tilt and add up the subaperture tilts as you move across the array, subtracting the zero tilt position of each subaperture, you have wavefront. Note you can take any path from the center to the subaperture you want to know and add up the X and Y tilts and you should get the same answer. Ideally you want to take the average of all possible paths (that are illuminated) as that would be the best average value you could get. This is referred to as the minimum RMS error. There are many ways to calculate this. If you think of the tilts as the gradient in phase, and calculate the divergence of the gradients, you have a measure of the wavefront curvature. It is also simply Laplace’s equation which can be solved by successive over-relaxation, or if you like fancy algorithms, use a multigrid method (it’s way faster). Greg Allen’s thesis has another approach, https://dspace.mit.edu/bitstream/handle/1721.1/120381/1084482108-MIT.pdf?sequence=1&isAllowed=y . There are many more.
In summary, you can build a Shack-Hartmann wavefront sensor for about $1000 and a lot of software development. The Thorlabs sensor seems cheap given the effort, at least until someone comes up with an open source sensor and software.
Jon | {
"domain": "astronomy.stackexchange",
"id": 5496,
"tags": "optics, adaptive-optics"
} |
LTTNg tracetools examples with ROS1 | Question:
Greetings ,
I have been trying to make sense of using LTTNg along with some C++ ROS scripts .
I stumbled upon the following ,
https://github.com/boschresearch/ros1_tracetools
If anyone has worked on tracing kindly share some insights .
Some basic PUB-SUB examples would be enough
For future reference , there are relevant articles i have come across .
https://christophebedard.com/ros-tracing-message-flow/
https://github.com/christophebedard/tracecompass_ros_testcases
https://lttng.org/blog/2018/03/21/lttng-scope-0.3-a-basic-tutorial/ , THIS IS WHAT REALLY HELPED ME TOWARD THE END
I made a screen recording of how the
above example can be run , hope it is
helpful to someone
https://youtu.be/fRDO-b5A4CM
Relevant tools
https://lttng.org/files/lttng-scope
https://babeltrace.org/
I understand that , there is an official package on this for ROS2 . (as ROS2 is build with RT in mind)
Kindly share anything on this topic .
Originally posted by chrissunny94 on ROS Answers with karma: 142 on 2020-10-27
Post score: 0
Original comments
Comment by christophebedard on 2020-10-28:
what are you looking for exactly? Are you looking for info on show to set it up + run it with LTTng?
Comment by chrissunny94 on 2020-10-28:
Hi @christophebedard thanks for responding , I guess you are the right person for this .
Anyhow what i would like is some info into how to run this with a simple Publisher/Subscriber .
My current understanding is we have to place ROS::TRACE commands is at critical break points .
(Similar to how one would place ROS_INFO , for debug purpose )
Comment by chrissunny94 on 2020-10-28:
I did a lot of digging and didnt find any modifications with the C++ code with these examples you have provided . But everything seems to be happening from the script itself .
I guess this is where my confusion is .
https://github.com/christophebedard/tracecompass_ros_testcases/blob/melodic-devel/tracecompass_ros_testcases/scripts/trace.sh
Answer:
To get it working, you need a few things:
LTTng https://lttng.org/docs/
a few repos, as listed in this file: https://github.com/christophebedard/tracecompass_ros_testcases/blob/melodic-devel/tracing.repos
modified ROS code that includes the tracing instrumentation: see the ros_comm and roscpp_core repos in the tracing.repos file
tracetools, i.e. what actually calls LTTng; this is used/called by the modified ROS code (see tracing.repos file)
some ROS user code, e.g. my examples/test cases repo
My current understanding is we have to place ROS::TRACE commands is at critical break points . (Similar to how one would place ROS_INFO , for debug purpose )
Not really. The instrumentation is only for ROS itself, e.g. to provide information about message publication and subscription. You do not need to modify your own code for this. You just need to use the modified/instrumented ROS code and trace it all.
And it is not a replacement for ROS_INFO. If that's what you want, you should consider other solutions.
But everything seems to be happening from the script itself .
I guess this is where my confusion is .
As for actually tracing everything, your nodes/ROS is just an application that can be traced using LTTng. Therefore you should check out the LTTng documentation: https://lttng.org/docs/. The trace.sh script is just a simple script that uses LTTng commands (through another script) to enable the right tracepoints, start tracing, run a given application, and stop tracing + cleanup.
Originally posted by christophebedard with karma: 641 on 2020-10-30
This answer was ACCEPTED on the original site
Post score: 1
Original comments
Comment by chrissunny94 on 2020-11-01:
Awesome and a lot of Thanks Christopher | {
"domain": "robotics.stackexchange",
"id": 35683,
"tags": "ros"
} |
About reduction relation between $HP$ and $\mathcal{E}\mbox{*}$ | Question: I'm studying Theory Of Computation and have some questions in the beginning:
About reduction relation between $HP$ and $\mathcal{E}\mbox{*}$
$HP =$ {$<M,w>$ $|$ $M$ is a $TM$ and it halts on string $w$}.
$\mathcal{E} =$ {$0$,$1$}
why this is true : $\mathcal{E}\mbox{*} \leq HP$
but this is not true :
$HP\leq\mathcal{E}\mbox{*}$
I have read that IF the second argument is true then we get that :
$HP \in R$, which I know is not true.
I'm new to this topic; could someone give an explanation on these points?
Answer: Note that any computable language $L$ can be turing-reduced to any other language $L'$ which contains at least one yes-instance and one no-instance (i.e. we have $L \leq L'$) by letting the reduction mapping do all the computation and then mapping to a yes-instance of $L'$ if the original instance is in $L$ or a no-instance of $L'$ if it is not, so naturally a decidable language like $\mathcal{E}^\ast$ can be reduced to the halting problem.
However, we cannot reduce the halting problem $\text{HP}$ to $\mathcal{E}^\ast$ as $\mathcal{E}^\ast$ (assuming that your alphabet is $\{0,1\}$) contains every possible input and thus has no no-instance. Therefore it is not possible to find a function that maps the no-instances of $\text{HP}$ to the no-instances of $\mathcal{E}^\ast$ as there are none (or more differently: since $\mathcal{E}^\ast$ is decidable, we cannot reduce $\text{HP}$ to it as it is undecidable and the existence of a reduction would contradict that -- can you see why?) | {
"domain": "cstheory.stackexchange",
"id": 4067,
"tags": "cc.complexity-theory, reductions, universal-turing-machines"
} |
Equivalence of states between two "quasi-deterministic" strongly connected Büchi automata accepting the same $\omega$-language | Question: Hope someone can point me to the right direction to solve this problem.
Premise.
I call quasi-deterministic Büchi automaton (qDBA) a Büchi automaton $B = \langle S, \Sigma, S_0, \delta, F \rangle$, where $S$ is the set of states, $\Sigma$ the alphabet of transition labels, $S_0 \subseteq S$ the set of initial states, $\delta: S \times \Sigma \rightarrow S$ a partial transition function, and $F \subseteq S$ the set of accepting states. That is, a Büchi automaton where the only place of nondeterminism is in the initial state, while the transition function is deterministic.
An $\omega$-word is accepted by $B$ iff any of the infinite runs induced by the word on $B$ passes infinitely many times through some states of $F$.
Given a qDBA $B$ and a state $s \in S$, let $B/s = \langle S, \Sigma, s, \delta, F \rangle$ denote the deterministic Büchi automaton accepting all the $\omega$-words inducing runs of $B$ starting from $s$ (where $s$ may also be a non-initial state in B). Two states $s_1$ of the qDBA $B_1$ and $s_2$ of the qDBA $B_2$ are equivalent iff the language of the $\omega$-words accepted by $B_1/s_1$ is the same as the one accepted by $B_2/s_2$.
Finally, I say that $B$ is strongly connected if for any two states $s_1$ and $s_2$ there is a finite path connecting $s_1$ to $s_2$ and vice versa.
Question. Let $B_1 = \langle S_1, \Sigma, S_{0,1}, \delta_1, F_1 \rangle$ and $B_2 = \langle S_2, \Sigma, S_{0,2}, \delta_2, F_2 \rangle$ be two strongly connected qDBAs accepting the same $\omega$-language, $L(B_1) = L(B_2) = L$. I want to prove that for every state $s_1 \in S_1$ there exists at least one state $s_2 \in S_2$ equivalent to $s_1$.
Does this property hold in general? I tried to construct counterexamples to this property, but in every case I could think of, in order to falsify the conclusion, I always needed either to drop the assumption that $\delta_2$ was a deterministic transition function, or that $B_2$ was strongly connected.
I have a strong sense this property should hold. It might even be trivial, but maybe I am using the wrong vocabulary and I cannot find this result.
My intuition so far. Suppose there are two $\omega$-words $w'$ and $w''$ accepted by $B_1/s_1$. Since $B_1$ is strongly connected, one can construct accepted words by starting with the first symbols of $w'$, then taking a path back to $s_1$, then continuing with some symbols from $w''$, then back to $s_1$, and again following $w'$ in any combination. Since $B_2$ also accepts all of these combinations but has a finite number of states, the runs corresponding to these words must eventually reach some state $s_2$ from which both $w'$ and $w''$ are possible continuations of the runs. This must hold for every set of words accepted by $B_1/s_1$. But I am not able to turn this (possibly wrong) reasoning in general, formal terms. Could you please help me do so, if possible?
Answer: Your property does not hold.
Consider the following languages over $\Sigma = \{a,b\}$:
There are infinitely many $a$s at even positions in a word
There are infinitely many $a$s at odd positions in a word
Both are representable as DBAs with the same set of states (of size 3). We can build DBAs for them that only differ in their initial states.
If we want to build an automaton for the disjunction of these languages, we can do so by taking both initial states for both of them as new initial states. This gives us a qDBA $B_1$.
The disjunction is equivalent to the language "there are infinitely many $a$s", for which a DBA $B_2$ only needs two states, each of which has the same language.
Now, $B_1$ and $B_2$ are a counter-example to your conjecture. | {
"domain": "cs.stackexchange",
"id": 15902,
"tags": "automata, buchi-automata, connected-components, omega-automata"
} |
Find sequence by adding 5 or multiplying by 3 | Question: I wrote this little JavaScript function that starts with the number 1 and continually either adds 5 or multiplies by 3. This function tries to find a sequence of additions and multiplications that produces a given number.
Keep in mind, this function does not necessarily find the shortest sequence of operations.
I am looking for a review of my code and possible ways to make it more optimized. I'm also open to suggestions about how I can go about having this function find the shortest sequence of operations.
My code:
function findSequence(goal) {
function find(start, history) {
if (start == goal)
return history;
else if (start > goal)
return null;
else
return find(start + 5, "(" + history + " + 5)") ||
find(start * 3, "(" + history + " * 3)");
}
return find(1, "1");
}
Test:
print(findSequence(178));
Output:
((((((((((((((1 + 5) + 5) + 5) + 5) + 5) + 5) + 5) + 5) + 5) + 5) + 5) * 3) + 5) + 5)
Answer: This question is a bit related to a previous question. I will give you the same advice I did to the asker of the previous question.
Work from the other direction.
Currently, you're branching out starting from 1 trying to reach 178. Instead, start from 178 and try to reach 1. I'll show you how that will make the problem a lot easier.
We're at 178. Can we divide by 3? 178 mod 3 != 0 so no. Instead we reduce by 5.
178 - 5 == 173. Can we divide by 3? 173 mod 3 != 0 so no, we reduce by 5 instead.
173 - 5 == 168. Can we divide by 3? 168 mod 3 == 0 so yes, we can. Let's do that.
168 / 3 == 56. Wait a minute... this number ends with a 6... Interesting. (56 - 1) mod 5 equals zero, so from here we can just reduce by 5 until we've reached our target of 1.
Instead of branching out in an exponential manner, we've reduced the problem to a linear approach. Simply by flipping things backwards.
Let's say that we don't want to do the end loop of reducing by 5 just because we can (we want the shortest path, right?), then I see no other solution than to branch out recursively. When checking for the division by 3, a lot of branches are removed so it will be possible to find the shortest path by branching.
I'll leave the fun part of implementing this up to you :)
Some comments about your current code
(Which, given the comments above you should remove and completely re-write using the "backwards" approach)
In Java at least, it's recommended to add braces for each if. Also, as you're using return inside each if there's no need for else.
function findSequence(goal) {
function find(start, history) {
if (start == goal) {
return history;
}
if (start > goal) {
return null;
}
return find(start + 5, "(" + history + " + 5)") ||
find(start * 3, "(" + history + " * 3)");
}
return find(1, "1");
}
I would also add a parameter to allow switching the start value, it will make it more flexible very easily.
I think it's good that you're hiding the find method inside the other function. | {
"domain": "codereview.stackexchange",
"id": 8312,
"tags": "javascript"
} |
Biot-Savart's law in magnetic medium | Question: I have read that in a medium, $B$ changes but $H$ does not. So does this mean that in the expression for biot-savart's law $\mu_0$ can be replaced by $\mu$? I tried to look for biot-savart's law in medium, but could not find anywhere. Everywhere $\mu_0$ is used.
Is my guess correct?
Answer: If your medium is isotropic, homogeneous, time independent, and fills all space then yes it would be valid. Note that if the last assumption does not hold (which is most often the case), you can have problems due to boundary conditions at the interface. Physically, you can have surface currents at the boundary, giving rise to additional magnetic fields. These boundary effects can be neglected if your medium filled region is large enough so that you can be sufficiently far from the boundaries in a restricted region inside it.
Note that you have similar problems for dielectric materials with induced charges at their surfaces. Check out Griffiths' Introduction to Electrodynamics for more information.
Hope this helps and tell me if something's not clear. | {
"domain": "physics.stackexchange",
"id": 87989,
"tags": "electromagnetism, magnetic-fields"
} |
Cannot clone a package from github (publickey) | Question:
I want to do eye-to-hand using the following package https://ros-planning.github.io/moveit_tutorials/doc/hand_eye_calibration/hand_eye_calibration_tutorial.html , however when I am cloning the git repository I am getting this error
git clone git@github.com:ros-planning/moveit_calibration.git
Cloning into 'moveit_calibration'...
git@github.com: Permission denied (publickey).
fatal: Could not read from remote repository.
Please make sure you have the correct access rights
and the repository exists.
Originally posted by akumar3.1428 on ROS Answers with karma: 61 on 2022-10-11
Post score: 0
Answer:
Thank you ! I followed the following commands to add public ssh key to my github
cd ~/.ssh
ssh-keygen -t rsa -b 4096 -C "your email id "
eval $(ssh-agent -s)
ssh-add
Open VS Code and copy ssh
cd ~/.ssh
code .
login into github
go to setting
go to ssh and gp keys
add new key
paste it
Originally posted by akumar3.1428 with karma: 61 on 2022-10-11
This answer was ACCEPTED on the original site
Post score: 0 | {
"domain": "robotics.stackexchange",
"id": 38039,
"tags": "ros"
} |
Fluid to particles under newtonian gravity | Question: How to start with a perfect fluid concept and reach (by approximations through certain mathematically well defined assumptions) to the concept of particle ? Here newtonian gravitation is being assumed. The state equation of matter can be assumed to be that of dust.
EDIT
Assume any initial mass distribution $\rho(r,t=0)$ and momentum distribution $p(r,t=o)$ and state equation of dust.
EDIT 2
I don't intend to throw away the fluid concept,but still want to arrive at a particle as some form of a mass distribution.One example could be $\rho_p(r,t)$ which has a compact support in r describing a particle.
EDIT 3
Assume there are a number of such particles as described in the earlier edit part of the question as $\rho_p(\vec{r}-\vec{r_i},t=0)$ for $i = 1,2,3,\ldots$, distributed arbitrarily in space such that no two are overlapping. This could be considered as $\rho(r,t=0) = \displaystyle\sum\limits_{i=1}^n \rho_p(\vec{r}-\vec{r_i},t=0)$. Similarly for the momentum distribution at $t=0$ as $\vec{p}(r,t=0) = \displaystyle\sum\limits_{i=1}^n \vec{p_p}(\vec{r}-\vec{r_i},t=0)$. What would eventually happen to such a system under its own Newtonian gravity? Just out of curiosity, what would happen if GR is assumed?
Answer: If I understand your question correctly you want to get from Gauss' gravitational law to Newton's.
$$ \nabla \cdot \textbf{g} = -4 \pi G \rho$$
Integrating both sides over a volume $V$ enclosing a mass $M$ and having surface $S$.
$$\int_{V} \! \nabla \cdot \textbf{g } \mathrm{d}\textbf{V} = -4 \pi G \int_{V} \! \rho\ \mathrm{d}\textbf{V}$$
$$\int_{V} \! \nabla \cdot \textbf{g } \mathrm{d}\textbf{V} = -4 \pi G M$$
Using the divergence theorem: $\int_{V} \! \nabla \cdot \textbf{g } \mathrm{d}\textbf{V} = \oint_{\partial{V}} \textbf{g} \cdot \mathrm{d}\textbf{S}$
$$\oint_{\partial{V}} \! \textbf{g} \cdot \mathrm{d}\textbf{S} = -4\pi GM $$
Assuming that $\textbf{g}$ is constant over $\mathrm{d}\textbf{S}$, and that the force is centripetal we can write
$$\textbf{g} = g(r){\textbf{e}_r}$$
where $\textbf{e}_r$ are the unit vectors of the acceleration that point towards the center on the surface $S$ and g(r) gives us the magnitude of the acceleration at radius $r$.
Our equation then becomes:
$$g(r)\oint_{\partial{V}} \! \textbf{e}_r \cdot \mathrm{d}\textbf{S} = -4\pi GM $$
$$g(r) 4\pi r^2 = -4\pi GM $$
$$g(r) = -\frac{GM}{r^2} $$ | {
"domain": "physics.stackexchange",
"id": 124,
"tags": "newtonian-gravity"
} |
Windows Forms `ControlCollection` implementation | Question: I've implemented my own version of the Control.ControlCollection class in System.Windows.Forms and obviously I want it reviewed.
I've tried to make it thread-safe all around, but I'm sure I missed something.
My main concern is obviously the thread-safety of it, but I also would like any other comments as well.
The GitHub version as of this code is at: Control.ControlCollection.
using System;
using System.Collections;
using System.Collections.Generic;
using System.ComponentModel;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace Evbpc.Framework.Windows.Forms
{
public abstract partial class Control : Component, IComponent, IDisposable
{
/// <summary>
/// Represents a collection of <see cref="Control"/> objects.
/// Is not actually inherited from ArrangedElementCollection. ArrangedElementCollection was removed from this implementation.
/// </summary>
/// <remarks>
/// http://msdn.microsoft.com/en-us/library/system.windows.forms.control.controlcollection(v=vs.110).aspx
/// </remarks>
[ListBindable(false)]
public class ControlCollection : IEnumerable, ICloneable, IList, ICollection
{
private object _syncRoot = new object();
private object _internalSyncRoot = new object();
private List<Control> _controls;
private Control _owner;
#region Constructors
/// <summary>
/// Initializes a new instance of the <see cref="Control.ControlCollection"/> class.
/// </summary>
/// <param name="owner">A <see cref="Control"/> representing the control that owns the control collection.</param>
public ControlCollection(Control owner)
{
_owner = owner;
_controls = new List<Control>();
}
#endregion
#region Properties
/// <summary>
/// Gets the number of elements in the collection.
/// </summary>
/// <remarks>
/// http://msdn.microsoft.com/en-us/library/system.windows.forms.layout.arrangedelementcollection.count(v=vs.110).aspx
/// </remarks>
public virtual int Count
{
get
{
lock (_internalSyncRoot)
{
return _controls.Count;
}
}
}
/// <summary>
/// Gets a value indicating whether the collection is read-only.
/// </summary>
/// <remarks>
/// http://msdn.microsoft.com/en-us/library/system.windows.forms.layout.arrangedelementcollection.isreadonly(v=vs.110).aspx
/// </remarks>
public virtual bool IsReadOnly => false;
/// <summary>
/// Indicates the <see cref="Control"/> at the specified indexed location in the collection.
/// </summary>
/// <param name="index">The index of the control to retrieve from the control collection.</param>
/// <returns>The <see cref="Control"/> located at the specified index location within the control collection.</returns>
/// <remarks>
/// http://msdn.microsoft.com/en-us/library/333f9hk4(v=vs.110).aspx
/// </remarks>
public virtual Control this[int index]
{
get
{
lock (_internalSyncRoot)
{
return _controls[index];
}
}
}
/// <summary>
/// Indicates a <see cref="Control"/> with the specified key in the collection.
/// </summary>
/// <param name="key">The name of the control to retrieve from the control collection.</param>
/// <returns>The <see cref="Control"/> with the specified key within the <see cref="Control.ControlCollection"/>.</returns>
/// <remarks>
/// http://msdn.microsoft.com/en-us/library/s1865435(v=vs.110).aspx
/// </remarks>
public virtual Control this[string key]
{
get
{
lock (_internalSyncRoot)
{
foreach (Control c in _controls)
{
if (c.Name == key)
{
return c;
}
}
throw new KeyNotFoundException();
}
}
}
/// <summary>
/// Gets the control that owns this <see cref="ControlCollection"/>.
/// </summary>
/// <remarks>
/// http://msdn.microsoft.com/en-us/library/system.windows.forms.control.controlcollection.owner(v=vs.110).aspx
/// </remarks>
public Control Owner => _owner;
#endregion
#region Methods
/// <summary>
/// Adds the specified control to the control collection.
/// </summary>
/// <param name="value">The <see cref="Control"/> to add to the control collection.</param>
/// <remarks>
/// http://msdn.microsoft.com/en-us/library/system.windows.forms.control.controlcollection.add(v=vs.110).aspx
/// </remarks>
public virtual void Add(Control value)
{
lock (_internalSyncRoot)
{
InternalAdd(value);
}
}
private void InternalAdd(Control value)
{
if (_owner == value)
{
throw new ArgumentException($"The control {nameof(value)} cannot be the same as the {nameof(Owner)}.");
}
value.TabIndex = _controls.Count;
_controls.Add(value);
value.Parent = _owner;
}
/// <summary>
/// Adds an array of control objects to the collection.
/// </summary>
/// <param name="controls">An array of <see cref="Control"/> objects to add to the collection.</param>
/// <remarks>
/// http://msdn.microsoft.com/en-us/library/system.windows.forms.control.controlcollection.addrange(v=vs.110).aspx
/// </remarks>
public virtual void AddRange(Control[] controls)
{
foreach (Control control in controls)
{
Add(control);
}
}
/// <summary>
/// Removes all controls from the collection.
/// </summary>
/// <remarks>
/// http://msdn.microsoft.com/en-us/library/system.windows.forms.control.controlcollection.clear(v=vs.110).aspx
/// </remarks>
public virtual void Clear()
{
lock (_internalSyncRoot)
{
_controls.Clear();
}
}
/// <summary>
/// Determines whether the specified control is a member of the collection.
/// </summary>
/// <param name="control">The <see cref="Control"/> to locate in the collection.</param>
/// <returns>true if the <see cref="Control"/> is a member of the collection; otherwise, false.</returns>
public bool Contains(Control control)
{
lock (_internalSyncRoot)
{
return _controls.Contains(control);
}
}
/// <summary>
/// Determines whether the <see cref="Control.ControlCollection"/> contains an item with the specified key.
/// </summary>
/// <param name="key">The key to locate in the <see cref="Control.ControlCollection"/>.</param>
/// <returns>true if the <see cref="Control.ControlCollection"/> contains an item with the specified key; otherwise, false.</returns>
/// <remarks>
/// http://msdn.microsoft.com/en-us/library/system.windows.forms.control.controlcollection.containskey(v=vs.110).aspx
/// </remarks>
public virtual bool ContainsKey(string key)
{
lock (_internalSyncRoot)
{
foreach (Control c in _controls)
{
if (c.Name == key)
{
return true;
}
}
return false;
}
}
/// <summary>
/// Copies the entire contents of this collection to a compatible one-dimensional <see cref="Array"/>, starting at the specified index of the target array.
/// </summary>
/// <param name="array">The one-dimensional <see cref="Array"/> that is the destination of the elements copied from the current collection. The array must have zero-based indexing.</param>
/// <param name="index">The zero-based index in array at which copying begins.</param>
/// <remarks>
/// http://msdn.microsoft.com/en-us/library/system.windows.forms.layout.arrangedelementcollection.copyto(v=vs.110).aspx
/// </remarks>
public void CopyTo(Array array, int index)
{
throw new NotImplementedException();
}
/// <summary>
/// Searches for controls by their <see cref="Name"/> property and builds an array of all the controls that match.
/// </summary>
/// <param name="key">The key to locate in the <see cref="Control.ControlCollection"/>.</param>
/// <param name="searchAllChildren">true to search all child controls; otherwise, false.</param>
/// <returns>An array of type <see cref="Control"/> containing the matching controls.</returns>
/// <remarks>
/// http://msdn.microsoft.com/en-us/library/system.windows.forms.control.controlcollection.find(v=vs.110).aspx
/// </remarks>
public Control[] Find(string key, bool searchAllChildren)
{
throw new NotImplementedException();
}
/// <summary>
/// Retrieves the index of the specified child control within the control collection.
/// </summary>
/// <param name="child">The <see cref="Control"/> to search for in the control collection.</param>
/// <returns>A zero-based index value that represents the location of the specified child control within the control collection.</returns>
/// <remarks>
/// http://msdn.microsoft.com/en-us/library/1fz293fh(v=vs.110).aspx
/// </remarks>
public int GetChildIndex(Control child)
{
lock (_internalSyncRoot)
{
if (_controls.Contains(child))
{
return _controls.IndexOf(child);
}
throw new KeyNotFoundException();
}
}
/// <summary>
/// Retrieves the index of the specified child control within the control collection, and optionally raises an exception if the specified control is not within the control collection.
/// </summary>
/// <param name="child">The <see cref="Control"/> to search for in the control collection.</param>
/// <param name="throwException">true to throw an exception if the <see cref="Control"/> specified in the child parameter is not a control in the <see cref="Control.ControlCollection"/>; otherwise, false.</param>
/// <returns></returns>
/// <remarks>
/// http://msdn.microsoft.com/en-us/library/ta8fcz9s(v=vs.110).aspx
/// </remarks>
public virtual int GetChildIndex(Control child, bool throwException)
{
lock (_internalSyncRoot)
{
if (_controls.Contains(child))
{
return _controls.IndexOf(child);
}
if (throwException)
{
throw new ArgumentException();
}
return -1;
}
}
/// <summary>
/// Retrieves a reference to an enumerator object that is used to iterate over a <see cref="Control.ControlCollection"/>.
/// </summary>
/// <returns>An <see cref="IEnumerator"/>.</returns>
/// <remarks>
/// http://msdn.microsoft.com/en-us/library/ms158431(v=vs.110).aspx
/// </remarks>
public virtual IEnumerator GetEnumerator() => _controls.GetEnumerator();
/// <summary>
/// Retrieves the index of the specified control in the control collection.
/// </summary>
/// <param name="control">The <see cref="Control"/> to locate in the collection.</param>
/// <returns>A zero-based index value that represents the position of the specified <see cref="Control"/> in the <see cref="Control.ControlCollection"/>.</returns>
/// <remarks>
/// http://msdn.microsoft.com/en-us/library/system.windows.forms.control.controlcollection.indexof(v=vs.110).aspx
/// </remarks>
public int IndexOf(Control control)
{
lock (_internalSyncRoot)
{
return _controls.IndexOf(control);
}
}
/// <summary>
/// Retrieves the index of the specified control in the control collection.
/// </summary>
/// <param name="key">The <see cref="Control"/> to locate in the collection.</param>
/// <returns>A zero-based index value that represents the position of the specified <see cref="Control"/> in the <see cref="Control.ControlCollection"/>.</returns>
/// <remarks>
/// http://msdn.microsoft.com/en-us/library/system.windows.forms.control.controlcollection.indexof(v=vs.110).aspx
/// </remarks>
public virtual int IndexOfKey(string key)
{
throw new NotImplementedException();
}
/// <summary>
/// Removes the specified control from the control collection.
/// </summary>
/// <param name="value">The <see cref="Control"/> to remove from the <see cref="Control.ControlCollection"/>.</param>
/// <remarks>
/// http://msdn.microsoft.com/en-us/library/system.windows.forms.control.controlcollection.remove(v=vs.110).aspx
/// </remarks>
public virtual void Remove(Control value)
{
lock (_internalSyncRoot)
{
_controls.Remove(value);
}
}
/// <summary>
/// Removes a control from the control collection at the specified indexed location.
/// </summary>
/// <param name="index">The index value of the <see cref="Control"/> to remove.</param>
/// <remarks>
/// http://msdn.microsoft.com/en-us/library/system.windows.forms.control.controlcollection.removeat(v=vs.110).aspx
/// </remarks>
public void RemoveAt(int index)
{
lock (_internalSyncRoot)
{
_controls.RemoveAt(index);
}
}
/// <summary>
/// Removes the child control with the specified key.
/// </summary>
/// <param name="key">The name of the child control to remove.</param>
/// <remarks>
/// http://msdn.microsoft.com/en-us/library/system.windows.forms.control.controlcollection.removebykey(v=vs.110).aspx
/// </remarks>
public virtual void RemoveByKey(string key)
{
throw new NotImplementedException();
}
/// <summary>
/// Sets the index of the specified child control in the collection to the specified index value.
/// </summary>
/// <param name="child">The child <see cref="Control"/> to search for.</param>
/// <param name="newIndex">The new index value of the control.</param>
/// <remarks>
/// http://msdn.microsoft.com/en-us/library/system.windows.forms.control.controlcollection.setchildindex(v=vs.110).aspx
/// </remarks>
public virtual void SetChildIndex(Control child, int newIndex)
{
lock (_internalSyncRoot)
{
if (_controls.Contains(child))
{
if (newIndex >= Count)
{
_controls.Remove(child);
_controls.Add(child);
}
else
{
_controls.Remove(child);
_controls.Insert(newIndex, child);
}
}
else
{
throw new ArgumentException();
}
}
}
#endregion
#region Explicit Interface Implementations
object ICloneable.Clone()
{
lock (_internalSyncRoot)
{
ControlCollection clone = new ControlCollection(_owner);
clone._controls = _controls;
return clone;
}
}
bool ICollection.IsSynchronized { get { return true; } }
object ICollection.SyncRoot { get { return _syncRoot; } }
int IList.Add(object control)
{
var controlControl = control as Control;
if (controlControl != null)
{
lock (_internalSyncRoot)
{
InternalAdd(controlControl);
return _controls.Count;
}
}
return -1;
}
void IList.Clear()
{
Clear();
}
bool IList.Contains(object value)
{
var valueControl = value as Control;
if (valueControl != null)
{
return Contains(valueControl);
}
return false;
}
int IList.IndexOf(object value)
{
var valueControl = value as Control;
if (valueControl != null)
{
return IndexOf(valueControl);
}
return -1;
}
void IList.Insert(int index, object value)
{
var valueControl = value as Control;
if (valueControl != null)
{
lock (_internalSyncRoot)
{
_controls.Insert(index, valueControl);
}
}
}
bool IList.IsFixedSize => false;
object IList.this[int index]
{
get
{
return this[index];
}
set
{
var valueControl = value as Control;
if (valueControl != null)
{
lock (_internalSyncRoot)
{
_controls[index] = valueControl;
}
}
}
}
void IList.Remove(object control)
{
var controlControl = control as Control;
if (controlControl != null)
{
Remove(controlControl);
}
}
void IList.RemoveAt(int index)
{
RemoveAt(index);
}
#endregion
}
}
}
Answer: All your members should be readonly! None of them should ever change.
You don't want someone to set your lock members to null. That would be bad, I don't think you possibly could change the parent Control, that would be weird, and I don't see the need to create a new instance of the list of child controls!
lock (_internalSyncRoot)
{
foreach (Control c in _controls)
{
if (c.Name == key)
{
return c;
}
}
throw new KeyNotFoundException();
}
I have absolutely no explanation to this, but I have a guts feeling it'd be better if you threw the exception outside the lock statement. A lock should do as little as possible, so I think you should throw outside the lock.
I'm no threading expert, ok? But this also feels wrong :
public virtual void AddRange(Control[] controls)
{
foreach (Control control in controls)
{
Add(control);
}
}
Everytime you call Add, you'll obtain a lock, then release the lock, then obtain the lock, etc. Consider obtaining a lock once and then calling InternalAdd in the loop. you'll avoid weird locking.
Finally, I hate regions, you know that already :p | {
"domain": "codereview.stackexchange",
"id": 16497,
"tags": "c#, multithreading, reinventing-the-wheel, thread-safety"
} |
Why are reversible gates not used? | Question: I was reading the book "The singularity is near" written by Kurzweil and he mentioned the reversible gates like for example the Fredkin gate. The advantage using such gates is that we could get rid of the thermal waste related to computation where bits just disappear into heat, and computation won't need any energy input. Those assumptions make these gates sound like a miracle solution. So the question is what technical hurdles are still preventing their large scale usage.
I also think it is a shame that I never heard about those gates in my electrical engineering bachelor and master studies at a top German university...
Answer: I am by no means an expert on this topic, but just from casually reading Wikipedia:
it relies on the motion of spherical billiard balls in a friction-free environment made of buffers against which the balls bounce perfectly
... this sounds very realistic.
Nobody has actually figured out how to actually make such gates yet, they're merely of theoretical interest. That might explain why you've never heard of them since engineering usually deals with practice.
The premise of Reversible Computing is that when a bit disappears, some amount of heat is generated. By using reversible gates, no bits ever appear or disappear so supposedly computation could be much more efficient with reversible gates.
The theoretical limit Reversible Computing claims to get around is that erasing 1 bit of information generates at least $kT\ln 2$ energy in heat. For a computer running at a toasty $60\,{}^\circ\mathrm{C}$ with $10^9$ transistors each making bits disappear at a rate of $5\,\mathrm{GHz}$, that corresponds to $16\,\mathrm{mW}$ of heat generation. That only accounts for a tiny proportion ($1/10000$) of a computer's energy usage.
Our current-day computers are not limited by heat generation associated with bits disappearing. They are limited by the inherent inefficiency in moving electrons around on tiny copper traces. | {
"domain": "cs.stackexchange",
"id": 14187,
"tags": "computer-architecture"
} |
is it good to have 100% accuracy on validation? | Question: i'm still new in machine learning. currently i'm creating an anomaly detection for flight data. it is a multivariate time series data that include timestamp, latitude, longitude, velocity and altitude of the aircraft. i'm splitting the data into train and test with 80% ratio. i used the keras LSTM autoencoder to do a anomaly detection. so here's my code
def create_sequence(data, time_step = None):
Xs = []
for i in range (len(data) - time_step):
Xs.append(data[i:(i + time_step)])
return np.array(Xs)
# pre-process to split the data
dfXscaled, scalerX = scaledf(df, normaltype=normalization)
num_train = int(df.shape[0]*ratio)
values_dataset = dfXscaled.values
train = values_dataset[:num_train, :]
test = values_dataset[num_train:, :]
# sequence input data [sample, time step, features]
train_input = create_sequence(train, time_step = time_step)
test_input = create_sequence(test, time_step = time_step)
train_time = index_time.index[:num_train]
test_time = index_time.index[num_train:]
# model
model_arch = []
last_layer = num_layers - 1
for x in range(num_layers):
if x == last_layer:
model_arch.append(tf.keras.layers.LSTM(num_nodes, activation='relu', return_sequences=True, dropout = dropout))
else:
model_arch.append(tf.keras.layers.LSTM(num_nodes, activation='relu', input_shape=(time_step, 4), dropout = dropout))
model_arch.append(tf.keras.layers.RepeatVector(time_step))
model_arch.append(tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(4)))
model = tf.keras.models.Sequential(model_arch)
opt= tf.keras.optimizers.SGD(learning_rate=learning_rate)
model.compile(loss=tf.keras.losses.Huber(),
optimizer=opt,
metrics=[tf.keras.metrics.MeanAbsolutePercentageError(name='mape'), tf.keras.metrics.RootMeanSquaredError(name='rmse'), "mae", 'accuracy'])
history = model.fit(train_input, train_input, epochs=epochs, batch_size = num_batch, validation_data=(test_input, test_input), verbose=2, shuffle=False)
when i do a model evaluation, it come up with 100% accuracy
is it good to have 100% accuracy ? or my model is overfitting the data ?
Answer: Usually indicates something is wrong.
In your case, things which do not seem right:
One can easily get ~100% accuracy in anomaly detection - just keep predicting the majority class.
Is this model really for anomaly detection? Anomaly detection is a classification problem, but your metrics (MAPE, RootMeanSquaredError etc.) are regression metrics. | {
"domain": "datascience.stackexchange",
"id": 10853,
"tags": "keras, lstm, anomaly-detection"
} |
Can gravitational waves pass through a black hole? | Question: As the title says, what happens when a gravitational wave approaches a black hole? I would presume that something interesting happens because of the way spacetime works near black holes but I have no knowledge to back it up.
Answer: No, gravitational waves cannot pass through a black hole.
A gravitational wave follows a path through spacetime called a null geodesic. This is the same path that would be followed by a light ray travelling in the same direction, and gravitational waves are affected by black holes in the same way that light rays are. So for example gravitational waves can be refracted by gravitational lenses just as light waves are. And just like light waves, if a gravitational wave crosses the event horizon surrounding a black hole it is then doomed to travel inwards to the singularity and can never escape.
There is one caveat to this. When we talk about a gravitational wave we generally mean a ripple in spacetime that is relatively small. Specifically it is small enough that the energy of the gravitational wave does not significantly affect the spacetime curvature. So when we calculate the trajectory of a gravitational wave near a black hole we take the black hole geometry as fixed, i.e. unaffected by the wave, and we compute the trajectory of the wave in this fixed background.
This is exactly the same approach as we use for calculating the trajectories of light rays. Since light rays carry energy and momentum then, at least in principle, they have their own gravitational fields. But for both the light rays and gravitational waves likely to exist in the universe the energy carried is too small to make a significant contribution to the spacetime curvature.
When you say in your question:
I would presume that something interesting happens because of the way spacetime works near black holes
I would guess you are thinking that the gravitational wave could change the geometry near a black hole, but as described above typical gravitational waves don't have enough energy to do this. It would be reasonable to ask what happens if we give the wave enough energy, but the answer turns out to be that it no longer behaves like a simple wave.
Gravitational waves exist in a regime called linearised gravity where they obey a wave equation that is basically similar to the wave equation light obeys. If we increase the energy so much that gravity becomes non-linear (as if the case for black holes) then the oscillations in the spacetime curvature no longer obey a wave equation and need to be described by the full Einstein equations. For example it has been suggested, but not proven, that really high energy gravitational (or light) waves could interact with each other to form a bound state called a geon. I confess that I'm unsure how much work has been done studying oscillations in this regime. | {
"domain": "astronomy.stackexchange",
"id": 3685,
"tags": "black-hole, general-relativity, gravitational-waves"
} |
Is it possible to grayscale a MPO file (3D camera) for a CNC carving | Question: I had/have a 3D camera back in the day, and I gotten a few pictures. I kinda stopped using it since there wasn't anything I could do with the MPO files. However, the camera might gain a new life if I can turn pictures into a CNC carving by grayscale the MPO files. Does anyone know if this is possible and where it is?
BTW I wasn't sure if this is the right subgroup to ask this to. If it isn't, then please kindly direct me to the sub group I should ask such a question to.
Answer: This was answered at another place. There is 2 programs that I know about now.
https://triaxes.com/articles/from-stereo-pair-to-lenticular-print/
http://www.depthmask.com/priceen.html
none so far is that great when it comes to the details | {
"domain": "engineering.stackexchange",
"id": 3118,
"tags": "3d-printing, cnc"
} |
How do you make a 3D print with an accurately aligned bore hole? | Question: I am printing a wheel with a 2.5mm hole in the middle to accept a shaft of a motor with a set screw. The problem I am having is that the wheel is always wobbly when spinning (sometimes more, sometimes less without changing any parameters). Is there anything I can do to make this print as perfect as possible and prevent the wheel from wobbling?
Answer: 3D printing bores/holes is inherently and wildly inaccurate. You can continuously tweak the model, material, and print configurations to get better results, but for best results, in my experience, redraw the hole to a slightly smaller size than your target and reprint. Then, after printing, use a drill to get the size and geometry more precise.
For your case, re-design the hole in your 3D model to 2mm-2.2mm and use a 2.5mm drill after it's printed. Make sure with your infill amount that after drilling you still have a solid hole surface(infill is not exposed) Also, if you have a drill-press, use that for better drilling results. | {
"domain": "engineering.stackexchange",
"id": 1150,
"tags": "mechanical-engineering, 3d-printing, prototyping"
} |
Compute unknown matrices that minimize a sum | Question: This problem is about working with smart-phone accelerometers.
To calibrate accelerometer, I need to find three unknown matrices T, K and B that minimize this sum:
$$\sum_{i=0}^N(|g|^2 - |TK(a_i + B)|^2)^2,$$
where $a_i$ is a known 3x1 vector and |$g$| is a known constant and unknown matrices are:
$$T = \begin{bmatrix} 1 & t_1 & t_2 \\ 0 & 1 & t_3 \\ 0 & 0 & 1\end{bmatrix},\ K=\begin{bmatrix} k_1 & 0 & 0 \\ 0 & k_2 & 0 \\ 0 & 0 & k_3\end{bmatrix},\ B=\begin{bmatrix} b_1 \\ b_2 \\ b_3\end{bmatrix}.$$
I have read in an article one can solve such problems by Levenberg-Marquardt
(LM) algorithm. Unfortunately I cannot figure out how. Can anyone suggest a solution to this minimization problem? And I need to implement the algorithm in python so if you know any good python library please name it.
Answer: I've found the answer. You can solve such problems using scipy.optimize.root which includes Levenberg-Marquardt (LM) algorithm. | {
"domain": "cs.stackexchange",
"id": 11618,
"tags": "optimization, matrices, linear-algebra, signal-processing"
} |
How to efficiently see if a value is in a set | Question: I was curious is there a really efficient way to see if a value is in a set? This question comes from me thinking about youtube views. As far as I understand youtube view count goes up for every unique view it receives (let's imagine it does if it doesn't already). There are millions or billions of viewers per video. That means to see if it a person first time seeing a video is O(1) (hash set). However, that means the set size has to be possibly millions or billions in length per video (there are a lot of videos), that's a lot of space. How can you get the same result, adding to a set of unique users without possibly using all that space? Is there a more space-efficient solution possible?
Answer: Great question! First, I do not know how YouTube's view counting works, but I will answer the question you ask about counting unique visitors.
This problem is known as the Count-distinct problem. If you want to really count the exact number of unique values, then in the worst case, it can only be done if you save the entire set of values seen so far; this requires at least
$\Omega(N)$ space to store whether or not you have seen $N$ different values, so it becomes infeasible if $N$ is large.
However, there are much better solutions known which are approximation algorithms to the true value, such as HyperLogLog. The idea of these solutions the following algorithm, which is straightforward to understand, and quite beautiful:
For each input value, calculate $\mathsf{hash}(v)$, where $\mathsf{hash}$ is a hash function. Look at how many $0$s are at the end of $\mathsf{hash}(v)$ (trailing zeros), and keep track of the maximum number of trailing zeros seen so far, $k$. Then estimate the number of distinct elements as $2^k$.
It turns out that the estimate $2^k$ is a good approximation to the real number of unique views, and it only requires storing $k$, which takes very little space.
But there are improvements to make it even more accurate, and these are incorporated in algorithms such as HyperLogLog. | {
"domain": "cs.stackexchange",
"id": 19396,
"tags": "algorithms, data-structures, sets, hash"
} |
FCL integration | Question:
I'd like to use FCL as a collision checker in my own planner that's currently not plugged into Moveit. Is there any documentation or code out there that can help me integrate FCL into my planner? I'm going to be testing on the PR2, so I'd imagine people have done this before Moveit, but I can't find any information.
Originally posted by vhwanger on ROS Answers with karma: 52 on 2014-05-17
Post score: 0
Answer:
The online doxygen documentation should be helpful. There´s also a wrapper in OMPL.
I´d recommend taking a look at the implementation in MoveIt! as a lot of what has been done there will be highly relevant to your use case, too (especially considering that you´re likely also using the PR2 URDF model, so a lot of the link transform setup etc. should transfer). Looking at this blog post from 2012 it seems that a lot of FCL is relatively new, so OMPL and MoveIt! are likely the most comprehensive usage examples.
Originally posted by Stefan Kohlbrecher with karma: 24361 on 2014-05-17
This answer was ACCEPTED on the original site
Post score: 0
Original comments
Comment by fergs on 2014-05-18:
Another good source of documentation on usage of FCL is the tests within FCL. | {
"domain": "robotics.stackexchange",
"id": 17987,
"tags": "ros, fcl"
} |
Stern and Gerlach experiment | Question:
The state of a spin-$\frac12$ particle that is spin up along the axis whose direction is specified by the unit vector $n=\sin\theta\cos\phi i+\sin\theta\sin\phi j+\cos\theta k,$ is given by $$|+n \ \rangle=\cos\frac{\theta}{2}|+z \ \rangle + e^{i\phi}\sin\frac{\theta}{2}|-z \ \rangle.$$
a. Suppose that a measurement of $S_z$ is carried out on a particle in the state $|+n \ \rangle.$ What is the probably that the measurement yields $\hbar/2$, $-\hbar/2$. How about the measurements of $S_x?$
b. Determine the uncertainty $\triangle S_z$ and $\triangle S_x$ of your measurements.
I know how to the measurement of $S_z$ for parts a and b, but I am not sure how to do the measurements for $S_x$. I have:
For reference, $$|+x \ \rangle = \frac{1}{\sqrt{2}}|+ z \ \rangle+\frac{1}{\sqrt2}|-z \ \rangle$$ $$|+y \ \rangle = \frac{1}{\sqrt{2}}|+ z \ \rangle+\frac{i}{\sqrt2}|-z \ \rangle.$$
a.) Probability for $\hbar/2$: $$|\langle \ +z |+n \rangle|^2 = \cos^2\frac{\theta}{2}$$
Probability for $-\hbar/2$: $$|\langle \ -z |+n \rangle|^2 = \left|e^{i\phi}\sin\frac{\theta}{2}\right|^2=\sin^2\frac{\theta}{2}.$$
But I am not sure how to do it for $S_x?$
Answer: To find the probability of measuring $\left|+x\right\rangle$ you need to determine the expression $\left|\left\langle+x\middle|+n\right\rangle\right|^2$. You have the expression for $\left|+n\right\rangle$ and you have the expression for $\left|+x\right\rangle$. Therefore you first need to determine the bra vector $\left\langle +x\right|$ which is found by taking the complex conjugate of each coefficient in the $\left|+x\right\rangle$ expression and turning all of the ket vectors to bra vectors. Doing this, we find the following:
$$\left\langle+x\right|= {1 \over \sqrt{2}}\left\langle+z\right|+{1 \over \sqrt{2}}\left\langle-z\right|.$$
You can then use this expression along with the expression given for $\left|+n\right\rangle$ to find:
$$\left\langle+x\middle|+n\right\rangle=\left({1 \over \sqrt{2}}\left\langle+z\right|+{1 \over \sqrt{2}}\left\langle-z\right|\right)\left(\cos{\theta \over 2}\left|+z\right\rangle+e^{i\phi}\sin{\theta \over 2}\left|-z\right\rangle\right)$$
$$\Longrightarrow{1 \over \sqrt2}\cos{\theta \over 2}+{e^{i\phi} \over \sqrt2}\sin{\theta \over 2}.$$
Then, you simply find the modulus squared of this expression to obtain the probability of measuring $+{\hbar \over 2}$. The process is entirely similar for finding the probability of measuring $-{\hbar \over 2}$, the only difference being that you use the bra $\left\langle-x\right|$ rather than $\left\langle +x\right|$. | {
"domain": "physics.stackexchange",
"id": 19018,
"tags": "quantum-mechanics"
} |
Do the laws of geometric optics only apply to linear media? | Question: Do the laws of geometric optics,
the incident, reflected and transmitted rays lie on the same plane;
$\theta_r = \theta_i$;
$n_i \sin\theta_i = n_t \sin\theta_t$ (Snell's law),
apply only when dealing with linear media? By a linear medium I mean one in which the polarization and magnetization are proportional to the electric field and auxiliary magnetic field respectively:
\begin{equation}
\bf{P} = \chi_e \varepsilon_0 \bf{E} \;\;\; \textrm{and} \;\;\; \bf{M} = \chi_m \bf{H}.
\end{equation}
Answer: Yes, if by "linear medium" you mean "linear isotropic medium" (which is implicit in the definition you gave).
For a linear anisotropic medium (also called "birefringent"), like calcite, your law 1 is false (the transmitted beam can bend out of the plane of the incident and reflected beams) and your law 3 is either false or meaningless (you can have $\theta_i=0$ but $\theta_t \neq 0$, cf. birefringent beam displacers, but there isn't really a meaningful value for $n_t$).
Here's a depiction of birefringent materials' blatant disregard for Snell's law: A normal incident beam tilting off-normal within the material:
Your law 2 is universally valid, if you define "the reflected beam" in the normal sense. It's almost tautological ... You can get beams at other angles. A reflected second-harmonic beam will go at a different angle. If the medium diffracts the light for whatever reason (e.g. the medium is made of atoms and the "light" is X-rays), the diffracted beams have other angles. | {
"domain": "physics.stackexchange",
"id": 51664,
"tags": "electromagnetism, optics, geometric-optics"
} |
Universal family of hash functions | Question: How to prove that a $k$-universal family of hash functions is $(k-1)$-universal family?
I tried to prove it by definition of k-universal family of hash functions but I didn't know how to use the definition.
If I prove it, Is it necessary that a $1$-universal family of hash function is universal family?
Definitions:
Let $U$ be a universe of keys, and let $H$ be a finite collection of hash functions mapping $U$ to $\{0,\dots,m-1\}$.
$H$ is universal if $\forall x,y\in U$ where $x\neq y$: $\Pr[h(x)=h(y)] \leq \frac{1}{m}$ where $h$ is chosen randomly from $H$.
$H$ is k-universal if $\forall x_1,x_2,\dots,x_k\in U$ distinct elements and $\forall i_1,i_2,\dots,i_k\in \{0,\dots,m-1\}$: $\Pr[h(x_1)=i_1 \land h(x_2)=i_2 \land \dots \land h(x_k)=i_k] = \frac{1}{m^k}$ where $h$ is chosen randomly from $H$.
Answer: For your first question, here is a hint:
$$
\Pr[h(x_1) = i_1 \land \cdots \land h(x_{k-1}) = i_{k-1}] = \\
\sum_{i_k} \Pr[h(x_1) = i_1 \land \cdots \land h(x_{k-1}) = i_{k-1} \land h(x_k) = i_k].
$$
Regarding the connection between universal and $k$-universal: Consider the family consisting of all constant functions. This family is 1-universal but not universal (for $|U|>1$).
However, it turns out that every 2-universal family is universal (exercise). The converse isn't true: for example, the family of all functions $h$ such that $h(x_1) = 0$ is universal but not 2-universal (for $m>1$). | {
"domain": "cs.stackexchange",
"id": 9727,
"tags": "hash"
} |
Why are there two mature sequences of microRNAs in miRBase entries? | Question: In the miRBase database entries generally show a stem loop sequence and two mature sequences. For example the entry for hsa-mir-15a gives three sequences: (1) the Stem-loop sequence hsa-mir-15a, (2) Mature sequence hsa-miR-15a-5p, and (3) Mature sequence hsa-miR-15a-3p.
What is the difference between the two mature sequences? Does the stem loop structure divide into two different miRNA species?
Answer: I am not sure whether it is made clear in the Wikipedia article on microRNAs, but, yes, there are two possible mature mir species that can be generated from the stem-loop precursor — one from each side of the stem — although most often one greatly predominates.
The diagram in Wikipedia shows the generation of a single miRNA:
However, the entry mentioned in the question (hsa-mir-15a) shows that although the 5′-stem component (UAGCA…) is most abundant in this case, there is a small amount of the 3′-stem component (CAGGC…).
In Drosophila melanogaster a single stem component (more often from the 3′-stem, but also from the 3′-stem) most frequently predominates, but I have personally found microRNAs in which both components occur in similar amounts (unpublished). | {
"domain": "biology.stackexchange",
"id": 7035,
"tags": "genetics, microrna"
} |
Saving allowed positions from a 3x3 matrix | Question: I wrote the next code chunk to save all the allowed positions from a 3x3 matrix, it's working but I think it could be better. Besides something similar happens in my Randomizer class, although I admit it's messy.
public class MatrixRegistry : MonoBehaviour
{
private int matrixVolume;
private int matrixCount;
private GameObject[] matrix;
private List<GameObject> dynamicCubes = new List<GameObject>();
private List<Vector3> staticCubes = new List<Vector3>();
private List<Vector3> allowedPositions = new List<Vector3>();
public Func<List<GameObject>> GetDynamicCubes { get; private set; }
public Func<List<Vector3>> GetAllowedPositions { get; private set; }
private void Awake()
{
#region Getting all Matrix Cubes
matrixCount = gameObject.transform.childCount;
if (matrixCount != 0)
{
matrix = new GameObject[matrixCount];
for (int i = 0; i < matrixCount; i++)
{
matrix[i] = gameObject.transform.GetChild(i).gameObject;
if (matrix[i].GetComponent<CubeInfo>().cubeType == CubeSort.Dynamic)
{
dynamicCubes.Add(matrix[i]);
}
if (matrix[i].GetComponent<CubeInfo>().cubeType == CubeSort.Static)
{
staticCubes.Add(matrix[i].transform.position);
}
}
}
GetDynamicCubes = () => dynamicCubes;
#endregion
#region Setting Allowed Positions
matrixVolume = 3; // Hard Coding
for (int x = 0; x < matrixVolume; x++)
{
for (int y = 0; y < matrixVolume; y++)
{
for (int z = 0; z < matrixVolume; z++)
{
Vector3 position = new Vector3
{
x = x,
y = y,
z = z
};
allowedPositions.Add(position);
}
}
}
for (int f = 0; f < staticCubes.Count; f++)
{
for (int a = 0; a < allowedPositions.Count; a++)
{
if (allowedPositions[a] == staticCubes[f])
{
allowedPositions.Remove(allowedPositions[a]);
}
}
}
GetAllowedPositions = () => allowedPositions;
#endregion
}
}
public class Randomizer
{
private MatrixRegistry matrixRegistry;
private EventManager eventManager;
public Randomizer(MatrixRegistry matrixRegistry, EventManager eventManager)
{
this.matrixRegistry = matrixRegistry;
this.eventManager = eventManager;
eventManager.OnStartGame += Randomize;
}
private void Randomize()
{
for (int i = 0; i < matrixRegistry.GetDynamicCubes().Count; i++)
{
Vector3 randomPosition = new Vector3
{
x = Random.Range(0, 3),
y = Random.Range(0, 3),
z = Random.Range(0, 3)
};
for (int j = 0; j < matrixRegistry.GetAllowedPositions().Count; j++)
{
for (int k = 0; k < matrixRegistry.GetDynamicCubes().Count; k++)
{
if (randomPosition == matrixRegistry.GetAllowedPositions()[j])
{
if (randomPosition != matrixRegistry.GetDynamicCubes()[k].transform.position)
{
matrixRegistry.GetDynamicCubes()[i].transform.position = randomPosition;
}
}
}
}
}
}
}
The randomizer class should check all the allowed positions to get in place all the cubes and avoid overlapping issues.
I know it's not the best, and this is why I'm here.
Answer: The way you are checking if any allowedPositions are in the staticCubes list seems very inefficient to me. I think it would be more efficient to check whether each position is in the staticCubes list as they are created then add them to the allowedPositions list.
Using a LINQ query for this will also make it much more concise:
allowedPositions = (from int x in Enumerable.Range(0, matrixVolume)
from int y in Enumerable.Range(0, matrixVolume)
from int z in Enumerable.Range(0, matrixVolume)
let position = new Vector3(x, y, z)
where !staticCubes.Contains(position)
select position).ToList(); | {
"domain": "codereview.stackexchange",
"id": 33232,
"tags": "c#, unity3d"
} |
Visual plugin loaded twice | Question:
Hello everybody,
I'm having an issue with a visual plugin. Similarly to Illuminatur in this thread, the plugin I'm using is loaded twice.
More precisely, it's loaded twice only when the model of the robot I'm using is loaded in addition to the model that uses the plugin.
The plugin I'm referring to is here, it's a plugin to put texture on an object via YARP.
The robot I'm simulating is the iCub (sdf files here)
Is that a known/desired behavior? Is there a simple way to prevent Gazebo from loading it twice?
Thank you
Originally posted by MSI on Gazebo Answers with karma: 1 on 2017-04-20
Post score: 0
Answer:
When you launch a gazebo world that has a camera sensor, two rendering scenes are actually created (server + client). So the visual plugin is loaded twice because it's loaded once on the server (for the camera sensor) and another time on the client (for the main UI window).
Originally posted by iche033 with karma: 1018 on 2017-04-20
This answer was ACCEPTED on the original site
Post score: 1
Original comments
Comment by MSI on 2017-05-11:
Hello Iche,
Thank you very much for your answer. I would upvote if I could. I will now contact the people who developed that plugin! | {
"domain": "robotics.stackexchange",
"id": 4088,
"tags": "gazebo"
} |
For a quantum particle with quantum number $n$ in an infinite square well, are these statements correct? | Question: I was wondering if the following statements are correct for a quantum particle in an infinite square well
The greater $n$, the greater the average kinetic energy associated with this wave function.
In the limit of high quantum numbers, the quantum mechanical result reduces to its classical counterpart in the classical domain.
We will finally get a classical probability density if $n$ gets greater.
Answer: The kinetic energy is proportional to the curvature of the wave function, and indeed the larger $n$ gets the larger the curvature of the wave function, which implies that the kinetic energy becomes larger.
The quantum probability distribution does approach a uniform distribution in the limit of large $n$, so you can think of this as a "classical distribution", although note that this comes with many caveats (e.g. classically we can know the exact position and momentum of the particle at any point, so in principle there is no need for a probability distribution).
Some time ago I prepared some interactive code that you can use to visualize the wave function for the infinite square well for different values of $n$, you can find it in this Jupyter Notebook. | {
"domain": "physics.stackexchange",
"id": 73082,
"tags": "quantum-mechanics, homework-and-exercises, classical-mechanics, wavefunction, schroedinger-equation"
} |
How diverse are dogs in their traits other than appearance? | Question: I've asked this question about dogs not so long ago, and the short answer was - dogs are the most diverse looking species of mammals because they got a small number of genes that have a big impact on appearance.
I would like to ask a follow up question- differences in appearance are easy to notice, and I would like to learn about other traits that might be genetically based.
Are there other major differences between dog breeds, like longevity, cognitive performance, friendliness, etc? Or are all dogs more or less the same in traits other than appearance?
Are there plots of traits like these among different dog breeds that identify outliers?
Answer: Dogs are highly diverse in their patterns of copy number variation (variation in the number of copies of different genes). This has been shown to be probably related to the diversity in morphology. It would thus presumably also affect other traits such as those that you list, and it seems to at least have been shown for disease susceptibility. Interestingly, wolves have less copy number variation on average than dogs (citation). | {
"domain": "biology.stackexchange",
"id": 1190,
"tags": "genetics, zoology, dogs"
} |
EKF localization data association | Question: I am working with ROS indigo and clearpath huskyA200 and wanted to implement the EKF localization with unknown correspondences with my own hokuyo lidar data for a school project. Given the algorithm on page 217 of the probabilistic robotics book by Thrun (picture of the algorithm is given below), what does step 9 mean by “for all observed features”? Does it mean all the raw data in a lidar scan? Or does it mean to process the raw data first to find the features? If it’s the latter, what would be some techniques to process the raw data to find features?
This stackoverflow post helped me understand this algorithm a lot better, but now, i am just not sure how to provide the observed feature with my own lidar scan data.
Any help is appreciated, thank you.
Answer: It should be processed features. Extracting features from raw data is usually called the front-end in SLAM. The easiest forms of features in the case of 2D LiDAR are corners, edges, and lines. You can run the RANSAC algorithm to find line segments. Corners are found by the intersection of lines, and edges are the ends of lines. Corners should be enough for a uni project.
ICP can be utilized to register raw scans but that becomes a different type of SLAM. | {
"domain": "robotics.stackexchange",
"id": 1739,
"tags": "mobile-robot, slam, localization, ekf, data-association"
} |
How to find the wavefunction that solves an infinite square well with a delta function well in the middle? | Question: Solutions for the wavefunction in an infinite square well with a delta function barrier in the middle are easily found online (see here for an example). I am wondering what the wavefunction is for an infinite square well with a delta function well in the middle. The setup is the bottom of the infinite square well is defined to be zero energy. I realize that there will be two situations, one where the particle's energy is less than zero and will therefore be bound to the delta function well and one where the particle's energy is greater than zero and is bound to the infinite square well. What are the wavefunctions for these two situations?
Answer: Consider an infinite square with free region $[0, L]$. Place a delta function potential at $L/2$ with strength $\alpha$. Then, Schrödinger's equation is
$$E\psi = -\frac{\hbar^{2}}{2m}\frac{\partial^{2}\psi}{\partial x ^{2}} + \alpha \delta(x- \frac{L}{2})\psi$$
First, note that the delta function is zero everywhere except at the center, which means that the energy eigenstates everywhere else are given by $\psi = A \sin (kx + \phi)$. Furthermore, we know that $\psi(0) = \psi(L) = 0$. Thus, we know that on the left hand side of the delta, we have $\phi = 0$, and on the right hand side, we have $\phi = - kL$. Thus, for $0<x<L/2$, we have $\psi = A \sin(kx)$, while for $L/2 < x < L$, we have $\psi = B \sin (k(x-L))$. The wave function must be continuous at $L/2$, so this guarantees that $A=-B$.
We can derive a restriction on $k$ by integrating Schrödinger's equation over an arbitrarily small region around $x = L/2$. Since the wave function is continuous, the left hand side drops off, and we're left with:
$$0 = -\frac{\hbar^{2}}{2m}(\psi^{\prime}_{+} - \psi^{\prime}_{-}) + \alpha\psi(L/2)$$
Or, more concretely,
$$\psi^{\prime}_{+} = \frac{2m\alpha}{\hbar^{2}}\psi(L/2) + \psi^{\prime}_{-} $$
Taking the derivative and substituting, we find:
$$-Ak\cos(kL/2) = \frac{2m\alpha}{\hbar^{2}}A\sin(kL/2)+Ak\cos(kL/2)$$
Finally, this gives us the transcendental equation
$$\tan(kL/2) = - \frac{\hbar^{2}k}{m\alpha}$$
Noting that we still have, as in the finite square well case, $E = \frac{\hbar^{2}k^{2}}{2m}$, all solutions have $E > 0$, irrespective of the sign of $\alpha$, although simple graphing shows that there are infinite numbers of solutions to this equation, independent of the sign of $\alpha$ (though the sign of and value of $\alpha$ will shift where those solutions are quite dramatically). | {
"domain": "physics.stackexchange",
"id": 9784,
"tags": "quantum-mechanics, homework-and-exercises, wavefunction, schroedinger-equation, potential"
} |
What are "local degrees of freedom in gravity", and why do they lead to fixed energy densities? | Question: I am reading Jan de Boer's review of the AdS/CFT correspondence and I quote from end of page 1, where he is talking about equivalence of $(d+1)$-dimensional gravity to $d$-dimensional field theory
“If true, it implies [...]. If the degrees of freedom in gravity would be local, one would imagine that one can have arbitrarily large volumes with fixed energy density.[...]”
I don't quite understand that. What does it mean for “degrees of freedom to be local”? And how does that lead to fixed energy-density?
Answer: Let's imagine discretizing spacetime on a lattice. For an ordinary scalar field, you can independently specify the field value at each point on the lattice. Thus there is one degree of freedom at each spacetime point. We say that the field is a local degree of freedom. If you like, the number of local degrees of freedom is the number of field values you specify on the lattice, divided by the number of lattice points.
To discuss energy, let's take a simple example where we have a field sitting at the minimum of the potential. Then the energy associated with a single lattice point is determined only by the potential energy $V(\phi_{min})$. Here we see that the energy density is fixed over the whole spacetime, in the sense that the energy associated with each lattice point is the same. If there are $N$ points on the lattice, then the total energy of the whole lattice is $NV(\phi_{min})$. The energy scales like the number of spacetime points, which is the volume.
Based on the holographic principle, the expectation is that quantum gravity won't have local degrees of freedom. For example, we expect the number of degrees of freedom in a region to scale like the area of that region, not its volume. | {
"domain": "physics.stackexchange",
"id": 25633,
"tags": "black-holes, quantum-gravity, ads-cft, holographic-principle, degrees-of-freedom"
} |
Can pressure create the same effect as gravity? | Question: So here's my question: In high gravity environments we'd literally be crushed and extreme water pressure produces the same result. So is it at all possible that we could provide a pressurized environment that creates something similar to Earth's gravity?
Answer: No and here's the difference.
In extremely high gravity, we would be flattened against the ground. Our bones could not support our own weight and would break.
In a high pressure environment, we would implode; crushed from all sides. Our bones would be perfectly capable of supporting us and (if there were no gravity to begin with) we could even fly around the room, but the high pressure would act on us like your fist around a Styrofoam ball (there are also numerous other effects that high pressure gases have on your blood vessels, brain, lungs, ears, etc)
What to take home from this? There are many ways to be crushed, but not all of them are the same thing. | {
"domain": "physics.stackexchange",
"id": 47586,
"tags": "gravity"
} |
Problems with the Turtlebot Dashboard Launch | Question:
When I type in rqt -s turtlebot_dashboard to terminal I get the error
WARNING: Package name "swig-wx" does not follow the naming conventions. It should start with a lower case letter and only contain lower case letters, digits and underscores.
WARNING: Package name "swig-wx" does not follow the naming conventions. It should start with a lower case letter and only contain lower case letters, digits and underscores.
RosPyPluginProvider.discover() could not find ROS master, all rospy-based plugins are disabled
qt_gui_main() found no plugin matching "turtlebot_dashboard"
What do I do to launch the Dashboard?
Originally posted by smkybear15 on ROS Answers with karma: 1 on 2013-03-10
Post score: 0
Original comments
Comment by prasanna.kumar on 2013-03-11:
You could overlook this warning. I get this warning every time and it's not a problem.
Answer:
I believe Groovy by default assumes a TurtleBot is using a Kobuki base with related drivers and sensors. When using a TurtleBot with the Create base, you'll see errors similar to that in the original post.
For me the key to getting a dashboard up with the Create base is to set the following environment variables on both the robot laptop and the remote workstation:
export TURTLEBOT_BASE=create
export TURTLEBOT_STACKS=circles
export TURTLEBOT_3D_SENSOR=kinect
I can then get a working dashboard by running the following on the robot laptop:
roslaunch turtlebot_bringup minimal.launch
And the following command on my workstation laptop:
roslaunch turtlebot_dashboard turtlebot_dashboard.launch
Originally posted by mpthompson with karma: 153 on 2013-03-15
This answer was ACCEPTED on the original site
Post score: 1
Original comments
Comment by Chik on 2013-03-17:
You are right. | {
"domain": "robotics.stackexchange",
"id": 13289,
"tags": "roslaunch, turtlebot"
} |
Garbage collection in JavaScript multiplayer game | Question: I am currently creating a Multiplayer Game using node.js and web sockets. The client side is receiving the positional information and applying it to all the players on the client side. Each client stores a list of all game elements.
The main issue I am experiencing at the moment is a large amount of Garbage Collection. I am unsure what could be causing this, since I followed all of the tips I have found online: set variables to null to make sure they are collected ASAP, don't create objects during runtime, etc.
After running a profile, this is the result:
As you can see, when the GC kicks in, the game stops for a long time.
How can I reduce/remove GC and object creation from my code. It is fully working, but it performs poorly on low end devices.
I initialize all the arrays at the start.
var energies = [];
var walls = [];
var users = [];
var leaderboard = [];
Those arrays are then filled with data by the server:
socket.on('serverTellPlayerMove', function(userData, strEnergy) {
var visible = [];
for (var i = 0; i < userData.length;) {
if ((userData[i] == 1) == true) {
visible.push({
p : userData[0 + i] == 1,
score : userData[1 + i],
x : userData[2 + i],
y : userData[3 + i],
angle : userData[4 + i],
hue : userData[5 + i],
name : userData[6 + i],
dead : userData[7 + i] == 1,
dying : userData[8 + i] == 1
});
i += 9;
} else {
visible.push({
x : userData[0 + i],
y : userData[1 + i],
angle : userData[2 + i],
hue : userData[3 + i],
name : userData[4 + i],
dead : userData[5 + i] == 1,
dying : userData[6 * i] == 1
});
i += 7;
}
}
// SET DATA:
for (var i = 0; i < visible.length; i++) {
if (visible[i].p) {
var playerData = visible[i];
i = visible.length;
}
}
if (userData != "") {
if (playerType == 'player') {
// HOW MUCH YOU MOVED BY:
var xoffset = player.x - playerData.x;
var yoffset = player.y - playerData.y;
player.x = playerData.x;
player.y = playerData.y;
player.angle = playerData.angle;
player.hue = playerData.hue;
player.xoffset = isNaN(xoffset) ? 0 : xoffset;
player.yoffset = isNaN(yoffset) ? 0 : yoffset;
player.dead = playerData.dead;
document.getElementById("killsText").innerHTML = "Score: "
+ playerData.score;
}
}
// DEATH ANIM:
for (var i = 0; i < visible.length; i++) {
if (visible[i].dying) {
createExplosion(visible[i].x, visible[i].y);
}
}
// DEATH ANIM:
for (var i = 0; i < visible.length; i++) {
if (visible[i].dying) {
createExplosion(visible[i].x, visible[i].y);
}
}
users = visible;
visible = null;
var energiesList = [];
for (var i = 0; i < strEnergy.length; i += 5) {
energiesList.push({
x : strEnergy[0 + i],
y : strEnergy[1 + i],
index : strEnergy[2 + i],
animate : strEnergy[3 + i] == 1,
hue : strEnergy[4 + i]
});
}
var energyT = null;
for (var i = 0; i < energiesList.length; ++i) {
energyT = energiesList[i];
if (energyT.animate) {
energyT.animScale = 0;
} else {
if (energies[energyT.index] != undefined)
energyT.animScale = energies[energyT.index].animScale;
}
energies[energyT.index] = energyT;
}
energiesList = null;
energyT = null;
userData = null;
strEnergy = null;
});
This code is called 30 times a second. This updates every users data on each client side. It also updates the pellet data. Pellets are objects that can be picked up by players in the game. The above snippet does most of the logical work on the client side.
Each object is also rendered as follows:
window.requestAnimFrame = (function() {
return window.requestAnimationFrame || window.webkitRequestAnimationFrame
|| window.mozRequestAnimationFrame
|| window.msRequestAnimationFrame || function(callback) {
window.setTimeout(callback, 1000 / 60);
};
})();
function animloop() {
gameLoop();
animLoopHandle = window.requestAnimFrame(animloop);
}
function doGame() {
now = Date.now();
delta = now - lastUpdate;
lastUpdate = now;
startX = player.x - (screenWidth / 2);
startY = player.y - (screenHeight / 2);
graph.fillStyle = backgroundColor;
graph.fillRect(0, 0, screenWidth, screenHeight);
drawgrid();
drawEnergies(energies);
drawPlayers(users);
drawWalls(walls);
}
And the actual rendering:
function drawPlayers(playersToDraw) {
var playerC = null;
var fontSize = Math.max(playerRadius / 1.2, 11);
var radiusD = (playerRadius * 2.0);
for (var z = 0; z < playersToDraw.length; z++) {
playerC = playersToDraw[z];
if (!playerC.dead) {
var shapeX = playerC.x - startX;
var shapeY = playerC.y - startY;
var rotation = (Math.PI / 180) * playerC.angle;
scale = radiusD;
graph.fillStyle = 'hsl(' + playerC.hue + ', 50%, 45%)';
graph.strokeStyle = 'hsl(' + playerC.hue + ', 50%, 60%)';
graph.lineJoin = "round";
graph.lineWidth = playerHRadius;
graph.save();
graph.translate(shapeX, shapeY);
graph.rotate(rotation);
graph.beginPath();
graph.moveTo(0, (-2 * scale / 3));
graph.lineTo((-scale / 2), scale / 3);
graph.lineTo((scale / 2), scale / 3);
graph.lineTo(0, (-2 * scale / 3));
graph.closePath();
graph.stroke();
graph.fill();
graph.restore();
}
}
playerC = null;
}
As you can see I am setting playerC to null in an Attempt to make it be collected by the GC.
There are some more rendering functions as well, but they are all very similar to the player draw function.
How do I reduce GC on this code and does the data received from the server need to be collected as well?
I have attempted to implement v-sync, but then I realized that the movement data is sent by the server, so there is no way to use delta time or v-sync.
As suggested, I did a Heap Snapshot. Here is the result:
And here is what was actually removed:
After some further looking around, I found this:
It points at the data received from the server. Also I noticed some throw TypeError statements, what could be causing that?
Here is a LIVE DEMO of the code in action:
http://www.vertix.io
Answer: I think you should use a static approach for the visible array, avoiding the push instruction: declare and fill the array with dummy structures outside the socket.on handler and then iterate over the items to change their value.
Use a variable to keep the length of the received values, instead of calling visible.length().
You are also copying many times your data: in Javascript var playerData = visible[i]; makes another copy of the data, consider accessing the player's data by index instead of copying it in another structure.
The same problem arises for the energiesList array. Define it outside the event and only update its values inside, keeping the real lenght in a variable.
Hope it helps.
EDIT
This is how the code should be refactored (you need to test it)
MAX_VISIBLES = 500;
VISIBLE_PLAYER = 1;
VISIBLE_OTHER = 0;
var visible = [];
for (var i = 0; i < MAX_VISIBLES; i++) {
visible.push({
type: 0,
p : 0,
score : 0,
x : 0,
y : 0,
angle : 0,
hue : 0,
name : 0,
dead : 0,
dying : 0
});
}
// Just need it once (only if you use it anywhere else in the code)
users = visible;
// You'll need it if you want to iterate over users
var usersLength = 0;
MAX_ENERGIES = 1000;
var energiesList = [];
for (var i = 0; i < MAX_ENERGIES; i++) {
energiesList.push({
x : 0,
y : 0,
index : 0,
animate : 0,
hue : 0
});
}
socket.on('serverTellPlayerMove', function(userData, strEnergy) {
for (var i = 0, visibleLength = 0; i < userData.length; visibleLength++) {
if ((userData[i] == 1) == true) {
visible[visibleLength].type = VISIBLE_PLAYER;
visible[visibleLength].p : userData[0 + i] == 1;
visible[visibleLength].score : userData[1 + i];
visible[visibleLength].x : userData[2 + i];
visible[visibleLength].y : userData[3 + i];
visible[visibleLength].angle : userData[4 + i];
visible[visibleLength].hue : userData[5 + i];
visible[visibleLength].name : userData[6 + i];
visible[visibleLength].dead : userData[7 + i] == 1;
visible[visibleLength].dying : userData[8 + i] == 1;
i += 9;
var playerData = visible[visibleLength];
} else {
visible[visibleLength].type = VISIBLE_OTHER;
visible[visibleLength].x : userData[2 + i];
visible[visibleLength].y : userData[3 + i];
visible[visibleLength].angle : userData[4 + i];
visible[visibleLength].hue : userData[5 + i];
visible[visibleLength].name : userData[6 + i];
visible[visibleLength].dead : userData[7 + i] == 1;
visible[visibleLength].dying : userData[8 + i] == 1;
i += 7;
}
}
// I don't think you need this cycle: playerdata can be assigned
// during userData parsing
// // SET DATA:
// for (var i = 0; i < visible.length; i++) {
// if (visible[i].p) {
// var playerData = visible[i];
// i = visible.length;
// }
// }
if (userData != "") {
if (playerType == 'player') {
// HOW MUCH YOU MOVED BY:
var xoffset = player.x - playerData.x;
var yoffset = player.y - playerData.y;
player.x = playerData.x;
player.y = playerData.y;
player.angle = playerData.angle;
player.hue = playerData.hue;
player.xoffset = isNaN(xoffset) ? 0 : xoffset;
player.yoffset = isNaN(yoffset) ? 0 : yoffset;
player.dead = playerData.dead;
document.getElementById("killsText").innerHTML = "Score: "
+ playerData.score;
}
}
// DEATH ANIM:
for (var i = 0; i < visibleLength; i++) {
if (visible[i].dying) {
createExplosion(visible[i].x, visible[i].y);
}
}
// DEATH ANIM:
for (var i = 0; i < visibleLength; i++) {
if (visible[i].dying) {
createExplosion(visible[i].x, visible[i].y);
}
}
// See above
// users = visible;
// visible = null;
usersLength = visibleLength;
for (var i = 0, energiesListLength = 0; i < strEnergy.length; i += 5, energiesListLength++) {
energiesList[energiesListLength].x : strEnergy[0 + i];
energiesList[energiesListLength].y : strEnergy[1 + i],
energiesList[energiesListLength].index : strEnergy[2 + i];
energiesList[energiesListLength].animate : strEnergy[3 + i] == 1;
energiesList[energiesListLength].hue : strEnergy[4 + i];
}
var energyT = null;
for (var i = 0; i < energiesListLength; ++i) {
energyT = energiesList[i];
if (energyT.animate) {
energyT.animScale = 0;
} else {
if (energies[energyT.index] != undefined)
energyT.animScale = energies[energyT.index].animScale;
}
energies[energyT.index] = energyT;
}
// You don't need to force GC: you need to avoid GC
// energyT = null;
// userData = null;
// strEnergy = null;
}); | {
"domain": "codereview.stackexchange",
"id": 16574,
"tags": "javascript, performance, node.js, memory-management, animation"
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.