anchor
stringlengths
0
150
positive
stringlengths
0
96k
source
dict
Snake game in C++17 with SDL2
Question: I implemented a simple snake clone in C++, using SDL2 for the graphics part. Gameplay-wise, its pretty much classic snake: The player is able to control the snake with "WASD", food gets spawned randomly and if you eat it, you gain speed and length. The game is lost once you collide with yourself. Any tips? source.cpp #include <SDL.h> #include "Framework.h" #include "Game.h" int main(int argc, char* argv[]) { Snake::SDL_Framework framework("Snake", 800, 800); Snake::Game game(framework); game.run(); return 0; } Framework.h #ifndef SDL_FRAMEWORK #define SDL_FRAMEWORK #include <SDL.h> #include <cstdint> #include <string> #include <exception> namespace Snake { class SDL_Framework { public: SDL_Framework() : main_window{ nullptr }, renderer{ nullptr }, window_height{ 0 }, window_width{ 0 } {}; SDL_Framework(const std::string& window_name, int32_t height, int32_t width); ~SDL_Framework(); void clear(); void update(); int32_t get_height() const { return window_height; } int32_t get_width() const { return window_width; } SDL_Renderer* renderer; private: SDL_Window* main_window; int32_t window_height; int32_t window_width; }; } #endif Framework.cpp #include "Framework.h" static void process_error(const std::string& msg) { throw std::runtime_error(msg); } namespace Snake { SDL_Framework::SDL_Framework(const std::string& window_name, int32_t height, int32_t width) { if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER) < 0) { process_error(std::string("Error while trying to initialise SDL! SDL_Error: ") + SDL_GetError()); } main_window = SDL_CreateWindow(window_name.c_str(), SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, height, width, SDL_WINDOW_SHOWN); if (main_window == nullptr) { process_error(std::string("Error while trying to create SDL_Window! 
SDL_Error: ") + SDL_GetError()); } window_height = height; window_width = width; renderer = SDL_CreateRenderer(main_window, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC); if (renderer == nullptr) { process_error(std::string("Error while trying to create renderer! SDL_Error: ") + SDL_GetError()); } SDL_SetRenderDrawColor(renderer, 0xFF, 0xFF, 0xFF, 0xFF); } SDL_Framework::~SDL_Framework() { SDL_Quit(); SDL_DestroyRenderer(renderer); SDL_DestroyWindow(main_window); } void SDL_Framework::clear() { SDL_SetRenderDrawColor(renderer, 0xFF, 0xFF, 0xFF, 0xFF); SDL_RenderClear(renderer); } void SDL_Framework::update() { SDL_RenderPresent(renderer); } } Game.h #ifndef SNAKE_GAME #define SNAKE_GAME #include "Framework.h" #include "Board.h" #include "Player.h" namespace Snake { class Game { public: Game(const SDL_Framework& uframework); void run(); void update(); void render(); private: SDL_Framework framework; Board board; Player player; int32_t frame; }; } #endif Game.cpp #include "Game.h" namespace Snake { Game::Game(const SDL_Framework& uframework) : frame{0} { framework = uframework; int32_t cells_per_side = 25; int32_t cell_size = framework.get_height() / cells_per_side; board = Board(framework.get_height(), framework.get_width(), cell_size); board.generate_food(); int32_t middle = cell_size * (framework.get_height() / cell_size / 2); // calculating the middle of the board in cell roster, assuming length == width player = Player(middle, middle); } void Game::run() { bool running = true; SDL_Event event; do { frame++; while (SDL_PollEvent(&event) != 0) { switch (event.type) { case SDL_QUIT: { running = false; break; } } } update(); framework.clear(); render(); framework.update(); } while (running); } void Game::update() { if (player.check_food_collision(board.get_food_pos())) { board.generate_food(); } player.update(board.get_borders(), board.get_cell_size(), frame); } void Game::render() { board.render(framework.renderer); player.render(framework.renderer, 
board.get_cell_size()); } } Board.h #ifndef SNAKE_BOARD #define SNAKE_BOARD #include <SDL.h> #include <utility> #include <random> #include "Cell.h" namespace Snake { class Board { public: Board() = default; Board(int32_t ulength, int32_t uwidth, int32_t cell_size); void generate_food(); void render(SDL_Renderer* renderer) const; std::pair<int32_t, int32_t> get_food_pos() const { return food.get_pos(); } std::pair<int32_t, int32_t> get_borders() { return { length, width }; } int32_t get_cell_size() const { return cell_size; } private: Cell food; int32_t length; int32_t width; int32_t cell_size; std::mt19937 gen; std::uniform_int_distribution<int32_t> distr; }; } #endif Board.cpp #include "Board.h" namespace Snake { Board::Board(int32_t ulength, int32_t uwidth, int32_t ucell_size) : length{ ulength }, width{ uwidth }, cell_size{ ucell_size } { gen = std::mt19937(std::random_device{}()); distr = std::uniform_int_distribution<int32_t>(0, (length / cell_size) - 1); } void Board::generate_food() { int32_t food_x = distr(gen) * cell_size; int32_t food_y = distr(gen) * cell_size; food.update_pos(food_x, food_y); } void Board::render(SDL_Renderer* renderer) const { SDL_SetRenderDrawColor(framework.renderer, 0, 0, 0, 255); // render cell grid for (int32_t distance = cell_size; distance < length; distance += cell_size) { SDL_RenderDrawLine(renderer, distance, 0, distance, length); SDL_RenderDrawLine(renderer, 0, distance, width, distance); } // render food food.render(renderer, cell_size, SDL_Color{ 0, 0, 0, 255 }); } } Player.h #ifndef SNAKE_PLAYER #define SNAKE_PLAYER #include <SDL.h> #include <vector> #include "Cell.h" namespace Snake { class Player { enum Direction { UP, DOWN, RIGHT, LEFT }; public: Player() = default; Player(int32_t x, int32_t y) : length{ 1 }, growth_remaining{ 2 }, growth_per_cell{ 2 }, cur_direction{LEFT}, speed{ 30 }, speed_gain{2}, max_speed{ 4 } { head.update_pos(x, y); } void update(std::pair<int32_t, int32_t> borders, int32_t cell_size, int32_t 
frame); void render(SDL_Renderer* renderer, int32_t cell_size) const; bool check_food_collision(std::pair<int32_t, int32_t> food_pos); private: void update_position(std::pair<int32_t, int32_t> borders, int32_t cell_size, int32_t frame); void update_direction(); bool check_self_collision(); void reset(); int32_t length; int32_t growth_remaining; int32_t growth_per_cell; Direction cur_direction; int32_t speed; // lower is faster, updates player every {speed} frames int32_t speed_gain; int32_t max_speed; Cell head; std::vector<Cell> body; std::pair<int32_t, int32_t> get_next_pos(int32_t cell_size, std::pair<int32_t, int32_t> cur_pos); }; } #endif Player.cpp #include "Player.h" namespace Snake { void Player::update(std::pair<int32_t, int32_t> borders, int32_t cell_size, int32_t frame) { update_direction(); update_position(borders, cell_size, frame); if (check_self_collision()) { reset(); } } void Player::update_position(std::pair<int32_t, int32_t> borders, int32_t cell_size, int32_t frame) { if (frame % speed) return; // only update when neccessary if (growth_remaining) { body.insert(std::begin(body), Cell(head.get_pos().first, head.get_pos().second)); growth_remaining--; } else { if (!body.empty()) { for (uint32_t i = body.size() - 1; i > 0; --i) { body[i].update_pos(body[i-1].get_pos().first, body[i - 1].get_pos().second); } body[0].update_pos(head.get_pos().first, head.get_pos().second); } } auto new_pos = get_next_pos(cell_size, head.get_pos()); // implement player moving over border if (new_pos.first > borders.second) { new_pos.first = 0; } if (new_pos.first < 0) { new_pos.first = borders.second - cell_size; } if (new_pos.second > borders.first) { new_pos.second = 0; } if (new_pos.second < 0) { new_pos.second = borders.first - cell_size; } head.update_pos(new_pos.first, new_pos.second); } void Player::update_direction() { const Uint8* key_state = SDL_GetKeyboardState(nullptr); if (key_state[SDL_SCANCODE_W]) { cur_direction = UP; } else if 
(key_state[SDL_SCANCODE_S]) { cur_direction = DOWN; } else if (key_state[SDL_SCANCODE_A]) { cur_direction = LEFT; } else if (key_state[SDL_SCANCODE_D]) { cur_direction = RIGHT; } } bool Player::check_self_collision() { for (auto& bodypart : body) { if (head.get_pos() == bodypart.get_pos()) { return true; } } return false; } void Player::reset() { body.clear(); growth_remaining = 2; speed = 30; } void Player::render(SDL_Renderer* renderer, int32_t cell_size) const { head.render(renderer, cell_size, SDL_Color{255, 100, 0, 255}); for (auto& cell : body) { cell.render(renderer, cell_size); } } bool Player::check_food_collision(std::pair<int32_t, int32_t> food_pos) { if (head.get_pos() == food_pos) { growth_remaining += growth_per_cell; speed -= speed_gain; // increase speed if (speed < max_speed) { speed = max_speed; } return true; } return false; } std::pair<int32_t, int32_t> Player::get_next_pos(int32_t cell_size, std::pair<int32_t, int32_t> cur_pos) { std::pair<int32_t, int32_t> next_pos(cur_pos); switch (cur_direction) { case UP: { next_pos.second -= cell_size; break; } case DOWN: { next_pos.second += cell_size; break; } case RIGHT: { next_pos.first += cell_size; break; } case LEFT: { next_pos.first -= cell_size; break; } } return next_pos; } } Cell.h #ifndef SNAKE_CELL #define SNAKE_CELL #include <SDL.h> #include <cstdint> #include <utility> namespace Snake { class Cell { public: Cell() = default; Cell(int32_t x, int32_t y) : pos{ x, y } {} void render(SDL_Renderer* renderer, int32_t cell_size, SDL_Color color = SDL_Color{ 255, 30, 20, 255 }) const; void update_pos(int32_t x, int32_t y) { pos.first = x; pos.second = y; } std::pair<int32_t, int32_t> get_pos() const { return pos; } private: std::pair<int32_t, int32_t> pos; }; } #endif Cell.cpp #include "Cell.h" namespace Snake { void Cell::render(SDL_Renderer* renderer, int32_t cell_size, SDL_Color color) const { // set draw color to red SDL_SetRenderDrawColor(renderer, color.r, color.g, color.b, color.a); SDL_Rect 
rect = { pos.first + 1, pos.second + 1, cell_size - 1, cell_size - 1 }; SDL_RenderFillRect(renderer, &rect); } } Answer: double deletion issue: Snake::SDL_Framework framework("Snake", 800, 800); Snake::Game game(framework); ... Game::Game(const SDL_Framework& uframework) : frame{0} { framework = uframework; ... SDL_Framework::~SDL_Framework() { SDL_Quit(); SDL_DestroyRenderer(renderer); SDL_DestroyWindow(main_window); } We have one SDL_Framework object in main(). Then we copy it to a member variable in the Game constructor. So now we have two SDL_Framework objects. That means two destructor calls... and we're calling SDL_DestroyRenderer and SDL_DestroyWindow twice with the same handles! We might get away with it right now because the SDL_Quit call is first, otherwise we'd probably be trying to free the same memory twice. What we really need is to make SDL_Framework non-copyable and store only a reference or pointer to it in Game. Alternatively we could make SDL_Framework moveable only, and move it into Game instead. window size bug: SDL_CreateWindow(window_name.c_str(), SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, height, width, SDL_WINDOW_SHOWN); The height and width parameters are the wrong way around in this call. (It's conventional to use (x, y) for coordinates instead of (y, x)). naming nitpick: void SDL_Framework::update() { SDL_RenderPresent(renderer); } update() is very vague and potentially confusing given its use elsewhere. present_buffer() or swap_buffers() or something similar might be a better name. access-control nitpick: class Game { public: Game(const SDL_Framework& uframework); void run(); void update(); void render(); We only call the run() function from outside the class, so update() and render() could be private. 
coordinate class: std::pair<int32_t, int32_t> As the comments point out, it's definitely worth using a simple struct so we know which coordinate is which: template<class T> struct Vec2 { T x, y; }; frame-rate (in)dependence: int32_t speed; // lower is faster, updates player every {speed} frames ... if (frame % speed) return; // only update when neccessary This might work if we had a fixed frame-rate. But I don't see anything in the main loop limiting the frame-rate. So on a fast computer, the snake will be much quicker than on a slower one. We should use the facilities in the <chrono> standard header to make an actual timer for this, e.g. in Player::update_position(): auto now = std::chrono::high_resolution_clock::now(); if (now > last_update + time_between_updates) { last_update = now; ... do update ... } Where time_between_updates works similarly to the speed variable, but represents an actual time. unnecessary class: class Cell { public: Cell() = default; Cell(int32_t x, int32_t y) : pos{ x, y } {} void render(SDL_Renderer* renderer, int32_t cell_size, SDL_Color color = SDL_Color{ 255, 30, 20, 255 }) const; void update_pos(int32_t x, int32_t y) { pos.first = x; pos.second = y; } std::pair<int32_t, int32_t> get_pos() const { return pos; } private: std::pair<int32_t, int32_t> pos; }; I'm not sure we really need this class. It's just a coordinate pair. The rendering could be a free function, or inlined where it's used. player update improvements: body.insert(std::begin(body), Cell(head.get_pos().first, head.get_pos().second)); Inserting at the front of a vector is quite slow, because we have to move every other element backwards to make room. Perhaps a deque would be a better? (Though iteration may be slightly slower). 
if (!body.empty()) { for (uint32_t i = body.size() - 1; i > 0; --i) { body[i].update_pos(body[i-1].get_pos().first, body[i - 1].get_pos().second); } body[0].update_pos(head.get_pos().first, head.get_pos().second); } Every cell simply follows the next cell, right? In that case, can't we just erase the last cell in the vector, and insert a new cell at the front? (Perhaps we could use some sort of fancy circular index system, where we just shift over the head index and change the last (now first) coordinate... but that's probably a bit overcomplicated).
{ "domain": "codereview.stackexchange", "id": 40716, "tags": "c++, reinventing-the-wheel, c++17, snake-game, sdl" }
Once a black hole is formed, is there anything other than Hawking radiation which shortens its life?
Question: Hawking radiation is supposed to very slowly evaporate a black hole (terms and conditions apply :] ). Apart from Hawking radiation, is there any mechanism or effect that can make a black hole cease to exist? Or once they are formed are they expected to exist in this form "forever"? PS: I posted the same question to Physics but I think it applies well to both SEs. So I hope it is okay to try here as well. Answer: It can merge with another black hole, if that counts. A simulation of that is illustrated here That aside, no black hole will decay more quickly, as far as I can find out, than the "normal" decay of a non-rotating, uncharged black hole into which nothing is falling. Let's call this the "standard rate of decay". "Feeding it" (allowing any matter or energy to fall into it), will reduce the decay below the standard rate, or even reverse it. Since the CMB pervades space and will fall into the black hole, blocking this (for instance by surrounding the hole by an opaque refrigerated shell) will be necessary to allow it to decay at the standard rate. Indeed for a black hole of mass over roughly $10^{22} kg$ the CMB would feed it faster than it decays, if not blocked. Energy from the spin of the black hole can be extracted using the Penrose process. Since this answer shows that rotation also reduces the emission of Hawking radiation, you can clearly speed up the decay by extracting energy from as much of the spin as possible, but the fastest decay you get is, once again, the standard rate.
{ "domain": "astronomy.stackexchange", "id": 3208, "tags": "black-hole" }
make eclipse-project roslib error
Question: Greeting, I am trying to import sbpl into eclipse. I am getting following error. :/opt/ros/diamondback/stacks/motion_planners/sbpl$ make eclipse-project mv Makefile Makefile.ros cmake -G"Eclipse CDT4 - Unix Makefiles" -Wno-dev . -- The C compiler identification is GNU -- The CXX compiler identification is GNU -- Check for working C compiler: /usr/bin/gcc -- Check for working C compiler: /usr/bin/gcc -- works -- Detecting C compiler ABI info -- Detecting C compiler ABI info - done -- Check for working CXX compiler: /usr/bin/c++ -- Check for working CXX compiler: /usr/bin/c++ -- works -- Detecting CXX compiler ABI info -- Detecting CXX compiler ABI info - done [rosbuild] Building package sbpl [rosbuild] Error from syntax check of sbpl/manifest.xml Traceback (most recent call last): File "<string>", line 1, in <module> ImportError: No module named roslib.manifest CMake Error at /opt/ros/diamondback/ros/core/rosbuild/private.cmake:87 (message): [rosbuild] Syntax check of sbpl/manifest.xml failed; aborting Call Stack (most recent call first): /opt/ros/diamondback/ros/core/rosbuild/public.cmake:154 (_rosbuild_check_manifest) CMakeLists.txt:4 (rosbuild_init) -- Configuring incomplete, errors occurred! make: *** [eclipse-project] Error 1 I am getting it with all the packages. Earlier everything was fine! Could it be because of some change in settings by ROS Electric. I installed Electric from Ubuntu debs. echo $PYTHONPATH /opt/ros/diamondback/ros/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games::/opt/ros/diamondback/ros/core/roslib echo $ROS_PACKAGE_PATH /opt/ros/diamondback/stacks Could it be related to PYTHONPATH variable ?? prince Originally posted by prince on ROS Answers with karma: 660 on 2012-03-29 Post score: 0 Answer: I noticed two things which are strange in your output: You talk about Electric, but your paths show Diamondback. Do the errors persist after running the following command? 
source /opt/ros/electric/setup.bash Probably unrelated to your specific problem, but still important: You seem to be running make eclipse-project in a directory outside of your home directory, specifically /opt/ros/diamondback/.... This is evil. This directory tree should not be writable by a normal user, because it is managed by the Debian package manager (in the best case, Linux will simply refuse to let you change files there; in the worst case, you have changed the permissions or run the commands as root to bypass that security precaution). Debian updates will silently overwrite your changes, and more importantly, if you mess up something there, the resulting bugs are hard to track. If you have already changed something there, please remove all ROS-related Debian packages, delete /opt/ros/... and start over. Whenever you feel the need to edit something in /opt/..., instead copy the package/stack over to your home directory first. Just to make sure, I don't mean to sound rude or impatient; non-writability of /opt/ros/... is a concept that many people get wrong, but I've seen it to cause pain a lot. :-) Originally posted by Martin Günther with karma: 11816 on 2012-04-13 This answer was ACCEPTED on the original site Post score: 1 Original comments Comment by prince on 2012-04-13: i agree about non-writability of /opt/ros/.. . It is just that while importing the project in eclipse, i make a local copy in my workspace. Your suggestion of making a copy of package itself also makes sense. Comment by prince on 2012-04-13: I have both releases, Electric and Diamondback, installed. Problem started after electric installation ! Somehow my pythonpath got screwed. later i added ${ROS_ROOT}/ros/core/roslib/src to PYTHONPATH and it worked. Comment by Martin Günther on 2012-04-13: You can have both Electric and Diamondback installed in parallel. By sourcing the correct setup.bash file (as explained above), your Pythonpath should be set correctly.
{ "domain": "robotics.stackexchange", "id": 8784, "tags": "eclipse" }
Is a MSC Adams simulation with smaller steps always more reliable?
Question: In a given simulation in MSC Adams, if I use a 1 or 0.1 step increment, I get the (favorable) result I intuitively assumed would occur, but if I use step increments of 0.01 or 0.001 a different (and unfavorable) result occurs. One would assume the finer-detailed result would generate fewer simulation false-result-inducing artifacts. Should I always assume the more granular simulation will result the more realistic and reliable result? Answer: This is a common issue on all numerical methods (not just multibody problems), i.e. the tradeoff between the following three types of error: Round off error Discretization Truncation (sometimes the 2 and 3 are included in the same term). All of them are forms of quantization error. These three types have a dependence on mesh size (either spatial or temporal). You can see their dependence, in the following image. As you reduce the time step (or mesh size) the discretization error decreases, however then what happens is that you get the round off error to increase. Usually, it's the balance between those two that dominates the total error.
{ "domain": "engineering.stackexchange", "id": 3578, "tags": "simulation" }
On the equivalence of the Schrödinger and Heisenberg (and all other) pictures
Question: The Schrödinger and Heisenberg (and, indeed, infinitely many other pictures) are often referred to as equivalent descriptions of quantum dynamics in a given system. I'm wondering two things in particular: What exactly do we mean by equivalent? What is the proof of their equivalence? Elaborating briefly now, I think that the answer to (1) is simply that they reproduce the same predictions (i.e. probabilities) for all observable experiments. That then begets question number (2); how does one prove that this is indeed the case? In particular, it's usually shown (quite simply) in textbooks that expectation values are preserved under different shifts to different pictures. However, that is not sufficient to say that they are equivalent. Is there a most general statement/proof of this commonly made (and admittedly intuitively expected) claim? Answer: Consider two operators $A$, $B$ such that $B=UAU^\dagger $ $|e_k\rangle$ and $U$ is unitary. If the eigentstates of $A$ are $$A|e_k\rangle=\mu_k|e_k\rangle$$ the states $|e'_k\rangle=U|e_k\rangle$ are the eigenstates of $B$ (and vice versa): $$B|e_k\rangle=UAU^\dagger(U|e_k\rangle)=U\mu_k|e_k\rangle=\mu_k|e'_k\rangle$$ Let $O(t)$ be an observable in the Heisenberg picture and $S(t,t_0)$ the time evolution such that: $$O(t)=S^{-1}(t,t_0)O(t_0)S(t,t_0)$$ Now let $|n(t)\rangle$ be an eigenstate of $O(t)$, in the Heisenberg picture to get the probability to measure at time $t$ the eigenvalue associated (at any time) with the eigenstate $|n\rangle$ you calculate: $$\langle n(t)|\psi(t_0)\rangle$$ Referring to the proof above $S^{-1}=U$ so: $$ |n(t)\rangle= S(t,t_0)^{-1} |n(t_0)\rangle$$ and: $$\langle n(t)|\psi(t_0)\rangle =\langle n(t_0)|S(t,t_0)|\psi(t_0)\rangle$$ as in the Schroedinger picture.
{ "domain": "physics.stackexchange", "id": 88969, "tags": "quantum-mechanics, observables, time-evolution, quantum-states" }
How can the surface of a conducting shell be equipotential when a charge is introduced inside the cavity?
Question: Here is the confusion. Take a spherical conducting shell of a certain thickness. When a postive charge is introduced into the shell the inside surface collects negative charges and the outter surface postive charges. So far so good. Now gauss' law tells be there is no field in the conducting shell and thus its outter and inner surfaces must be equipotential. However the inside surface has negative charges and thus is at a negative potential and the outter surface has postive charges thus is at a postive potential. And thus the inner and outter surfaces have opposite signs for potential and thus are different. This seems to falsely contradict the earlier gauss' law conclusion which I know is correct. Where did I go wrong? Answer: When a positive charge is introduced into the shell the inside surface collects negative charges and the outer surface positive charges. These are induced charges and the magnitudes of the positive and negative induced charges are the same. These induced charges set up electric fields inside the conducting shell which exactly cancel out the electric field produced inside the conducting shell by the positive charge. Thus there is no potential difference between the inner and outer surfaces of the conductor. Even though there are negative charges on the inside surface of the conducting sphere the potential of that inside surface would be positive relative to infinity (taken as the zero of potential) as a test positive charge would have to do no work whilst moving though the conducting shell and the outside of the shell is at a positive potential.
{ "domain": "physics.stackexchange", "id": 56933, "tags": "electrostatics, conductors" }
Doppler shift in satellite communication systems
Question: Does someone have a table with doppler shift range for satellite communication systems? Is it S band or X band? I have seen somewhere, but I can't find it. Answer: I would recommend playing around with one of the predict libraries e.g. https://github.com/nsat/pypredict You can go find a TLE of a satellite you're interested in from a public database, and enter ground coordinates and time to compute the relative velocity (doppler offset), acceleration (doppler rate), jerk (doppler acceleration) etc. To put some numbers to it, I typically consider S-band (2GHz) doppler at LEO to be a maximum of around +/- 100 kHz or so (corresponds to around 15 km/sec of relative velocity). But it really depends on your orbit, altitude, and ground location - which is why I recommend the above tool. With a typical overhead pass of a satellite over a ground station, the doppler profile will look like an inverted S-curve. It will start off with a maximal positive doppler shift (i.e. maximal relative velocity) as the satellite comes up over the horizon. The shift will eventually start decreasing quickly as the satellite passes over head (this is where the doppler offset is zero and the satellite acceleration (doppler rate) is maximal) until it heads back toward the horizon and towards its maximal negative doppler shift.
{ "domain": "dsp.stackexchange", "id": 11011, "tags": "doppler" }
Reading data with different but similar semantics
Question: I am writing code that reads data from a DB, where boolean values are stored as 0/1 nullable integers. I decided to write a simple function to encapsulate the logic which converts the integer values to boolean. Thing is, I sometimes need to regard the db null value as boolean false, and sometimes just as null. So I've come up with the following two extension methods: // Read the specified column value from row as nullable boolean, // treating integer 0 as false, and any other non-null value as true. public static bool? ReadAsBoolNullable(this DataRow row, string columnName) { if (row.IsNull(columnName)) { return null; } else { return row[columnName] != 0; } } // Read the specified column value from row as boolean, // treating null and integer 0 as false, and any other value as true. public static bool ReadAsBool(this DataRow row, string columnName) { bool? data = row.ReadAsBoolNullable(columnName); return data.HasValue ? data.Value : false; } My main concern about this code is that I potentially have hundreds of millions of records, each having up to 10 boolean (not nullable) columns. So the second function will be executed a lot and will have to call the first one a lot. Could this damage performance significantly, given that the function itself doesn't do much? Answer: As with all performance related questions, if and only if you can gather profiling evidence that shows that your current code is meaningfully detrimental to performance, don't call the first method in the second one, but rather repeat the code. This goes slightly against DRY (just barely, considering the very short code), but you will have to weigh the value of maintainability vs that of whatever performance gain is achieved. In practice, I suspect that the effect on performance will be negligible if not non-existent. It's not as if you're not calling a slew of other methods and properties anyways. The key point is profile profile profile.
{ "domain": "codereview.stackexchange", "id": 15957, "tags": "c#, performance" }
If a water molecule is neutral, how do water molecules attract one another by electric force and form water?
Question: If a water molecule is neutral, how do water molecules attract one another by electric force and form water? This makes no sense to me (I'm new to physics) and my textbook didn't explain this well. Answer: The water molecule is neutral on overall basis, i.e., the water molecule as a whole has no net charge. The water molecule is not linear rather it has a bent shape with two hydrogens on the same side. This happens because of the lone pair-bond pair repulsions. The oxygen is a more electronegative element than hydrogen, i.e., oxygen has high electron-attracting power. Therefore, it attracts the bond pair electrons towards itself which gives a partial negative charge to the oxygen and a partial positive charge to the hydrogen. This gives a possibility of the positive part of a molecule being attracted towards the negative part of another molecule. This is how water molecules attract each other. The bonds formed between the hydrogens and the oxygen are termed as hydrogen bonds and these bonds are quite strong, which is why water with very low molecular mass has unusually high melting and boiling point. As a matter of fact, even molecules with zero dipole moment can also attract each other. There exist weak Van der Waals forces (London Dispersion Forces) which are caused by induced dipoles. This is responsible for helium to stay in liquid form at 4K. London Dispersion Force - Induced Dipole Forces
{ "domain": "physics.stackexchange", "id": 31590, "tags": "electrostatics, water, physical-chemistry, molecules, dipole" }
Are CNNs insensitive to rotations and shifts in images?
Question: Can CNNs predict well if they are trained on canonical-like images but tested on a version of images that are a little bit shifted? I tried it using mnist dataset and found the contrary. The accuracy of the test set that was shifted was very low as compared to MLPs. Answer: If you use max-pooling layers, they may be insensitive to small shifts but not that much. If you want your network to be able to be invariant to transformations, such as translations and shifts or other types of customary transformations, you have two solutions, at least as far as I know: Increasing the size of data-set Using spatial transformers Take a look at What is the state-of-the art ANN architecture for MNIST and Why do convolutional neural networks work. Thanks to one of our friends, another way is to use transfer learning after data-augmentation.
{ "domain": "datascience.stackexchange", "id": 3556, "tags": "machine-learning, neural-network, deep-learning, cnn, mnist" }
Attribute error: type object 'type' has no attribute '_TYPE_SUPPORT'
Question: This is the main portion of the error I am getting, AttributeError: type object 'type' has no attribute '_TYPE_SUPPORT' This might be a ROS 1 message type but it should be a ROS 2 message type. Make sure to source your ROS 2 workspace after your ROS 1 workspace. Here is my full traceback, Traceback (most recent call last): File "/home/mrumel/fisher_agc/AGCROS/install/server/lib/server/where_go", line 11, in <module> load_entry_point('server==0.0.0', 'console_scripts', 'where_go')() File "/home/mrumel/fisher_agc/AGCROS/install/server/lib/python3.8/site-packages/server/where_go.py", line 30, in main where_go = WhereGo() File "/home/mrumel/fisher_agc/AGCROS/install/server/lib/python3.8/site-packages/server/where_go.py", line 12, in __init__ self.srv = self.create_service(WhereGo, 'get_destination', self.where_go_callback) File "/opt/ros/foxy/lib/python3.8/site-packages/rclpy/node.py", line 1295, in create_service check_for_type_support(srv_type) File "/opt/ros/foxy/lib/python3.8/site-packages/rclpy/type_support.py", line 20, in check_for_type_support ts = msg_type.__class__._TYPE_SUPPORT AttributeError: type object 'type' has no attribute '_TYPE_SUPPORT' This might be a ROS 1 message type but it should be a ROS 2 message type. Make sure to source your ROS 2 workspace after your ROS 1 workspace. I don't have ROS1 installed on this machine at all so I do not know why It may be suggesting that that is the issue. 
Here is my Environment variables, ROS_VERSION=2 SESSION_MANAGER=local/ROS-Host-Test:@/tmp/.ICE-unix/1687,unix/ROS-Host-Test:/tmp/.ICE-unix/1687 ROS_PYTHON_VERSION=3 PWD=/home/mrumel/fisher_agc/AGCROS AMENT_PREFIX_PATH=/home/mrumel/fisher_agc/AGCROS/install/station:/home/mrumel/fisher_agc/AGCROS/install/server:/home/mrumel/fisher_agc/AGCROS/install/mach2api:/home/mrumel/fisher_agc/AGCROS/install/agc_interfaces:/home/mrumel/fisher_agc/AGCROS/install/AGC:/opt/ros/foxy CMAKE_PREFIX_PATH=/home/mrumel/fisher_agc/AGCROS/install/agc_interfaces COLCON_PREFIX_PATH=/home/mrumel/fisher_agc/AGCROS/install PYTHONPATH=/home/mrumel/fisher_agc/AGCROS/install/station/lib/python3.8/site-packages:/home/mrumel/fisher_agc/AGCROS/install/server/lib/python3.8/site-packages:/home/mrumel/fisher_agc/AGCROS/install/mach2api/lib/python3.8/site-packages:/home/mrumel/fisher_agc/AGCROS/install/agc_interfaces/lib/python3.8/site-packages:/home/mrumel/fisher_agc/AGCROS/install/AGC/lib/python3.8/site-packages:/opt/ros/foxy/lib/python3.8/site-packages LD_LIBRARY_PATH=/home/mrumel/fisher_agc/AGCROS/install/agc_interfaces/lib:/opt/ros/foxy/opt/yaml_cpp_vendor/lib:/opt/ros/foxy/opt/rviz_ogre_vendor/lib:/opt/ros/foxy/lib/x86_64-linux-gnu:/opt/ros/foxy/lib ROS_LOCALHOST_ONLY=0 ROS_DISTRO=foxy Originally posted by maxunm on ROS Answers with karma: 35 on 2021-06-24 Post score: 0 Answer: I will be adding this answer here for anyone who has this issue in the future... It seems that colcon/ros didn't like the name I had used for my .srv file (WhereGo) and I rebased it to GetDestination and it all magically fixed itself it seems. Originally posted by maxunm with karma: 35 on 2021-06-24 This answer was ACCEPTED on the original site Post score: 2 Original comments Comment by kramer on 2022-06-23: I had a similar error with an action client. 
My (unthinking) mistake was in passing the action type as its name rather than the type (i.e., cli = ActionClient(node, 'example/action/Fibonacci', 'fibonacci') when it should have been cli = ActionClient(node, Fibonacci, 'fibonacci')). Comment by Tempest on 2022-08-09: Same... I renamed my srv file to 'Oec.srv' from 'OddEvenCheck,srv' and ros ran its abraCaDabra.... weird....
{ "domain": "robotics.stackexchange", "id": 36573, "tags": "ros2" }
DRY-ing up some rspec
Question: I have an rspec spec test that has two sets of the same tests (from 24 Pull Requests). I thought I'd refactor the spec whilst I fixed something else. Normally I just do a basic each ... do array to DRY things up: require 'spec_helper' describe ReminderMailer, type: :mailer do # Half the code yay! :D ['daily','weekly'].each do |time_format| describe time_format do let(:time_format) { time_format } let(:user) do mock_model(User, nickname: 'David', email: 'david@example.com', languages: ['Ruby'], skills: [], pull_requests: double(:pull_request, year: []), suggested_projects: []) end # How do I do this bit better? let(:mail) { if time_format == 'daily' ReminderMailer.daily(user) else ReminderMailer.weekly(user) end } end I couldn't figure out how to use the time_format with the send command, so I could do something like: let(:mail) { ReminderMailer.send(timeformat(user) } How could I do this? Answer: The snippet you asked to re-write: let(:mail) { ReminderMailer.send(time_format, user) } Now, IMO using each to write specs is a bad practice. Why? Because rspec already provides idiomatic infrastructure to keep specs DRYed: use before and shared_examples. I'd write: describe ReminderMailer, type: :mailer do let(:user) { ... 
} shared_examples "a reminder mailer" do |subject:, body:| it 'renders the subject' do expect(mail.subject).to eq(subject) end it 'renders the receiver email' do expect(mail.to).to eq([user.email]) end it 'renders the sender email' do expect(mail['From'].to_s).to eq('24 Pull Requests <info@24pullrequests.com>') end it 'uses nickname' do expect(mail.body.encoded).to match(user.nickname) end it 'contains periodicity in body' do expect(mail.body.encoded).to match(body) end end describe 'daily' do let(:mail) { ReminderMailer.daily(user) } it_behaves_like "a reminder mailer", subject: '[24 Pull Requests] Daily Reminder', body: 'today' end describe 'weekly' do let(:mail) { ReminderMailer.weekly(user) } it_behaves_like "a reminder mailer", subject: '[24 Pull Requests] Weekly Reminder', body: 'week' end end
{ "domain": "codereview.stackexchange", "id": 11021, "tags": "ruby, ruby-on-rails, rspec" }
Does FPT allow for doubling the parameter?
Question: I have recently come across a result that showed that a given problem is in FPT when parameterized by the treewidth of a graph. However, they did this by showing that the problem is in FPT when parameterized by the treewidth of an augmented version of the graph that had treewidth of at most 2k+1 when the treewidth of the original graph is k. Why is it possible to do this? Answer: Let $k$ be the parameter. If an algorithm has a running time of the form $O(f(2k+1) \cdot \text{poly}(n))$ for a suitable function $f$, then it also has a running time of $O(g(k) \cdot \text{poly}(n))$ for $g(k) = f(2k+1)$.
{ "domain": "cs.stackexchange", "id": 19961, "tags": "algorithms, complexity-theory, parameterized-complexity, treewidth" }
How to map PDB chains to Uniprot IDs using API services
Question: I have a lot of PDB IDs and I need to get uniprot fasta sequences of these PDB IDs special chains by API services. For example, imagine that I need to get fasta sequence of '1kf6' 'A' chain. The uniprot entry (Accession) for this '1kf6' chain is 'P00363'. If I find a way to get this entry, I can simply retrieve its fasta sequence using bioservices package: from bioservices import * u = UniProt() sequence = u.retrieve("P00363","fasta") print(sequence) The main problem is that I don't know how can I find this uniprot entry("P00363" here) for each chains in PDB IDs(here I imagine that I don't know this "P00363" is the uniprot entry for '1kf6' 'A' chain and I'm trying to find it using API services). I tried to use ID mapping. In ID mapping cross-references, the chain name for each entry is specified and if I can add a cross-reference column to u.search('1kf6', columns="id,entry name,length, genes,pdb"), I can simply find the entry related to my favourite chain. The following code returns some entries each related to one of '1kf6' chains. The problem is that I don't know exactly which entry is related to 'A' chain that here is my favorite chain. 
In the following code, id searches for entry and other parameters search for sth else related to them: from bioservices import * u = UniProt() res = u.search("1kf6",columns="id,entry name,length, genes") print(res) Each one of the following words can be used and added to columns part: 'id', 'entry name', 'genes', 'genes(PREFERRED)', 'genes(ALTERNATIVE)', 'genes(OLN)', 'genes(ORF)', 'organism', 'organism-id', 'protein names', 'proteome', 'lineage(ALL)', 'lineage-id', 'virus hosts', 'fragement', 'sequence', 'length', 'mass', 'encodedon', 'comment(ALTERNATIVE PRODUCTS)', 'comment(ERRONEOUS GENE MODEL PREDICTION)', 'comment(ERRONEOUS INITIATION)', 'comment(ERRONEOUS TERMINATION)', 'comment(ERRONEOUS TRANSLATION)', 'comment(FRAMESHIFT)', 'comment(MASS SPECTROMETRY)', 'comment(POLYMORPHISM)', 'comment(RNA EDITING)', 'comment(SEQUENCE CAUTION)', 'feature(ALTERNATIVE SEQUENCE)', 'feature(NATURAL VARIANT)', 'feature(NON ADJACENT RESIDUES)', 'feature(NON STANDARD RESIDUE)', 'feature(NON TERMINAL RESIDUE)', 'feature(SEQUENCE CONFLICT)', 'feature(SEQUENCE UNCERTAINTY)', 'version(sequence)', 'domains', 'domain', 'comment(DOMAIN)', 'comment(SIMILARITY)', 'feature(COILED COIL)', 'feature(COMPOSITIONAL BIAS)', 'feature(DOMAIN EXTENT)', 'feature(MOTIF)', 'feature(REGION)', 'feature(REPEAT)', 'feature(ZINC FINGER)', 'ec', 'comment(ABSORPTION)', 'comment(CATALYTIC ACTIVITY)', 'comment(COFACTOR)', 'comment(ENZYME REGULATION)', 'comment(FUNCTION)', 'comment(KINETICS)', 'comment(PATHWAY)', 'comment(REDOX POTENTIAL)', 'comment(TEMPERATURE DEPENDENCE)', 'comment(PH DEPENDENCE)', 'feature(ACTIVE SITE)', 'feature(BINDING SITE)', 'feature(DNA BINDING)', 'feature(METAL BINDING)', 'feature(NP BIND)', 'feature(SITE)', 'go', 'go(biological process)', 'go(molecular function)', 'go(cellular component)', 'go-id', 'interpro', 'interactor', 'comment(SUBUNIT)', 'citation', 'citationmapping', 'created', 'last-modified', 'sequence-modified', 'version(entry)', '3d', 'feature(BETA STRAND)', 
'feature(HELIX)', 'feature(TURN)', 'comment(SUBCELLULAR LOCATION)', 'feature(INTRAMEMBRANE)', 'feature(TOPOLOGICAL DOMAIN)', 'feature(TRANSMEMBRANE)', 'annotation score', 'score', 'features', 'comment(CAUTION)', 'comment(TISSUE SPECIFICITY)', 'comment(GENERAL)', 'keywords', 'context', 'existence', 'tools', 'reviewed', 'feature', 'families', 'subcellular locations', 'taxonomy', 'version', 'clusters', 'comments', 'database', 'keyword-id', 'pathway', 'score' I used some of the above words in columns part but they couldn't retrieve chain name. Anyone know which one of the above parameters can give me the chain name for this PDB ID? or anyone has any other idea for finding uniprot entry(UniprotKB AC) related to each PDB ID chain by API services? Answer: I found the answer and I thought it may be useful for others. we can use pypdb package for that purpose (it works in unix systems): import pypdb all_info = pypdb.get_all_info('1kf6') print(all_info) the output is as follows: {'polymer': [{'@entityNr': '1', '@length': '602', '@type': 'protein', '@weight': '66057.6', 'chain': [{'@id': 'A'}, {'@id': 'M'}], 'Taxonomy': {'@name': 'Escherichia coli', '@id': '562'}, 'macroMolecule': {'@name': 'Fumarate reductase flavoprotein subunit', 'accession': {'@id': 'P00363'}}, 'polymerDescription': {'@description': 'FUMARATE REDUCTASE FLAVOPROTEIN'}, 'enzClass': {'@ec': '1.3.5.4'}}, {'@entityNr': '2', '@length': '243', '@type': 'protein', '@weight': '27021.9', 'chain': [{'@id': 'B'}, {'@id': 'N'}], 'Taxonomy': {'@name': 'Escherichia coli', '@id': '562'}, 'macroMolecule': {'@name': 'Fumarate reductase iron-sulfur subunit', 'accession': {'@id': 'P0AC47'}}, 'polymerDescription': {'@description': 'FUMARATE REDUCTASE IRON-SULFUR PROTEIN'}, 'enzClass': {'@ec': '1.3.5.1'}}, {'@entityNr': '3', '@length': '130', '@type': 'protein', '@weight': '14898.8', 'chain': [{'@id': 'C'}, {'@id': 'O'}], 'Taxonomy': {'@name': 'Escherichia coli', '@id': '562'}, 'macroMolecule': {'@name': 'Fumarate reductase 
subunit C', 'accession': {'@id': 'P0A8Q0'}}, 'polymerDescription': {'@description': 'FUMARATE REDUCTASE 15 KDA HYDROPHOBIC PROTEIN'}}, {'@entityNr': '4', '@length': '119', '@type': 'protein', '@weight': '13118.9', 'chain': [{'@id': 'D'}, {'@id': 'P'}], 'Taxonomy': {'@name': 'Escherichia coli', '@id': '562'}, 'macroMolecule': {'@name': 'Fumarate reductase subunit D', 'accession': {'@id': 'P0A8Q3'}}, 'polymerDescription': {'@description': 'FUMARATE REDUCTASE 13 KDA HYDROPHOBIC PROTEIN'}}], 'id': '1KF6'} This output gives all the information related to all chains and we can simply find uniprotkb AC of each chain in this output. I also found another answer for this question. This answer can be found here and the following code: import requests from xml.etree.ElementTree import fromstring pdb_id = '1kf6.A' pdb_mapping_url = 'http://www.rcsb.org/pdb/rest/das/pdb_uniprot_mapping/alignment' uniprot_url = 'http://www.uniprot.org/uniprot/{}.xml' def get_uniprot_accession_id(response_xml): root = fromstring(response_xml) return next( el for el in root.getchildren()[0].getchildren() if el.attrib['dbSource'] == 'UniProt' ).attrib['dbAccessionId'] def get_uniprot_protein_name(uniport_id): uinprot_response = requests.get( uniprot_url.format(uniport_id) ).text return fromstring(uinprot_response).find('.//{http://uniprot.org/uniprot}recommendedName/{http://uniprot.org/uniprot}fullName' ).text def map_pdb_to_uniprot(pdb_id): pdb_mapping_response = requests.get( pdb_mapping_url, params={'query': pdb_id} ).text uniprot_id = get_uniprot_accession_id(pdb_mapping_response) uniprot_name = get_uniprot_protein_name(uniprot_id) return { 'pdb_id': pdb_id, 'uniprot_id': uniprot_id, 'uniprot_name': uniprot_name } print (map_pdb_to_uniprot(pdb_id)) The output of this code is as follows: {'pdb_id': '1kf6.A', 'uniprot_id': 'P00363', 'uniprot_name': 'Fumarate reductase flavoprotein subunit'}
{ "domain": "bioinformatics.stackexchange", "id": 2266, "tags": "fasta, python, pdb, api" }
Proof that two lines intersect each other at precisely one point in Idris
Question: I recently attempted to prove that given two lines, they intersect at one and only one point in Idris. Here is what I came up with: interface (Eq line, Eq point) => Plane line point where -- Abstract notion for saying three points lie on the same line. colinear : point -> point -> point -> Bool coplanar : point -> point -> point -> Bool contains : line -> point -> Bool -- Intersection between two lines intersects_at : line -> line -> point -> Bool -- If two lines l and m contain a point a, they intersect at that point. intersection_criterion : (l : line) -> (m : line) -> (a : point) -> (contains l a = True) -> (contains m a = True) -> (intersects_at l m a = True) -- If l and m intersect at a point a, then they both contain a. intersection_result : (l : line) -> (m : line) -> (a : point) -> (intersects_at l m a = True) -> (contains l a = True, contains m a = True) -- For any two distinct points there is a line that contains them. line_contains_two_points : (a :point) -> (b : point) -> (a /= b) = True -> (l : line ** (contains l a = True, contains l b = True )) -- If two points are contained by l and m then l = m two_pts_define_line : (l : line) -> (m : line) -> (a : point) -> (b : point) -> ((a /= b) = True) -> contains l a = True -> contains l b = True -> contains m a = True -> contains m b = True -> ((l == m) = True) same_line_same_pts : (l : line) -> (m : line) -> (a : point) -> (b : point) -> ((l /= m) = True) -> contains l a = True -> contains l b = True -> contains m a = True -> contains m b = True -> ((a == b) = True) -- There exists 3 non-colinear points. three_non_colinear_pts : (a : point ** b : point ** c : point ** (colinear a b c = False, (a /= b) = True, (b /= c) = True, (a /= c) = True)) -- Any line contains at least two points. contain_two_pts : (l : line) -> (a : point ** b : point ** (contains l a = True, contains l b = True)) -- If two lines intersect at a point and they are not identical, that is the o- -- nly point they intersect at. 
intersect_at_most_one_point : Plane line point => (l : line) -> (m : line) -> (a : point) -> (b : point) -> ((l /= m) = True) -> (intersects_at l m a = True) -> (intersects_at l m b = True) -> ((a == b) = True) intersect_at_most_one_point l m a b l_not_m int_at_a int_at_b = same_line_same_pts l m a b l_not_m (fst (intersection_result l m a int_at_a)) (fst (intersection_result l m b int_at_b)) (snd (intersection_result l m a int_at_a)) (snd (intersection_result l m b int_at_b)) My main concerns is that I have an "inefficient" (possibly incorrect?) formulation of the axioms. The code runs and compiles, but it feels like for instance, somehow, intersection_criterion and intersection_result could be somehow made into one axiom. Nevertheless, any advice is appreciated. (This is also on Github here in the file hilbert.idr) Answer: It still may be far from perfect, but I asked a question on Stackoverflow related to this, and received an excellent answer from xash. I have attempted to cure my Boolean blindness. If I understand correctly, the operator == asks the question "Is the left hand side equal to the right hand side?" Instead, I want to say: "I have a proof that this holds." To fix this, I changed a lot of my functions like colinear to more closely match the (=) signature: colinear : point -> point -> point -> Bool becomes: Colinear : point -> point -> point -> Type Also, I removed a /= b and instead replaced it with Not (a = b). Here was the resulting code: interface Plane line point where -- Abstract notion for saying three points lie on the same line. Colinear : point -> point -> point -> Type Coplanar : point -> point -> point -> Type Contains : line -> point -> Type -- Intersection between two lines IntersectsAt : line -> line -> point -> Type -- If two lines l and m contain a point a, they intersect at that point. 
intersection_criterion : (l : line) -> (m : line) -> (a : point) -> Contains l a -> Contains m a -> IntersectsAt l m a -- If l and m intersect at a point a, then they both contain a. intersection_result : (l : line) -> (m : line) -> (a : point) -> IntersectsAt l m a -> (Contains l a, Contains m a) -- For any two distinct points there is a line that contains them. line_contains_two_points : (a : point) -> (b : point) -> Not (a = b) -> (l : line ** (Contains l a, Contains l b)) -- If two points are contained by l and m then l = m two_pts_define_line : (l : line) -> (m : line) -> (a : point) -> (b : point) -> Not (a = b) -> Contains l a -> Contains l b -> Contains m a -> Contains m b -> (l = m) same_line_same_pts : (l : line) -> (m : line) -> (a : point) -> (b : point) -> Not (l = m) -> Contains l a -> Contains l b -> Contains m a -> Contains m b -> (a = b) -- There exists 3 non-colinear points. three_non_colinear_pts : (a : point ** b : point ** c : point ** (colinear a b c = False, Not (a = b), Not (b = c), Not (a = c))) -- Any line contains at least two points. contain_two_pts : (l : line) -> (a : point ** b : point ** (Contains l a, Contains l b)) -- If two lines intersect at a point and they are not identical, that is the o- -- nly point they intersect at. intersect_at_most_one_point : Plane line point => (l : line) -> (m : line) -> (a : point) -> (b : point) -> Not (l = m) -> IntersectsAt l m a -> IntersectsAt l m b -> (a = b) intersect_at_most_one_point l m a b l_not_m int_at_a int_at_b = same_line_same_pts l m a b l_not_m (fst (intersection_result l m a int_at_a)) (fst (intersection_result l m b int_at_b)) (snd (intersection_result l m a int_at_a)) (snd (intersection_result l m b int_at_b))
{ "domain": "codereview.stackexchange", "id": 31051, "tags": "idris" }
Angular distance Vs Angular displacement
Question: Are the two terms - 'Angular distance', and, 'Angular displacement' interchangeable for a uniform circular motion? Answer: I don't think so - two and a half rotations would be the angular distance, while the angular displacement would still be a half rotation.
{ "domain": "physics.stackexchange", "id": 77229, "tags": "kinematics" }
relationship between ROS and robot hardware
Question: Hello, I just got started with ROS and I have a quick clarification question before I get myself into the wrong mindset about thinking of ROS. Please let me know if any of the following are correct/incorrect/half-correct - thanks in advance! I want to build a teleoperated robot. I have a 'laptop' and a 'workstation' from which I wish to control the robot. scenario 1: install ROS on 'laptop'. SSH into laptop from workstation to control robot. This is like Turtlebot. scenario 2: install ROS on 'workstation', use a Bluetooth/Wireless stack on 'workstation' to receive input from robot, make complex decisions, and send commands to differential drives & whatnot. Is the 'laptop' still necessary on the robot (translating bluetooth messages to the actuators)? Is this more like NXT, where the NXT brick doesn't actually have ROS loaded on it but rather is being controlled by a ROS-powered workstation via bluetooth stack? scenario 2 (continued): if ROS is not installed on the physical robot (teleop), what components of ROS would I actually use on the workstation? The NXT brick would handle all the basic moving & sensing, and the workstation only has to use ROS bluetooth to handle data? Originally posted by ejang on ROS Answers with karma: 3 on 2012-02-13 Post score: 1 Answer: Both of those options are "correct". There is not a correct way to handle this kind of system. It's entirely based on your application's needs. For example, many quadrotors don't have ROS loaded on the bot itself. They run a dedicated firmware that was specifically designed for efficiency and speed. Since they provide a communications protocol, one must simply write a ROS node that translates ROS commands into simple drive commands. All complex decision-making is performed on the "workstation". This is similar to the NXT scenario. You do all of the major processing off the robot and then let the dedicated hardware (the "brick") do all of the low-level stuff. There is no "right" way. 
On my robot, we have two PCs that communicate as a single system. We then connect to those PCs via ssh to add additional computation power to the system. It also depends how much control you want over hardware. You can either access hardware through the built-in controls (on the "brick") via a programming API, or you can control the drivers directly with the "laptop" configuration. Another thought to consider is latency. If the laptop (local) does most of the base work, then latency matters less. If you're using scenario 2, EVERY command must be sent across the wireless signal. If latency is bad, robot performance will be directly affected. Originally posted by DimitriProsser with karma: 11163 on 2012-02-14 This answer was ACCEPTED on the original site Post score: 3
{ "domain": "robotics.stackexchange", "id": 8224, "tags": "ros, hardware, bluetooth, beginner" }
Signal Estimation after detection Part 2
Question: I have a binary information source signal $s(t)$ that is corrupted by additive White Gaussian noise $w(t)$ at a particular SNR. The received signal is: $$x(t) = s(t) + w(t)$$ Then I have created a matched filter $h(t)$ as the time reversal of the source binary signal $s(t)$. Question: After signal detection, how to estimate the clean signal $s(t)$? I have used the sign() operation. Is that the correct way or should I use sophisticated methods like MLE, LMS etc? In many implementations, a hard threshold zero is used to decode the signal in order to get back the transmitted source symbols $\hat{s}(t)$. Is that the correct way? Is my implementation correct where I have used sign function to estimate the symbols. This is my implementation. Please correct me where wrong. clear all N = 50; input = rand(1,N)>0.5; s=(2*input-1); %input x = awgn(s,15,'measured'); %received noisy signal matched_filter_h = flipud(s); s_hat = sign(filter(matched_filter_h,1,x)); The code output shows that $s(t)$ and $\hat{s}(t)$ are identical. So, it seems that the estimation is possible. Answer: After signal detection, how to estimate the clean signal $s(t)$? Matched filtering is used to detect the presence of a known signal in noise. There is no estimation part when you are talking about a matched filter. The estimate part comes after you have done the matched filter and need to estimate the symbols. It looks like you are talking about a communication system context, but the matched filtering in those systems is not done by matched filtering the symbols as in your example code. For each symbol, the true symbol is unknown and the pulse shape is known, and that is why the matched filter uses the pulse shape to matched filter. Perhaps all that you want to know about matched filtering can be found here, https://dsp.stackexchange.com/a/9389/31316, and you should give it a read. Edit The OP questions about the zero threshold and whether there are other methods. 
That threshold is set through using the maximum likelihood solution, or minimum distance decoding. For the case of BPSK, you want to decide if $x$ contains $s_1=+1$ or $s_2=-1$, and the ML rule is to choose $\hat{s}=s_1$ if: $$ |x-s_1|<|x-s_2| $$ This can also be interpreted as partitioning the IQ diagram into different regions (Voronoi regions), a region for $s_1$ and a different region for $s_2$. These regions end up looking like this (note how for BPSK the threshold line is at zero): For full derivation, see Tse's Wireless Comms book section A.2.1, https://web.stanford.edu/~dntse/Chapters_PDF/Fundamentals_Wireless_Communication_AppendixA.pdf.
{ "domain": "dsp.stackexchange", "id": 9707, "tags": "signal-detection, estimation" }
Rays in Symmetric Resonator
Question: I'm having some trouble figuring out how to get started on this question: If I have a symmetric resonator with two concave mirrors of radii $R$ separated by a certain distance, after how many round trips through the resonator could I expect the ray to retrace its path? It seems to me that this implies some sort of periodic function should exist that models the behavior of a ray in such circumstances. Answer: You should consider what constraints you can place on the ray simply based on the information you have about the resonator. You should also remember what you already know about rays in a stable resonator.
{ "domain": "physics.stackexchange", "id": 5038, "tags": "homework-and-exercises, optics, visible-light, reflection, resonance" }
What is the biological mechanism underlying caffeine intolerance? (CYP1A2 or other?)
Question: As far as I can tell, caffeine metabolism occurs primarily via the CYP1A2 enzyme. I am curious as to whether mutations in the CYP1A2 gene are associated with caffeine intolerance. Some site that is not necessarily reputable suggested that caffeine intolerance is due to absence of the enzyme to metabolize caffeine. I am having difficulty finding evidence either supporting or contradicting this claim. Does anyone know if there is evidence that suggests mutations in CYP1A2 (and therefore inability to properly metabolize caffeine) are associated with caffeine intolerance, or if there is another proposed mechanism for caffeine intolerance? Answer: I have found that caffeine is mostly metabolized over CYP1A2 (as we know), but also over CYP1A1, CYP2E1 and CYP3A4. The question is how much the individual can cope with this alternate pathways if he is a CYP1A2 poor metabolizer. [1] Secondly I found in my pharmaceutics textbook that CYP2A1 activity determination is indicated due to undesirable side effects to caffeine and theophyllin.[2] UPDATE: 19.03.2013 12:30 GMT+1 @dd3: To clarify the second statement: I used the approach that theophyllin and caffeine are similar xanthin derivates. Since theophyllin is used as therapeutic drug in respiratory diseases (COPD etc.), I thought that scientific interest would be greater. And yes CYP1A2 plays an important role in both theophyllin tolerance and intollerance. I found some new sources: At first a study with CYP1A2 knock-out mice, which shows that the elimination of theophyllin takes 4-times longer in CYP1A2(-/-)mice. And they also assume that the same behaviour is also applicable to caffeine. [3] Also there are two clinical studies by Japanese and Turkish scientists. 
The Japanese conclude, that theophyllin should be used with care in CYP1A2 poor metabolizer (even in haplotype poor metabolizer), since theophyllin has a narrow therapeutic range.[4] But the Turkish scientist assume that according to their findings, theophyllin is also metabolized over alternative pathways such as CYP2A13, CYP1A1, CYP2E1, CYP2D6 and CYP3A4. And according to this the genetic status of CYP1A2 is not as important as expected. [5] Conclusion: Assuming that theophyllin and caffeine are metabolized similarly, you can conclude that a mutation in CYP1A2 would lead to caffeine intolerance. In my experience people with caffeine intolerance describe severe nervousness after drinking a cup of coffee. Fatal cases I know were due to high consumption of energy drinks in children without or at least not known caffeine intolerance. Sources [1] http://informahealthcare.com/doi/abs/10.1081/DMR-120001392 [2] german source: http://www.medizinische-genetik.de/index.php?id=2642 [3] Derkenne S, Theophylline pharmacokinetics in mice: http://www.ncbi.nlm.nih.gov/pubmed/15970798 [4] Obase Y, Polymorphisms in the CYP1A2 gene and theophylline metabolism in patients with asthma. http://www.ncbi.nlm.nih.gov/pubmed/12732846 [5] Ahmet U, The effect of CYP1A2 gene polymorphisms on Theophylline metabolism and chronic obstructive pulmonary disease in Turkish patients http://www.jbmb.or.kr Further Reading: Xanthines as therapeutic drugs: http://web.carteret.edu/keoughp/LFreshwater/PHARM/NOTES/Xanthines.htm
{ "domain": "biology.stackexchange", "id": 895, "tags": "genetics, pharmacology, metabolism, enzymes" }
Calculating Angular Acceleration using Torque and Rotational Inertia
Question: I don't understand why my approach to this problem is wrong. Two masses, one with mass $m$, and the other with mass $2m$, are attached to a light rigid rod as shown below. When the system is released from rest, the rod begins to rotate with an angular acceleration of $-g/(9L)$. However, I got $g/(7L)$. My method is using torque, but not inertia and angular acceleration. $$F = ma = m \alpha r$$ $$\tau = mar = m \alpha r \cdot r$$ $$2mgL - 4mgL = 2m \alpha L^2 - 16m \alpha L^2$$ $$\alpha = g/(7L)$$ I do not understand why my approach is wrong. Answer: You are not using Newton's equations per se, you are basically using Newton's equations for rotation when multiplying by the position. Your error comes when you wrote $2-16$, when it's actually $2 + 16$. The minus sign that you wrote comes from the $-4L$, but it's squared, so it goes away. It's the same approach as using $\tau = I \alpha$.
{ "domain": "physics.stackexchange", "id": 75778, "tags": "homework-and-exercises, torque" }
Sorting an int[] array with an Insertion Sort
Question: I'm currently learning about different sorting algorithms and after reading on the concept of the insertion sort of how it's done I've tried to implement it by myself before seeing how it was implemented in the source from which I'm learning. This was my implementation: void insertionSort(int arr[]) { for(int i = 1; i < SIZE; i++) { for(int j = i; j > 0 && arr[j] < arr[j - 1]; j--) { int swap = arr[j]; arr[j] = arr[j - 1]; arr[j - 1] = swap; } } } Now I think I've implemented it correctly (not talking about efficient or anything just the algorithm itself), but when I saw the way it was implemented in the course I'm seeing I had a bit of concerns about my implementation because they're really different and I just wanted to make sure my implementation is really an insertion sort algorithm implementation and I didn't done anything else. Here's the implementation by the source I'm learning from: void insertionSort(int arr[]) { for(int i = 1;, i < SIZE; i++) { int value = arr[i]; int j = i - 1; int done = 0; do { if (arr[j] > value) { arr[j + 1] = arr[j]; j--; if (j < 0) done = 1; } else { done = 1; } } while(!done) arr[j + 1] = value; } } Is my implementation of insertion sort correct ? Also I would appreciate if you could compare my implementation to the one made by the source I'm learning from, any efficiency differences or any other thing you think would worth mentioning. Answer: Your implementation is not quite correct. It is subtle, but what you have is a type of bubble sort in that you do not insert the new value, rather you 'slide' the value in to place. An insertion sort can be thought of as a 'hole'. You shift all the 'bigger' values to the right by one space, and create a hole at the point where the value should be inserted. Then you insert the value in to the hole. There should not be a concept of a 'swap' in an insertion sort. 
You start at the first unsorted value, and then if the previous values are smaller, you move them up one, until you have the space at the right spot. Then you put the value there. The example code makes this 'obvious' (but not really), in that it always compares against value and not arr[j+1]. It also has only a single 'assignment' to the array for each time in the loop. Your swap routine does two assignments on each loop. So, No, your implementation of an insertion sort is not quite correct. It is a correct sort, but not a 'text-book' insertion sort since it slides, rather than inserts the value.
{ "domain": "codereview.stackexchange", "id": 9461, "tags": "c, sorting, insertion-sort" }
Orbits using Newtons laws
Question: I am making a small orbit simulator. I figured out Kepler's laws and know how to work with them to "update" an orbit based on time. So now I want to know how I can describe orbits with newton laws. Given a position vector and a velocity vector of a satellite, What I currently do is this: Calculate gravitational acceleration: (g*mass)/distance^2 Add this number to the velocity vector. Multiply the new velocity vector with the DeltaTime Add it to the position vector Go back to 1. However when converting this to Keplerian Orbit Elements doesn't really make them constant.(Calculations from here: Cartesian To Kepler) Also the orbit isn't really stable, and also wobbles around. So can anyone help me fix this? Or am I doing something wrong? Answer: I like to classify solutions of the problem of the time evolution of the complete initial state of a set of objects at some epoch time, where the objects are subject to Newtonian gravitation into two main groups. One approach is to use orbital elements of some sort. The other is to use a numerical initial value problem solver, aka a numerical integrator. The latter is the primary subject of this answer. Note well: This classification isn't quite perfect as hybrid approaches are also possible, wherein one uses a numerical integrator to integrate time-varying orbital elements. Keplerian orbital elements work quite nicely in the case of two point masses, or more generally, two objects with a spherical mass distribution. The anomaly is the only Keplerian orbital element that changes over time. Keplerian elements can be used in situations where the underlying assumptions are approximately correct by developing a model of how those supposedly unchanging elements vary with time. One way of doing this is to use Lagrange's planetary equations. (There are other related approaches such as Gauss' planetary equations, Delaunay's planetary equations, etc.) 
Lagrange's planetary equations yield expressions for how Keplerian orbital elements vary over time given a set of perturbing forces. Another approach is to use something akin to those Keplerian elements (such as Delaunay elements), coupled with planetary equations for those alternative elements. Yet another approach is to use orbital elements (e.g., Brouwer-Lyddane elements, SGP4 elements) in which the planetary equations are embedded in the orbital element to Cartesian state transformation algorithm. This final approach is used to this day to describe vehicles in Earth orbit. The other approach is to use numerical integration. I'll start with a discussion of how to solve for the value of a scalar function $x(t)$ at some time $t_1$ given an initial value $x(t_0) = x_0$ and some well-behaved (continuous and bounded) derivative function $f(x(t),t) = dx(t)/dt$ that describes the time evolution of $x$. This falls in the very broad category of initial value problems. Suppose the ordinary differential equation cannot be solved analytically and cannot be expressed in terms of a useful power series. This doesn't mean nothing can be done. There are a number of techniques for solving this problem numerically. Note the dependence of the derivative function on the dependent variable $x$. This becomes the much simpler problem of numerical quadrature if the derivative function can be expressed independent of $x$. The discussion that follows assumes that the derivative function $f$ does indeed depend on $x$. Note well: Newtonian gravitation falls in this category. It also assumes the derivative function is well-behaved. Numerically integrating across a discontinuity is a bad idea. The foundation of the integration-based techniques for a scalar function is the mean value theorem, which says that at some time $t_c$ between $t_0$ and $t_1$, the value at $t_1$ is exactly $x(t_1) = x(t_0) + (t_1-t_0)\,f(x(t_c),t_c)$.
If only we could find that magical $t_c$ and the derivative at that point. There's a chicken and egg problem here: that magical point in time is not known. Even if it was, the derivative function depends on state, and that too isn't known. A very simple approach around this problem is to assume that this magical point is the initial point: $$x(t_1) = x(t_0) + (t_1-t_0)f(x(t_0),t_0)$$ This works quite nicely for values of $t_1$ that are very close to $t_0$. It doesn't work very well at all where $|t_1 - t_0|$ is not small. This suggests splitting the interval $(t_0, t_1)$ into a number of smaller intervals. This results in Euler's method: Apply the above to advance state to time $t_0+\Delta t$, then to $t_0+2\Delta t$, and so on, eventually reaching the desired time. Euler's method is rather lousy, even for a simple first order scalar ODE. We can do much better than this. The key reason for discussing Euler's method is that it is the basis for many other integration techniques. Learn how it works, then toss it. One approach to improving on Euler's method is to somehow correct the result from Euler's method. For example, take an Euler step and compute the derivative at the end point. Then use the average of those two derivative values (the original value used to make the Euler step, and the other from the end of the Euler step) to recompute the step from $t$ to $t+\Delta t$. This is Heun's method. Another approach is to guess that the magical point $t_c$ lies somewhere between $t$ and $t+\Delta t$. Perhaps the middle? We can use Euler's method to advance state to the midpoint, and then use the derivative at that point to advance state from $t$ to $t+\Delta t$. This is the midpoint method. Both Heun's method and the midpoint method appear to be steps backwards, computationally. While Euler's method requires but one evaluation of the derivative function per time step, these improved methods require two.
However, the error growth is in general so much smaller with either Heun's method or the midpoint method compared to that from Euler's method. This means that those "improvements" most definitely are improvements. The expense of calling the derivative function twice per step is more than offset by the fact that these improvements enable taking steps that are orders of magnitude larger than one can make with Euler's method. Both Heun's method and the midpoint method are simple improvements. This problem has been studied in many guises. There are many more advanced techniques. One is the class of Runge-Kutta integration techniques. Both Heun's method and the midpoint method fall into this class. The most popular of these, classical Runge-Kutta 4, is a significant improvement on those two methods. There are even higher order Runge-Kutta integrators than RK4. Heun's method also falls into the broad class of predictor-correctors, wherein one method (the predictor) advances state to the end of the interval and another method (the corrector) uses the derivative at this approximate endpoint to correct the guess made by the predictor. The above focused solely on first order ODEs involving a scalar function. What if the problem is multidimensional or involves higher order derivatives? The mathematics of the techniques described above can easily handle multidimensional data: Simply use the vector-valued time derivative. Since a higher order ODE can be converted to a first order ODE via an augmented vector-valued state, the same approaches used to address multidimensional data can also be employed to address higher order ODEs. There's a problem with doing this: It throws out geometry. For example, consider the rather simple first order ODE $\dot x = -y, \dot y = x$. The solution to this multivariate ODE is uniform circular motion.
Applying Euler's method to this results in $$\begin{aligned} x(t+\Delta t) &= x(t) - \Delta t\,y(t) \\ y(t+\Delta t) &= y(t) + \Delta t\,x(t) \end{aligned}$$ The square magnitude of this new vector is $(x(t)^2+y(t)^2)(1+\Delta t^2)$, which is always greater than magnitude of the vector at the start of the step. This is not uniform circular motion. The solution obtained via Euler's method instead spirals out. Other techniques spiral inward. A geometric integrator on the other hand will somehow maintain the constraint that $x^2+y^2$ is a constant of motion. The above example showed why we don't want to toss geometry in a very simple problem. The geometry of Newtonian gravitation, along with much classical mechanics in general, is symplectic geometry. This is why symplectic integrators are of great concern. A simple example again, with Euler's method: Suppose the second derivative of position is given by some function $\ddot {\vec x}(t) = \vec f(x(t),t)$. Applying the basic Euler method against 3+3 dimensional phase space dictates that $$\begin{aligned} \vec x(t+\Delta t) &= \vec x(t) + \Delta t \vec v(t) \\ \vec v(t+\Delta t) &= \vec v(t) + \Delta t \vec f(\vec x(t),t) \end{aligned}$$ A simple change makes this symplectic: $$\begin{aligned} \vec v(t+\Delta t) &= \vec v(t) + \Delta t \vec f(\vec x(t),t) \\ \vec x(t+\Delta t) &= \vec x(t) + \Delta t \vec v(t+\Delta t) \end{aligned}$$ As is the case with the scalar techniques discussed at the start, Euler's method is a starting point rather than the end with regard to symplectic integration techniques. Symplectic Euler's method is rather lousy. But at least orbits don't spiral outward. In making an N-body gravitation simulation, the size of N (the number of bodies) is a key concern. Simulating a galaxy is a very different concern from simulating a star system. The techniques used in simulating the formation of a galaxy are very different from those used to develop a solar system ephemeris. 
Galactic scale simulations cannot afford to calculate all of the $N^2$ gravitational interactions amongst all the particles, and because $N$ is so large, they cannot afford anything more complex than very simple integrators. A star system model that does not calculate all $N^2$ of the gravitational interactions or that uses a very simple integrator will be viewed with disdain.
{ "domain": "astronomy.stackexchange", "id": 2279, "tags": "orbit, orbital-elements, orbital-mechanics, newtonian-gravity" }
Does the multiplication and division significant figures rule apply to unit conversions as well?
Question: My chemistry teacher told our class that answers to calculations that involve multiplication and division should be given to the same number of significant figures as the factor, dividend or divisor with the lowest no. of sig.figs. For example, to calculate the no. of moles of calcium in a pure 40g sample of calcium, the dividend (the mass) has 1 sig.fig. and the divisor (the molar mass) has 3 sig.figs. Hence the no. of moles is given to 1 sig.fig. 40/40.1 = 1 mol of Calcium Does this same rule apply to unit conversions? For example if I wanted to convert 12.4 milligrams to grams, I would need to divide 12.4 milligrams by 1000. 12.4 has 3 sig.figs. whereas 1000 has only 1 sig.fig. Does that mean the figure in grams must be given to 1 sig.fig? Answer: In regards to the calcium situation, it depends on whether you measured 40g of calcium (which would be 2 sig. fig.), or 40g was the stated value (1 sig. fig.). Yes, the number of sig figs in your answer should equal the number of sig figs in the least accurate piece of data. When converting units, the 1000 is taken as the exact value, i.e. it has infinite accuracy. Here, it is not considered, so the answer should have 3 sig fig from 12.4 (which is also why scientific notation is used to decrease ambiguity).
{ "domain": "chemistry.stackexchange", "id": 8881, "tags": "significant-figures" }
Why is the partial trace of this subsystem equal to this?
Question: I am doing my bachelors dissertation based on an article by David Deutsch. He defines the action of a quantum gate as: $$ U = \sum_{x, y \in \mathcal{Z}_{2}} |x \dot{+}y\rangle|y\rangle\langle x|\langle y| $$ where $\dot{+}$ is the OR operator from boolean algebra. Then, according to me: He then takes the partial trace: $$ \text{Tr}_{2}[U(|\psi\rangle\langle\psi|\otimes\hat{\rho})U^{\dagger}] $$ and it is equal to: $$ \frac{1}{2}\hat{I} + \text{Re}[(\langle 0|\psi\rangle\langle\psi|1\rangle)(|0\rangle\langle 1|+|1\rangle\langle 0|)] $$ EDIT: where does the $\frac{1}{2}\hat{I}$ comes from? Answer: Let's rewrite $U$ as, so it's more clear what it does: \begin{aligned} U &= \sum_{x, y \in \mathbb{Z}_{2}} |x \dot{+}y\rangle|y\rangle\langle x|\langle y| \\ &= \sum_{x, y \in \mathbb{Z}_{2}} |x \dot{+}y\rangle_1|y\rangle_2\langle x|_1\langle y|_2 \\ &= \sum_{x, y \in \mathbb{Z}_{2}} \left(|x \dot{+}y\rangle_1\langle x|_1\right)\left(|y\rangle_2\langle y|_2\right) \ .\\ \end{aligned} This operator leaves the first qubit untouched and maps the second qubit to the logical OR of the qubits. I have to say this is a peculiar operator, that's not even unitary. Normally people use the XOR gate(which is unitary) in the quantum information field. I guess the paper predates most of quantum information algorithms. 
Anyway, $U^\dagger$ is straightforward to calculate: $$U^\dagger = \sum_{x, y \in \mathbb{Z}_{2}} \left(|x\rangle_1\langle x \dot{+}y|_1\right)\left(|y\rangle_2\langle y|_2\right) \ .$$ \begin{aligned} \text{Tr}_2\left[ U \left(|\psi\rangle\langle\psi|\otimes\rho\right)U^\dagger\right] &= \text{Tr}_2 \left[\sum_{w,x, y,z \in \mathbb{Z}_{2}} \left(|x \dot{+}y\rangle_1\langle x|_1\right)\left(|y\rangle_2\langle y|_2\right) \left(|\psi\rangle\langle\psi|_1\otimes\rho_2\right) \left(|w\rangle_1\langle w \dot{+}z|_1\right)\left(|z\rangle_2\langle z|_2\right)\right] \\ &= \text{Tr}_2\left[\sum_{w,x, y,z \in \mathbb{Z}_{2}} \left(|x \dot{+}y\rangle\langle x|\psi\rangle\langle\psi|w\rangle\langle w \dot{+}z| \right)_1\otimes \left(|y\rangle\langle y|\rho |z\rangle\langle z|\right)_2 \right] \\ &=\sum_{u\in \mathbb{Z}_2} \sum_{w,x, y,z \in \mathbb{Z}_{2}} \left(|x \dot{+}y\rangle\langle x|\psi\rangle\langle\psi|w\rangle\langle w \dot{+}z| \right) \left(\langle u|y\rangle\langle y|\rho |z\rangle\langle z|u\rangle \right) \\ &= \sum_{u\in \mathbb{Z}_2} \sum_{w,x \in \mathbb{Z}_{2}} \left(|x \dot{+}u\rangle\langle x|\psi\rangle\langle\psi|w\rangle\langle w \dot{+}u| \right)\left(\langle u|\rho |u\rangle \right) \\ &= \sum_{w,x \in \mathbb{Z}_{2}} \langle x|\psi\rangle\langle\psi|w\rangle\left(\langle 0|\rho |0\rangle|x \rangle\langle w | +\langle 1|\rho |1\rangle|1 \rangle\langle 1 |\right) \end{aligned} Now, the paper says $\rho = \frac{1}{2}\hat{I} + \text{Re}[(\langle 0|\psi\rangle\langle\psi|1\rangle)(|0\rangle\langle 1|+|1\rangle\langle 0|)]$, is a solution to the equation $\text{Tr}_2\left[ U \left(|\psi\rangle\langle\psi|\otimes\rho\right)U^\dagger\right] = \rho $, which I think you can check yourself by substituting it in the above equation.
{ "domain": "physics.stackexchange", "id": 50391, "tags": "quantum-mechanics, hilbert-space, quantum-information, quantum-computer, density-operator" }
How to remove power line interference?
Question: I have a couple of datasets that show a peak around the 50-60 Hz range (mostly around 55Hz and in some cases at their harmonics i.e. 100-120Hz.) In some datasets the spike is significant in comparison with other frequencies. Before I analyse the data, I need to ensure that these spikes are not interfering with my analysis. I was wondering what filters would be suitable especially at a larger frequency bandwidth/range (50-60 Hz.) Answer: Well, if I were doing this from scratch, I would do this with biquad notch filters with very high Q and adjustable coefficients. Two or three of them with frequencies that are harmonically locked. An algorithm could be measuring the difference between the notches and a "wire" and very slowly adjust the fundamental frequency and maximize that difference. probably you could put a control loop on that.
{ "domain": "dsp.stackexchange", "id": 10312, "tags": "fft, filters, digital-filters, interference" }
Wrap a long string to an array of short-enough strings
Question: I have made a function that wraps text in pygame (i.e. it turns a long string into an array of smaller strings that are short enough so they fit within the given width when rendered in the given font). My function is below. text is the string we want to break up, font is a pygame font object, and max_width is the number of pixels wide we want the lines to be at maximum (an integer). def wrap_text(text, font, max_width): lines = [] words = text.split(" ") while words: line = words.pop(0) if words: width = font.size(" ".join((line, words[0])))[0] while width < max_width: if words[0] == "\n": # Forced newline when "\n" is in the text del words[0] break line = " ".join((line, words.pop(0))) if not words: break width = font.size(" ".join((line, words[0])))[0] if font.size(line)[0] > max_width: # When there is only one word on the line and it is still # too long to fit within the given maximum width. raise ValueError("".join(("\"", line, "\"", " is too long to be wrapped."))) lines.append(line) return lines note: font.size(string) returns a tuple containing the width and height the string will be when rendered in the given font. As you can see, I have the statements while words:, if words: and if not words: all within each other. I have been trying to refactor this by moving things around but I simply cannot think of a way to remove any of the 3 statements above. Any help is much appreciated :). Any comments about anything else in my code is welcome too. Answer: Bug You have a small bug. For text="abc \\n def" and max_width=10 the output is incorrectly ['abc', 'def'] instead of ['abc def']. You can greatly simplify the handling of embedded whitespace characters by using re.split with a pattern r'\s+'. Don't repeat yourself This (or almost the same) piece of code appears in multiple places: " ".join((line, words[0])). Look out for duplication like this and use helper functions to eliminate. 
Overcomplicated string joins Instead of " ".join((line, words[0])), it would be a lot simpler to write line + " " + words[0]. Simplify the logic Consider this alternative algorithm: Create a word generator: from some text as input, extract the words one by one For each word: If the word is too long, raise an error If the current line + word would be too long, then Append the current line to the list of lines Start a new line with the current word If the current line + word is not too long, then append the word to the line Append the current line to the list of lines Implementation: def wrap_text(text, font, max_width): def gen_words(text): yield from re.split(r'\s+', text) # or in older versions of Python: # for word in re.split(r'\s+', text): # yield word def raise_word_too_long_error(word): raise ValueError("\"{}\" is too long to be wrapped.".format(word)) def too_long(line): return font.size(line)[0] > max_width words = gen_words(text) line = next(words) if too_long(line): raise_word_too_long_error(line) lines = [] for word in words: if too_long(word): raise_word_too_long_error(word) if too_long(line + " " + word): lines.append(line) line = word else: line += " " + word lines.append(line) return lines Some doctests to verify it works: def _wrap_text_tester(text, max_width): """ >>> _wrap_text_tester("hello there", 7) ['hello', 'there'] >>> _wrap_text_tester("I am legend", 7) ['I am', 'legend'] >>> _wrap_text_tester("abc \\n def", 10) ['abc def'] >>> _wrap_text_tester("toobigtofit", 7) Traceback (most recent call last): ... ValueError: "toobigtofit" is too long to be wrapped. >>> _wrap_text_tester("in the middle toobigtofit", 7) Traceback (most recent call last): ... ValueError: "toobigtofit" is too long to be wrapped. >>> _wrap_text_tester("", 7) [''] """ return wrap_text(text, font, max_width)
{ "domain": "codereview.stackexchange", "id": 23531, "tags": "python, python-2.x, pygame" }
This program reads in integers until a sentinel number is read. And prints the largest integer read
Question: I'm just looking for feedback on how I could improve this simple program. Whether it be comments for better understanding or better variable names. I try not to focus on memory consumption because I'm honestly not that knowledgeable as of yet. My main focus is getting a written solution on paper and then implement in syntax. I was given the question to write a program that reads in integers one per line, once a sentinel integer is read end program, and display the largest integer. The way I solved it was with pen and paper first because I'm working on my pseudocode and algorithm design. If there are any books on the subject for recommendation. I would greatly appreciate. /** * This program reads in integers until a sentinel number is read, * and prints the largest. * * @author shawn * */ //Import needed for Console Programs. import acm.program.*; public class LargestListValue extends ConsoleProgram { public void run() { /*Integer variables to track the current largest input * and current input. */ int x, y; println("This program reads in integers until a sentinel number is read, "); println("and prints the largest."); while (true) { //Initial while to read in an initial integer. x = readInt(" ? "); //Initial read integer. Also current largest. if (x == SENTINEL_NUMBER) { println("No numbers?"); break; } else { y = readInt(" ? "); //Next read integer. if (y == SENTINEL_NUMBER) { println("Largest number is " + x + "."); break; } else { while (y <= x) { //While second read integer y = readInt(" ? "); if (y == SENTINEL_NUMBER) { println("Largest number is " + x + "."); break; } else if (y >= x) { while (y >= x) { x = readInt(" ? "); if (x == SENTINEL_NUMBER) { println("Largest number is " + y + "."); break; } else if (y <= x) break; } } } } } //Extra break added to the break; } } //Sentinel number to end reading integers and print largest. private static final int SENTINEL_NUMBER = 0; } Answer: break; } else { You don't need an else with a break. 
The break ends execution inside the loop. You can never reach the code after it in the loop unless it doesn't trigger. Without all the else clauses, you can significantly reduce the amount of indentation. Your code could also return instead of break. That would be clearer in my opinion. What's x? What's y? Why loop forever if you're just going to break out of it? In my opinion, your comments would be better replaced with more readable, self-commenting code. Consider the following public static int readLargest(final int SENTINEL) { int largest = readInt(" ? "); for (int current = largest; current != SENTINEL; current = readInt(" ? ") ) { if (current > largest) { largest = current; } } return largest; } Which you'd use as public void run() { int largest = readLargest(SENTINEL_NUMBER); if (largest == SENTINEL_NUMBER) { println("No numbers?"); } else { println("Largest number is " + largest + "."); } } By using a separate method, we consolidate the possibilities. We only have two println statements. In the original code, you had the latter option three times. Here we only need one. This also makes the readLargest more reusable. Even better might be to read all the values first, then call a findLargest method on that. Then your readNumbers and findLargest methods would both be reusable separately. Of course, you may not have gotten to Collection types yet. In your original code, sometimes y was the largest and sometimes x. In this code, largest is always the largest unless the most recent read returns a larger value. I prefer the names current and largest as being more descriptive than single letter names. I find that more important with largest. I could live with x instead of current. I'm not sure that it's necessary to say SENTINEL_NUMBER. The current variable is an integer. What would SENTINEL be? A cabbage? Obviously not. You'd only compare an integer to some kind of number. Others may disagree. In particular, you may want to look up Hungarian Notation. 
I prefer to pass the sentinel value as a parameter for flexibility. Since we don't use current outside of the loop, I think that the for loop has better scope than a while loop. The only comment that seems necessary is that readLargest will return the sentinel value if and only if there are no other numbers in the input (prior to the sentinel value). Other than that, I feel that the code is pretty clear about what it is doing. Perhaps some explanation of the initialization of largest.
{ "domain": "codereview.stackexchange", "id": 26563, "tags": "java, algorithm" }
Number of products required to multiply polynomial coefficients
Question: I am wondering about the claim from the book "Probability and computing" that the number of products required to multiply monomial coefficients is $\Theta(d^2)$, where $d$ is the number of monomials that forms the polynomial $$P(x) = (x-a_1)(x-a_2)\cdots(x-a_d).$$ I think this answer is wrong and it takes $\Theta(2^d)$ instead. Could someone explain this, please? Answer: Consider the following algorithm: Start with $P = 1$. For $i=1,\ldots,d$: replace $P$ with $P \cdot (x - a_i)$. At step $i$, the polynomial $P$ has degree $i-1$, and so we need to compute $$ \sum_{j=0}^{i-1} p_j x^j (x - a_i) = \sum_{j=1}^i p_{j-1} x^j - \sum_{j=0}^{i-1} a_i p_j x^j = -a_i p_0 + \sum_{j=1}^{i-1} (p_{j-1} - a_i p_j) x^j + p_{i-1} x^i. $$ As you can see, this requires $i-1$ products. Summing over all $i$, we obtain $\frac{d(d-1)}{2} = \Theta(d^2)$ products. In fact, using FFT we can improve on this. FFT multiplies two degree $m$ polynomial using only $\Theta(m\log m)$ products. A straightforward divide and conquer approach results in the following recurrence for the number of products: $$ T(d) = 2T(d/2) + \Theta(d\log d). $$ The solution is $T(d) = \Theta(d\log^2 d)$.
{ "domain": "cs.stackexchange", "id": 19711, "tags": "complexity-theory" }
What is the complexity of computing a compatible 3-coloring of a complete graph?
Question: Given a complete graph whose edges are colored by 3 colors, a compatible 3-coloring is a coloring of nodes such that no edge of the graph has the same color as its end-points. The best algorithm I know is quasi-polynomial time. The problem is not known to be NP-complete. What is known about the status of this problem ? Answer: Apparently, this paper by Cygan, Pilipczuk, Pilipczuk and Wojtaszczyk (2010+) gives a faster (polynomial time) algorithm: http://arxiv.org/abs/1004.5010 It was accepted to SODA 2011.
{ "domain": "cstheory.stackexchange", "id": 182, "tags": "cc.complexity-theory, ds.algorithms, reference-request, graph-theory" }
Which step response matches the system transfer function
Question: A system has the following open loop bode plot: - Which one of the plots below describe the closed loop step response for the entire system? My attempt My initial thought was to look at the static gain from the open loop bode plot, which is $-4\text{dB} = 0.63$. I interpret this as this will be the final value of the step response when it is settled. The only graph that sort of matches this description is graph 3. However, it turns out that this is not the correct answer. And therefore my reasoning about the static gain can not be correct either. So my question is, how can I identify the closed loop step response from the open loop bode plot? Answer: The final value of the step response is the DC gain of the closed-loop transfer function, which is generally different from the open-loop DC gain. Assuming unity gain feedback, the feed-forward transfer function $G(s)$ equals the open-loop transfer function, and the closed-loop transfer function is given by $$C(s)=\frac{G(s)}{1+G(s)}\tag{1}$$ The final value of the closed-loop step response equals $C(0)$. With the given value of $G(0)$, which I would interpret as $-3$dB, the final value of the closed-loop step response should be around $\sqrt{2}-1\approx 0.4$. The step response in Figure $1$ seems like a good match.
{ "domain": "dsp.stackexchange", "id": 9965, "tags": "continuous-signals, control-systems, step-response, bode" }
How to update gmapping continuously?
Question: Hi all, do someone know how to update the slam gmapping map more often. I need continuous updates in time. It should not longer depend on the degree of exploration. So far I did not manage it, quite long delays appear until the map is extended. I appreciate every hint ... Thanks, Daniel Originally posted by dneuhold on ROS Answers with karma: 205 on 2014-01-06 Post score: 1 Answer: Finally I localized the problem! Since I am using the turtlebot, I launch turtlebot_navigation gmapping_demo.launch. If you have a closer look at the launch file, you will recognize that the correct parameters to set are localized in the following launch file: /opt/ros/groovy/stacks/turtlebot_apps/turtlebot_navigation/launch/includes/_gmapping.launch Following parameters have a big impact on the update speed: param name="map_update_interval" value="0.1" .... updates in sec param name="delta" value="0.1" ...... grid of the map (here each block is 10cm * 10cm) param name="particles" value="1" .... particles in the filer (default:30, which slows down the updating procedure) So I had to change these parameters and everything works fluently. Hope I could help you with my experience. BR Daniel Originally posted by dneuhold with karma: 205 on 2014-01-09 This answer was ACCEPTED on the original site Post score: 4 Original comments Comment by Swan Baigne on 2015-07-17: Thanks for your answer, it really helped me too. I just wanted to add that it seems that modifying parameter when the slam_gmapping node is running with rosparam set DOESN'T WORK, you have to change the .launch file
{ "domain": "robotics.stackexchange", "id": 16593, "tags": "navigation, costmap, update, slam-gmapping, gmapping" }
Speed of EM waves differ from GWs?
Question: I understand that light travels at speed c in vacuum, when measured locally. This speed has an exact value, not an approximation, because it is defined as 299 792 458 m / s. It is an exact value, because the meter is defined as the distance that light travels in a 1/299 792 458th of a second. Now as per SR, all particles that have no rest mass must travel at speed c in vacuum, when measured locally. This goes for photons (that build up the EM waves), and the (hypothetical) gravitons (that build up GWs). So both photons and gravitons have to travel at speed c. Both EM waves and GWs have to travel at this speed in vacuum when measured locally. So theoretically there can be no difference between the speed of EM waves and GWs (assuming they both propagate in vacuum). Then I found this: http://iopscience.iop.org/article/10.3847/2041-8213/aa920c/pdf And it talks about experimental evidence of a non-zero difference between the speed of EM waves and GWs. How is that possible? I thought the speed of light is an exact value, and it has to hold for all massless particles. Question: How can there be experimental evidence of a non-zero difference between the speed of EM waves and GWs? Answer: Although I did not read the full paper, there might be some details contributing to the answer. In theory light and GW travel with the same speed, but the medium in which they travel is very different. Light travels through the current medium in space. GW perform changes to the space, that travel with the speed of light. So while GW are not affected by a medium, light is. Now although you would expect the space to be completely empty, it isn't. Particles can form in pairs of particle and anti-particle for short times, due to the Heisenberg uncertainty principle (borrow energy for a short time -> create the mass for particle-anti-particle pairs -> give them back (annihilation) in time).
This "fills" the vacuum with virtual particles, which create an effective medium and lower the speed of light, if interactions of the photons and the virtual particles happen. Since the paper observes light that traveled an enormous distance, there was enough time for such interactions to happen, hence the GW arrives faster than the photons.
{ "domain": "physics.stackexchange", "id": 52452, "tags": "quantum-mechanics, electromagnetism, general-relativity, speed-of-light" }
What are the clicking sounds when static electricity occurs?
Question: Sometimes, when there's static electricity, soft clicking sounds can be heard. This may happen when two fabrics are rubbed or when you get a static shock. What exactly causes these sounds? Answer: It is due to static discharge. When the electric field between two points in a medium exceeds the dielectric strength of the medium, then dielectric breakdown happens, and the medium becomes sort of a conductor, with charges getting exchanged between the two points. However, since the medium becomes conducting, due to the huge values of the electric field strength, the charges being exchanged acquire huge velocities over distances shorter than the mean free path in such a medium and thus their collisions with neighboring atoms excite them to higher energy states as well as to sudden high velocities, leading to rapid thermal expansion and light emission. That is the reason for the sound and the light of such discharges. In short, as @JonCuster calls it, miniature lightning bolts.
{ "domain": "physics.stackexchange", "id": 29062, "tags": "electrostatics, electricity, electrons, statics" }
Can ideal dipoles be associated to a covariant four-current?
Question: I am trying to check if the classical electromagnetic sources from a point electric/magnetic dipole do form a true four-current. In this SE post, it is shown that a point electric charge do transform covariantly between inertial reference frames (IRF). As a point dipole is just a case where two opposite point charges are placed very close to each other, and also as Lorentz transformations (LT) are linear, my first guess to the question would be that such four-current is indeed valid. Mathematically, the charge density $\rho$ and current density $\mathbf{J}$ due to a static point dipole at rest are known to be $\rho = -(\mathbf{p}\cdot\nabla)\delta^3(\mathbf r)$ and $\mathbf J=-(\mathbf m \times \nabla)\delta^3(\mathbf r)$, where $\mathbf p = q \mathbf d $ is the electric dipole moment, with $q$ being the charge and $\mathbf d$ the charges' distance, $\mathbf m = (1/2)\int(\mathbf r \times \mathbf J) \mathrm{d}^3\mathbf r$ is magnetic dipole moment, $\delta^3(\mathbf r)$ is the three-dimensional Dirac delta distribution and $\mathbf r$ is the position vector. These equations are obtained from the electromagnetic potentials -- see J. D. Jackson's book, for example. For simplicity, let us assume another IRF moving with velocity $\mathbf u = (u,0,0)$ relative to the dipole's rest frame, which shall be denoted with primed variables. Applying the LT, a four-current $(c\rho,\mathbf J)$ transforms as $\rho'=\gamma \rho-\gamma u J_x/c^2$ and $J_x' = -\gamma u \rho + \gamma J_x $, $J_y'=J_y$, $J_z'=J_z$. For $t=0$, $\mathbf r$ and $\mathrm{d}^3\mathbf r$ transform as $\mathbf r'=(\gamma x,y,z)$ and $\mathrm{d}^3\mathbf r'=\gamma^{-1}\mathrm{d}^3\mathbf r$, respectively. 
Thus, the electric dipole moment must transform as \begin{eqnarray} \mathbf p' &=& \int \rho'\mathbf r'\mathrm{d}^3\mathbf r'\nonumber\\ &=& \int (\gamma \rho-\gamma J_x u/c^2)(\gamma x,y,z) \gamma^{-1}\mathrm{d}^3\mathbf r\nonumber\\ &=& \gamma \mathbf p_{\parallel} + \mathbf p_{\perp}-\frac{1}{c^2}\int (\mathbf u \cdot\mathbf J)(\gamma x,y,z)\mathrm{d}^3\mathbf r. \end{eqnarray} To solve the last integral, we use the result $\int r_i J_j \mathrm{d}^3\mathbf r=\epsilon_{ijk}m_k$, which is valid for static configurations. Therefore, \begin{eqnarray} \mathbf p' &=& \gamma \mathbf p_{\parallel} + \mathbf p_{\perp}-\frac{u}{c^2}(0-m_z+m_y)\nonumber\\ \mathbf p' &=& \gamma \mathbf p_{\parallel} + \mathbf p_{\perp}-\frac{1}{c^2}\mathbf u \times \mathbf m. \end{eqnarray} Using this result and recalling that $\nabla'=(\gamma^{-1}\partial_x,\partial_y,\partial_z)$ and $\delta'^3(\mathbf r')=\gamma \delta^3(\mathbf r)$, it is possible to show that $\rho'$ has the expected form. However, as the volume transforms as $V'=V/\gamma$, this result for $\mathbf p'$ also leads to an incorrect transformation equation of the polarization field $\mathbf P$, which is known to be $\mathbf P= \mathbf{P}_{\parallel}+\gamma(\mathbf P_{\perp}-\mathbf u \times \mathbf M/c^2 )$. The magnetic dipole moment $\mathbf m$, on its turn, transforms as \begin{eqnarray} \mathbf m' &=& (1/2)\int \mathbf r'\times\mathbf J'\mathrm{d}^3 \mathbf r'\nonumber\\ &=& \gamma^{-1}\mathbf m_{\parallel}+ \mathbf m_{\perp} +\mathbf u \times \mathbf p, \end{eqnarray} which now leads to a correct transformation of the magnetization field $\mathbf M$, but an incorrect transformation of $\mathbf J'$. This result for $\mathbf m'$ can be found, for example, in this old paper and, to first order in $u/c$ (i.e., $\gamma \approx 1$), in this paper. Alternatively, one could look directly into the transformations of the four-potential of static dipoles. 
I won't discuss this here for brevity, but this procedure also shows a covariant transformation for $\mathbf p'$ and $\mathbf m'$ only if $\gamma \approx 1$. The steps developed here show that, contrary to my initial intuition, the dipolar sources generally do not transform convariantly between IRFs, and therefore can not be associated to a four-current. I would like to understand this result better. As any physical, electromagnetic source can be described as a multipole expansion, a reasonable explanation is that transforming the dipolar sources between IRFs actually mixes them with higher other terms in the expansions, making the interpretation of the new sources impossible in the terms of dipoles only. In this scenario, the transformation of point charges would just be a special case where there is no such mixture with higher order terms. Is this reasoning correct? Answer: I think that you made a mistake in your calculations. Essentially, I think that it boils down to the fact that you forgot to change time as well. For relativistic calculations, it is always best to set $c=1$ as it only clutters the equations. The Lorentz transformation is given by: $$ \begin{align} t' &= \gamma(t-ux) & x' &= \gamma(x-ut) & x_\perp' &= x_\perp \\ t &= \gamma(t'+ux') & x &= \gamma(x'+ut') & x_\perp &= x_\perp' \\ \end{align} $$ with $\gamma = \frac{1}{\sqrt{1-u^2}}$. 
You need to be more careful with the delta and the partial derivatives: \begin{align} \delta(\vec x) &= \delta(x)\delta(x_\perp)\\ &= \frac{1}{\gamma}\delta(x'+ut')\delta(x_\perp')\\ \partial_x &= \gamma(\partial_x'-u\partial_t') \\ \nabla_\perp &= \nabla_\perp' \\ \partial_x\delta(x) &= \gamma(\partial_x'-u\partial_t') \frac{1}{\gamma}\delta(x'+ut') \\ &= (1-u^2)\delta'(x'+ut') \\ &= \frac{1}{\gamma^2}\partial_x'\delta(x'+ut')\\ \vec p \cdot \nabla \delta(\vec x) &= \gamma^{-1}(\gamma^{-1}p_x\partial_x'+p_\perp\cdot \nabla_\perp)\delta(x'+ut')\delta(x_\perp') \\ (\vec m \times \nabla \delta(\vec x))_x &= \gamma^{-1}m_\perp\times \nabla_\perp'\delta(x'+ut')\delta(x_\perp') \end{align} Note that already the $\gamma$ factors are different than yours. You don't need to integrate, it's easier to use only the deltas (one less source of mistake when converting the volume element): $$ \begin{align} \rho'(t',\vec x')&= \gamma(\rho(t,x)-uj_x(t,x)) \\ &= -(\gamma^{-1}p_x\partial_x'+p_\perp\cdot \nabla_\perp'-u m_\perp\times \nabla_\perp')\delta(x'+ut')\delta(x_\perp') \\ p_x' &= \gamma^{-1}p_x\\ p_\perp' &= p_\perp-\vec u\times \vec m \end{align} $$ so you got the wrong $\gamma$ factor for the parallel component. Btw, the advantage of actually calculating the new charge density is also to check that it is the result of an electric dipole and that there are no new charge distributions that intervene. 
Similarly for the magnetic dipole: $$ (\vec m \times \nabla \delta(\vec x))_\perp = \gamma^{-1}(m_xe_x\times \nabla_\perp'+\gamma^{-1}m_\perp\times e_x \partial_x')\delta(x'+ut')\delta(x_\perp') $$ so: $$ \begin{align} j_x'(t',\vec x')&= \gamma(j_x(t,x)-u\rho(t,x)) \\ &= -(m_\perp\times \nabla_\perp'-u(\gamma^{-1}p_x\partial_x'+p_\perp\cdot \nabla_\perp'))\delta(x'+ut')\delta(x_\perp') \\ m_\perp' &= m_\perp+\vec u\times\vec p \\ j_\perp'(t',\vec x') &= j_\perp(\vec x)\\ &= -\gamma^{-1}(m_xe_x\times \nabla_\perp'+\gamma^{-1}m_\perp\times e_x \partial_x')\delta(x'+ut')\delta(x_\perp')\\ m_x' &= \gamma^{-1}m_x \end{align} $$ So I recover your transformation law for the magnetic moment. There is a small catch though. The point of looking at the Dirac deltas is to check whether the current distribution is still a magnetic dipole. You can see, there are some extra terms that cannot arise from a magnetic dipole: $$ \begin{align} \vec j' &= \vec j'_s+\vec j'_d \\ \vec j'_s &= -\vec m'\times\nabla\delta(x'+ut')\delta(x_\perp') &&\text{(static)}\\ \end{align} $$ This is because the charge distribution is not stationary (moving dipole). The dynamic component can be explained by the continuity equation: $$ \partial_t'\rho'+\nabla'\cdot\vec j_d' =0 $$ Due to the dynamic component, you'll need to take into account the velocity of the dipoles as well in the LR (given by the usual velocity addition formula). Things should now be consistent. Hope this helps.
{ "domain": "physics.stackexchange", "id": 95945, "tags": "electromagnetism, special-relativity, covariance, dipole-moment, multipole-expansion" }
Small space hash functions that are weakly but not strongly universal
Question: This is a follow up to this question about weakly universal hash functions A family of hash functions $H_w$ is said to be weakly universal if for all $x \ne y$ : $$P_{h \in H_w}(h(x) = h(y)) \leq 1/m$$ Here the function $h:U \rightarrow [m]$ is chosen uniformly from the family $H$ and we assume $|U| > m$. A family of hash functions $H_s$ is said to be strongly universal if for all $x \ne y$ and $k, \ell \in [m]$: $$P_{h \in H_s}(h(x) = k \land h(y) = \ell) = 1/m^2$$ I previously asked for an example of a hash function family which is weakly universal but not strongly universal. The very nice answer was: Take $U = [m+1]$, and consider the functions $h_i$, for $i \in [m]$, given by $$ h_i(x) = \begin{cases} x & \text{if } x \neq m+1, \\ i & \text{if } x = m+1. \end{cases} $$ The same approach can be used for arbitrary $|U|$: fix the first $m$ coordinates, and make all other coordinates uniformly and independently random. For arbitrary $|U|$, it seems that in order to represent a single hash function in practice you would need to store a lookup table of size $|U|$ to ensure that the hashed values are independent and truly random. Then for every key, you would look up its random value in the vast lookup table to compute the hash function. Is there a hash function family where the hash functions require constant or log space that achieves the same result of being weakly but not strongly universal? In other words, are there any practical hash function families that are weakly but not strongly universal? Answer: Let $H$ be a family of strongly universal hash functions from $U$ to $[m]$. Construct a new family of hash functions from $U \cup \{x\} \to [m]$ by extending all functions $h \in H$ with $h(x) = 1$. The family is weakly universal since $h(u)$ is distributed uniformly for every $u \in U$, but it is clearly not strongly universal.
{ "domain": "cs.stackexchange", "id": 13315, "tags": "hash, hash-tables, probabilistic-algorithms, hashing" }
Can't build the face recognition package
Question: Hi, I got face recognition tutorial on ros wiki page: www.ros.org/wiki/face_recognition but i can't build it. I tried to build the workspace using catkin_make and rosmake. I use the command "catkin-make" in the face_recognition workaspace but it already contains a CMakeLists.txt so the command doesn't work. I also tried the "rosmake face_recognition" but I have this error "ERROR: No arguments could be parsed into valid package or stack names." despite the fact that there is a "stack.xml" file in the folder. Can somebody help me please? Thank you Originally posted by RosFaceNoob on ROS Answers with karma: 42 on 2014-03-31 Post score: 0 Original comments Comment by demmeln on 2014-04-01: Please edit your question to include more information, like the exact commands you are executing and the full output you are getting. Comment by RosFaceNoob on 2014-04-01: Sorry. I edit the question. Answer: This is a rosbuild/rosmake package, so don't try to build it with catkin. The code hasn't been touched for the last two years it seems, so not sure if you can make it work easily. Please make sure the stack is contained in your ROS_PACKAGE_PATH environment variable. What does env | grep -i ROS output? Edit: To add your folder to ROS_PACKAGE_PATH, you need to exectue export ROS_PACKAGE_PATH=/path/to/your/folder/containing/face/recognition:$ROS_PACKAGE_PATH after the source /opt/ros/hydro/setup.bash command. You can either do this directly in the terminal, or add it to your .bashrc file. 
Originally posted by demmeln with karma: 4306 on 2014-04-01 This answer was ACCEPTED on the original site Post score: 1 Original comments Comment by RosFaceNoob on 2014-04-01: The output is : ROS_ROOT=/opt/ros/hydro/share/ros ROS_PACKAGE_PATH=/opt/ros/hydro/share:/opt/ros/hydro/stacks ROS_MASTER_URI=http://localhost:11311 LD_LIBRARY_PATH=/opt/ros/hydro/lib CPATH=/opt/ros/hydro/include PATH=/opt/ros/hydro/bin:/usr/lib/lightdm/lightdm:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games PWD=/opt/ros/hydro/lib/gscam ROSLISP_PACKAGE_DIRECTORIES= ROS_DISTRO=hydro PYTHONPATH=/opt/ros/hydro/lib/python2.7/dist-packages PKG_CONFIG_PATH=/opt/ros/hydro/lib/pkgconfig CMAKE_PREFIX_PATH=/opt/ros/hydro ROS_ETC_DIR=/opt/ros/hydro/etc/ros OLDPWD=/opt/ros/hydro The stack is not in my ROS_PACKAGE_PATH. How can I add it? Comment by RosFaceNoob on 2014-04-01: Thank you, It works. But if I do a "source /opt/ros/hydro/setup.bash", the path is cleaned so I have to do it again. Is there a way to change definitly the path? Comment by demmeln on 2014-04-01: put both commands in your .bashrc Comment by RosFaceNoob on 2014-04-01: I add those lines to my '.bashrc' but it doesn't work. "source /opt/ros/hydro/setup.bash export ROS_PACKAGE_PATH=~/procrob_functional-master:$ROS_PACKAGE_PATH" Comment by demmeln on 2014-04-01: Have you opened a fresh terminal afterwards? Comment by RosFaceNoob on 2014-04-01: I just forgot to source the file before testing. It's fine now
{ "domain": "robotics.stackexchange", "id": 17483, "tags": "ros, build" }
Will curvature of space time break a spaceship in half , if its top half is in a curved region and bottom half is not?
Question: My question is simple. If we have a spaceship, whose top half is in a region of highly curved space, while the bottom half is in a region of flat space, will the top half bend in a way, which will break the ship in two? Or will it not break, because the ship is not bending, but rather space itself is bending? I am not concerned about how realistic it is to have such a sharp difference in curvature between two nearby regions of space. My question is IF there was such a situation, then what would happen? Answer: The ship will break due to tidal forces. If the gravity strength is high enough then the ship will break in two where the ship crosses the surface of gravity-no-gravity. It can of course be that the difference in force (the tidal force) needed to make the ship break is already present somewhere inside the surface, rather than only at the boundary between the two regions. In that case, the ship will break in the gravity region. Note though that the kind of gravity field you envision isn't present in reality.
{ "domain": "physics.stackexchange", "id": 78595, "tags": "general-relativity, spacetime, curvature" }
1x1 convolutions, equivalence with fully connected layer
Question: I'm confused by the concept of equating a 1x1 convolution with a fully connected layer. Take the following simple example of a 1x1 convolution of 2 input channels each of size 2x2, and a single output channel. The only way I can relate this to fully connected layers is to say that there are 4 fully connected layers, one for each location in the input feature map (inputs and outputs colour coded). From what I can understand my interpretation is consistent with the Network in Network paper[Lin et al. 2013] which describe the 1x1 as being equivalent as cross channel parametric pooling The cross channel parametric pooling layer is also equivalent to a convolution layer with 1x1 con- volution kernel. I have seen this one from Yann LeCunn equating 1x1 convolutions to a fully connected layer. And I have read this answer and I'm just not seeing the equivalence between a 1x1 convolution over an input volume and a single fully connected layer... Any insight would be appreciated, if you can please relate back to the example above. Thanks! Answer: The interpretation that the 1d convolution given in the OP can be duplicated with four separate fully-connected layers is correct (see diagram). Also, in at least some implementations, kernel weights used during a 1x1 convolution can be made trainable the same way weights in a fully-connected layer can be made trainable. These points made, every fully-connected layer can not be mathematically duplicated by an equivalent 1x1 convolution. This is based on the definition that 1x1 convolution performs a "column-wise dot product" such that every pixel column in a multi-layer feature map is reduced to a single number (pixel). A fully-connected layer intermixes weights differently from the way weights are intermixed when performing a 1x1 convolution. 
In summary, fully connected layers and 1x1 convolutions each have their own use cases -- some overlap among these use cases exists; however, the two are not intended to be mathematically equivalent in a general sense.
{ "domain": "datascience.stackexchange", "id": 8685, "tags": "neural-network, convolutional-neural-network" }
Inner product of wavefunctions
Question: I'm trying to understand the meaning of this inner product: $⟨\psi_a|H|\psi_b⟩$. $H$ can be a time-independent Hamiltonian. I know that $⟨\psi_a|H|\psi_a⟩$ is the expectation value, but I don't know the meaning of the first inner product. Can we say that $|\langle\psi_a|H|\psi_b\rangle|^2$ is equal to the probability that the measurement of H over the state $\psi_b$ gives the state $\psi_a$? Answer: As in any inner product you can take it as a projection of the state vector $|\psi_a\rangle$ in the direction of $H|\psi_b\rangle.$ The latter is just another state vector, for example $H|\psi_b \rangle = |\psi_b\prime\rangle,$ then $\langle \psi_a |\psi_b\prime\rangle$ is just a projection of one in the direction of the other. You can also take it as the Matrix element of the Hermitian operator $H_{ab},$ if you allow for $|\psi_i\rangle$ to be a basis of the State Space. In a more physical interpretation you could argue that you could obtain the probability of a state $|\psi_b\rangle$ transitioning to $|\psi_a\rangle$ in time if $$|\psi_b (x,t)\rangle = e^{-iHt/\hbar} |\psi_b(x,0)\rangle,$$ then the term $\langle \psi_a |\psi_b\prime\rangle$ would determine the probability amplitude of the transition from $b$ to $a.$ (Expand $|\psi_b(x,0)\rangle$ in a basis of eigenvectors of $H$ and the same for $|\psi_a(x,0)\rangle$ for this to work as a possible interpretation.)
{ "domain": "physics.stackexchange", "id": 67195, "tags": "quantum-mechanics, hilbert-space, wavefunction, probability" }
Examples of NP Complete key exchange algorithms
Question: There's a number of questions on the internet (this site and others; e.g. Why hasn't there been an encryption algorithm that is based on the known NP-Hard problems? ) discussing NP hardness of different asymmetric cryptosystems. How well established are NP hard key-sharing systems? That is, systems for establishing a shared key (that can then by used in symmetric encryption) that are based on problems that are known to be NP hard. I was prompted to this question upon reading about https://en.wikipedia.org/wiki/Anshel-Anshel-Goldfeld_key_exchange and wondering if it could be shown to be NP complete when implemented on $GL_n(F_2)$ or $GL_n(\mathbb{R})$, cause these look like awfully hard constraint-satisfaction or quadratic optimization problems at a first glance. The corresponding problem this is based on is the Simultaneous Conjugacy Problem. I'm aware that there is an important distinction between problems that are merely NP hard in the worst case -- but easy on most random instances, as opposed to problems that are "average-case NP" -- given a set of random instances, solving half of them is still hard. I'd be interested in hearing about key-sharing systems that rely on either notion of hardness. Answer: There are no known public-key cryptographic algorithms that have been proven to be NP-hard to break. None. So we can't provide you examples, because none are known. Answer to your original question: The analysis at Why hasn't there been an encryption algorithm that is based on the known NP-Hard problems? also applies to public-key exchange. Cryptography requires average-case hardness. NP-hardness relates to worst-case hardness; it has no notion of a distribution on inputs, probability, or any kind of "average". Worst-case hardness does not appear to imply average-case hardness: there are many problems that are believed to be hard in the worst case, but easy in the average case. Status of Impagliazzo's Worlds? has some pointers on this topic.
{ "domain": "cs.stackexchange", "id": 7212, "tags": "complexity-theory, np-complete, cryptography" }
How to select the optimal beam size for beam search?
Question: Most Text Generation Models use beam search to select the optimal output candidate. How does one choose the optimal beam size? It would probably vary from task to task, dataset to dataset, and model to model. But given it all these parameters are fixed, how do we choose the optimal beam size? Theoretically scores(beam_size) > scores(beam_size -1) but practically that may not be the case when evaluating for metrics like ROUGE, or BLEU. So is it experimentally determined, for example, to run it for all beam sizes and report the one with the best beam size? I am particularly curious about two aspects: In research projects, do people tune the beam size parameter or do they just take the largest reasonable beam size that fits whatever GPU they have? When these models are deployed in the real world, how is the beam size determined given the incoming distribution of inputs may be wildly different from the training dataset such that the empirically validated beam size? Or is this not a significant enough concern for resources to be deployed for this optimization? Answer: Large beam sizes do not lead to improvements but to degradation in the generated text quality, as described in the article Empirical Analysis of Beam Search Performance Degradation in Neural Sequence Models. Also, article On NMT Search Errors and Model Errors: Cat Got Your Tongue? gives a nice insight into the problems of beam search for translation. In general, the beam size is a hyperparameter that must be tuned. However, given that the "good values" are in a small range, the same value is normally used (for each task) instead of exploring different options. In the first mentioned article, you can find a table with an analysis of different beam sizes for different tasks: In machine translation, the typical value used in most papers is 4-5. For instance, in the article that proposed the Transformer architecture (Attention is all you need), the beam size was 4. 
Note that it is possible to mitigate the problems of large beam sizes by e.g. normalizing scores by sentence length. For instance, in Six Challenges for Neural Machine Translation we can see that, in machine translation for some language pairs (see German-English, in Figure 10), there is no degradation with increasingly large beam sizes. Normally, the beam size value is not changed for different input distributions (e.g. different domains). At least there seems to be no literature on the matter. Nevertheless, there are other popular decoding strategies apart from beam search. For instance, in LLMs, it's typical to use temperature sampling (the generated token is sampled from the probability distribution after applying a temperature factor $\alpha$, which can either flatten the distribution or sharpen it) or top_p/nucleus sampling (you sample from the probability distribution, but only consider the top probability tokens that add up to a specific cumulative probability p). You can check this answer for more decoding strategies.
{ "domain": "datascience.stackexchange", "id": 12177, "tags": "machine-learning, nlp, hyperparameter-tuning, sequence-to-sequence, text-generation" }
Time complexity of adding a vertex to adjacency list
Question: Multiple sources state that the time complexity of adding a vertex to an adjacency list is O(1) and my understanding right now is that this is because of optimizations with hash tables. If we use an array of linked lists, then the time complexity is O(V) right? Because to add a new vertex we have to make a new array of size V + 1. I just wanted to confirm my line of thinking against pre-existing information. Answer: In an array of list implementation of adjacency list, the worst-case cost of adding a new vertex is $O(V) $ because in the worst-case the array is full and you need to create a larger array which is say 2 times larger than the current. Then you will then copy the $V$ lists. The cost of copying a list is $O(1)$ assuming we are copying the pointer to a list and not copying each entries of the list. However, the amortized cost of inserting is $O(1)$. Please refer to the amortized analysis of dynamic arrays (https://en.m.wikipedia.org/wiki/Amortized_analysis) . As for the adjacency matrix, you can also dynamically resize but the worst-case cost of insert is $O(V^2)$ since we are to copy individual items. The amortized cost of insert is $O(V) $ applying the same dynamic array analysis.
{ "domain": "cs.stackexchange", "id": 19642, "tags": "algorithms, graphs" }
How can I combine 2 PDA's into 1
Question: I need to form PDA for this language: {$a^nb^m|n=m \vee n=2m$} I know the idea of building each one separately but how do I combine them into 1 PDA? LHS: for every 'a' I push 'A' inside stack and for every 'b' I eject 'A'. RHS:for every 'a' I push 'A' inside stack and for every 'b' I eject 2 times 'A'. How can I combine them? Can I somehow use non determinism? Answer: Construct one PDA for $n=m$ and another one for $n=2m$. Branch to one of them using an $\epsilon$ transition at the very beginning.
{ "domain": "cs.stackexchange", "id": 16632, "tags": "automata, context-free, pushdown-automata" }
Show the equivalence of two electrostatic energy
Question: $$U = \frac{1}{2}\iint_{all space}^{ } \frac{\rho(1)\rho(2)}{4\pi\epsilon_{0}r_{12}}dV_{1}dV_{2} $$ and $$U = \frac{\epsilon_{0}}{2}\int_{allspace}^{ } \vec{E} \cdot \vec{E} dV.$$ In the left equation, the energy can also be described with the potential $ \phi $ where $U = \frac{1}{2}\int_{allspace }^{ } \rho\phi dV$ because $ \rho = - \epsilon_{0} \triangledown ^{2} \phi $ plug that in $U = -\frac{\epsilon_{0}}{2}\int_{allspace }^{ } \phi \triangledown^{2}\phi dV$ This is where I'm stuck. I think you can change $ \phi \triangledown^{2}\phi$ into a form of $ \triangledown\phi \cdot \triangledown\phi $ such that $\triangledown\phi = \vec{E} $ Thus satisfying the question. However, I'm not too clear of that. $ \phi \triangledown^{2}\phi => \triangledown\phi \cdot \triangledown\phi $ I don't think I know any vector properties that allow this. or at LEAST $ \phi \triangledown^{2}\phi => \triangledown\phi \cdot \triangledown\phi + A $ where $A$ ultimately results in 0 Answer: Just for the sake of having an answer: In $\phi \nabla ^2 \phi$, consider the term $\phi \frac{\partial ^2 \phi}{\partial x^2}$. By the product rule, this is equal to $\frac{\partial}{\partial x} \left(\phi \frac{\partial \phi}{\partial x}\right) - \left(\frac{\partial \phi}{\partial x}\right)^2$. Combining all three terms, we get $\phi \nabla^2 \phi = \nabla\cdot \left(\phi \nabla \phi\right) - \nabla \phi \cdot \nabla \phi$. Upon integrating, the first terms turns into a surface integral over all space. Since $\phi$ goes as $1/r$, $\phi \nabla \phi$ goes as $1/r^3$, and since the surface element goes as $r^2$, the integral is zero when $r \to \infty$. Therefore we're left with $- \nabla \phi \cdot \nabla \phi$, that is, $-E^2$.
{ "domain": "physics.stackexchange", "id": 17056, "tags": "homework-and-exercises, electrostatics, potential-energy" }
Angular momentum conservation under Galileo transformation
Question: I was trying to see when angular momentum is independent of choice of origin, but then it seems to me that angular momentum is no longer conserved under a Galileo transformation: Given a point mass is doing circular orbital motion in an inertial frame: $$\vec L = \vec r \times \vec p $$ In a new relatively stationary frame with displacement $\vec R$:$^{\dagger}$ $$\vec {L'}=\vec {r'} \times \vec {p'}$$ $$\vec {L'}=({\vec R +\vec {r}} )\times \vec {p}$$ Take time derivative: $$\dot {\vec {L'}}=({\dot{\vec R} +\dot{\vec {r}}} )\times \vec {p} +({\vec R +\vec {r}} )\times \dot{\vec {p}}$$ $$\dot {\vec {L'}}=0 +({\vec R +\vec {r}} )\times \dot{\vec {p}}$$ Given angular momentum is conserved in an orbital motion in the old frame ($\vec {r} \times \dot{\vec {p}} = 0$): $$\dot {\vec {L'}}=\vec R \times \dot{\vec {p}}$$ However, this term is not always zero - it is absurd, since we will not have a new torque by picking up a new stationary inertial frame. What's wrong with my reasoning? $\dagger$: both $\vec p$ and $\vec {p'}$ should be the same in both inertial frames. Answer: The thing is that you are mixing two things. On the one hand, if you move your origin, all positions obviously suffer the transformation $x_{new}=x_{old}+x_{OO'}$ (and so on). But, on the other hand, the radius in the angular momentum $L$ is NOT the position of the particle, but its position with respect to the point you chose. In other words, $\vec{L}_Q=\vec{r}_{PQ}\times \vec{p}_{PQ}$ You don't take the radius to the origin (unless the origin is the point you chose). You take the radius from the particle's position to the point you chose. You obviously have $\vec{r}_{PQ}\equiv \vec{PQ}=\vec{OQ}-\vec{OP}$ and changing $O$ doesn't matter because that cancels out. Of course you were right: changing the reference frame doesn't create a new torque... provided that the point you calculate $L$ with respect to remains the same one. 
If you change origin AND the point, then you have a new value, but that's logical because $L$ depends on the point you chose to compute it.
{ "domain": "physics.stackexchange", "id": 43288, "tags": "newtonian-mechanics, angular-momentum, conservation-laws, inertial-frames, galilean-relativity" }
Is it possible to mechanically isomerize an sp3 hybridized carbon center?
Question: Imagine I have an sp3 hybridized carbon attached to four separate polyethylene chains. By pulling on the polyethylene chains in some manner, is it possible for me to mechanically isomerize the chiral center prior to breaking any carbon-carbon bonds? Perhaps a more realistic scenario would involve shining a laser on a compound similar to 2,2-dimethylpropane (i.e. a fully saturated carbon compound where four carbons are attached to an sp3 hybridized carbon), but with asymmetric functionalizations on the bonded carbons to allow for detection of isomerization. Just as in the previous scenario, can mechanical isomerization occur prior to breaking any carbon-carbon bonds? Quoting from Georg's answer: "In "practice" there is a big problem, You would need two Laplacian demons (with two hands each) to do the experiment..." It's not clear to me that this is an impossible experiment. For example, you could attach one polyethylene chain to a surface and then try to make a covalent bond between the other relevant chain and, say, an AFM tip. If this is successful, one would then proceed in a manner similar to that reported in "How Strong is a Covalent Bond?" by Michel Grandbois et. al. (http://www.sciencemag.org/content/283/5408/1727.short), which estimates a ~4.0 nanonewton rupture force for a C-C bond (See Fig. 4) and directly measures the rupture of a single Si-C bond to be ~2.0 +/- 0.3 nanonewtons. In the case where one wants to look for mechanical isomerization of the sp3-hybridized carbon, one would look for an earlier sub-4.0 nN peak in the force vs. extension curve (or whatever critical force is experimentally measured for breaking a C-C bond), perhaps allow relaxation at some critical force to complete the isomerization process, and then compare the observed displacement with a geometrical model. Sounds ridiculously hard, but I see no fundamental reason it couldn't be done. 
Answer: The exact answer is a problem of computational chemistry; maybe somebody has calculated the energy of such a center in a planar configuration, but I don't know. In "practice" there is a big problem: You would need two Laplacian demons (with two hands each) to do the experiment :=) Generally, on the level of such a molecule, "mechanic" is an unrealistic simplification. Nature on this level is not electric as opposed to mechanic or whatever. So forcing the four ligands into a plane (the activated complex of this isomerisation) will lead to rupture of one of the bonds, either forming a radical pair or an ion pair; this depends on the milieu (gas phase, polar liquid, unpolar, etc.). The possible (imaginable) configurations of the four ligands make an energy hypersurface, and the system will go over the lowest pass to a more stable configuration. Why can one be so sure that it is impossible? Think of the reason for that "hybridisation"! Why is the carbon not bound to three neighbours at 90 degrees like PH3 (nearly) by three bonds via a pure p-orbital and the fourth to a pure s-orbital? The reason is simply space. There is not enough room around a carbon, contrary to atoms in higher periods like phosphorus or sulfur. In this sense the hybridisation is forced by "mechanics". Think of Gillespie-Nyholm rules and retropolate "down" for the crowded space around a C-atom.
{ "domain": "physics.stackexchange", "id": 1316, "tags": "physical-chemistry, geometry, quantum-chemistry" }
Why can a Random Access Machine only have $N \leq 2^{W}$ memory slots? Is this a general theorem or an informal rule?
Question: Here: $W$ is "bitness" - number of bits in one machine word, that could be stored in memory slots $N$ - number of memory slots (each has bitness equals $W$) My solution: If max length of machine word is $W$ so there are $2^{W}$ different machine words. Suppose, that each possible machine word works like an address of appropriate slot. Hence if $N > 2^{W}$ there will be slot, that couldn't be accessed by address of machine word. Problem: I don't actually like my solution, because you may use RAM like an assembler and if $W$ is relatively small. For example if $W=1$ and $N=6$, what will be wrong if we manually will LOAD data to 6th slot of RAM? But on the other hand real systems are much more reliable and addressing is really important (it isn't safe to use manual indexing). Answer: Your initial solution sounds correct to me. The problem with your second idea of switching between RAM sticks, using a "select RAM stick" instruction, is that it technically is not random-access anymore: Reading data from only one DIMM requires 1 op per access (switch address) Switching between DIMMs requires up to 2 ops per access (switch address + switch RAM stick) Even if you were to use a single instruction that reads 2 registers at once, it will require 2 ops to set the registers. If you add an operation that writes two registers at once you have arguably increased the effective word/register length. It is worth pointing out that modern computers are not true random-access machines anymore. RAM DIMMs consist of banks of rows, each coming with switching overhead. Modern CPUs try to predict and prefetch memory regions that are going to be required next which makes predictable memory access faster than random-access. Memory is read in chunks (cache lines) which makes sequential access faster than strided access.
{ "domain": "cs.stackexchange", "id": 21554, "tags": "algorithms, data-structures, computation-models" }
Efficient Marker Detection for Images
Question: I need to detect a known number of simple markers (chessboard patterns) in a color image efficiently. I implemented normalized cross correlation (NCC) as a pixel shader via WebGL since I need this to work in a client application in a browser. Detection works fine and is sufficiently robust, but I need better performance. It currently takes a number of seconds to calculate the NCC image, and I'm already using 'obvious' optimizations such as cropping the image (I have a rough indication where the markers should be) and reducing the resolution. I also optimized the code itself (e.g. using loop unrolling, but that is another matter and probably belongs on SO). The image has a resolution of 900x520 pixels and the markers are 22 square pixels. The code runs on tablet computers with Intel Atom processors. My question is: Are there algorithms that are better at utilizing what I know about the image and the marker geometry, ideally ones that are suited for almost branchless pixel shaders? For instance, I was considering a band pass filter (a fourier transformation that only leaves the frequency of the marker, so to speak) or FNCC (JP Lewis's paper), or something that utilizes the fact that the markers are black and white. Unfortunately, it seems these approaches are comparatively expensive since the FFT of the image isn't cheap and my markers are small compared to the image resolution. Testing a ton of algorithms blindly isn't an option, unless there's a library I'm not aware of that helps me implement this in JavaScript or as glsl pixel shaders. Even then, an indication of a more efficient algorithm would be helpful. EDIT: Added an image of the marker. The background is normally transparent, of course, i.e. the NCC will ignore those pixels Answer: This is a saddle point. I would suggest using derivative operators to spot such discontinuities. In fact, Haralick presents a broad overview of these methods in his Topographic Primal Sketch. 
The use of approximate Facet model would give you a speed boost. I used them for edge detection mathworks file exchange - Simple Edge Detection Using Classical Haralick Method. There is also one implementation available here. Even though this question and the topic is rather old, I would like to update that, the method presented in the following work contains no branching and is quite suitable for the shader: Birdal, Tolga, Ievgeniia Dobryden, and Slobodan Ilic. X-Tag: A Fiducial Tag for Flexible and Accurate Bundle Adjustment. 3D Vision (3DV), 2016 Fourth International Conference on. IEEE, 2016.
{ "domain": "dsp.stackexchange", "id": 2323, "tags": "image-processing, computer-vision, cross-correlation, detection, augmented-reality" }
Is it possible to produce images of pair production in home-made cloud chamber?
Question: There are some nice pictures on the web showing the counter-spiralling paths of an electron-positron pair produced in a bubble chamber with a uniform magnetic field, for example:- (source: bigganblog.com) Would it be practical to produce a cloud chamber and Helmholtz coils capable of imaging such events in an ordinary domestic garage? What sort of source would I need? How often might such events be detected? Answer: Basics You need to be able to generate a pair-creation event and be able to image it well enough to know what it was. Getting a pair conversion event Pair creation calls for the highest energy gamma you can get and as much mass in the chamber as you can arrange. The odds of getting a pair-conversion event are graphed in figure 31.17 of the 2013 Review of Particle Physics chapter on the passage of radiation through matter, but it doesn't hit 10% in air until somewhere between 6 and 10 MeV. While this process can happen as soon as you get energies above $2m_e \approx 1.022 \,\mathrm{MeV}$, you need some energy left over to give the created particles some momentum. The mass attenuation length is graphed in figure 31.16 of the same reference, but for a few MeV gammas you are looking at distances around 20 cm. Being able to tell that you got it To be able to tell that you got a pair conversion event you need long enough tracks to convince yourself that you have an isolated "vee" and to tell that the tracks curve in opposite directions. That calls for a long enough propagation distance for you to observe that the track is line-like and to see a non-trivial curvature. For argument's sake let's say that a one-tenth radian curve is good enough. That means getting tracks that are at least one-tenth their radius of curvature. The radius of curvature of a particle in a magnetic field is given by $$ r = \frac{p}{qB} \,.$$ (Note that I don't write $p=mv$ because the pair may be at least moderately relativistic. Just stick to momentum.) 
Engineering concerns You need to pick a source of gamma rays, and you want it as energetic as possible. For really energetic gammas you need an accelerator-based system, but this makes the project many times as big as it started. I'm going to assume you will use a radiological source, despite the low energy and unpredictable timing. Cobalt-60 would be enough if you have patience, but I'd suggest Thorium-232 if you can get it (you'll actually be taking advantage of the high energy gamma from the daughter isotope Thallium-208). Using radiological sources means that you need a continuous data-acquisition system of some kind--say digital video. Finally you have to choose the strength of the magnetic field, and that depends on the expected momentum of your pair, which depends on the energy of your source.
{ "domain": "physics.stackexchange", "id": 15146, "tags": "home-experiment, pair-production" }
Javascript : Etch-a-sketch
Question: I am a beginner in javascript and I have made this project as a part of The odin project. Feautres: Draw by hovering the mouse on grid Erase Clear the entire sketch at once Reset the grid size I have created the grid using only flexbox. Here is my code: function createGrid(grid, rowSize, colSize) { for (let i = 0; i < rowSize; i++) { grid[i] = document.createElement("div"); grid[i].setAttribute("id", "container"); let pixels = []; for (let j = 0; j < colSize; j++) { pixels[j] = document.createElement("div"); pixels[j].setAttribute("id", "pixel"); grid[i].appendChild(pixels[j]); } } return grid; } function appendArray(pDiv, cDiv) { cDiv.forEach((div) => pDiv.appendChild(div)); return pDiv; } (() => { const GRID_WIDTH = 16; const GRID_HEIGHT = 16; const sketchEl = document.querySelector("#sketch"); const eraseEl = document.querySelector("#erase"); const buttonsEl = document.querySelector(".buttons"); let gridCont = document.querySelector("#grid-container"); let grid = []; let state; grid = createGrid(grid, GRID_WIDTH, GRID_HEIGHT); gridCont = appendArray(gridCont, grid); buttonsEl.addEventListener("click", changeState); gridCont.addEventListener("mouseover", eventHandler, false); function changeState(event) { state = event.target.id; switch (state) { case "reset": sketchReset(); break; case "gridSize": sizeReset(); } } function sketchReset() { const pixels = document.querySelectorAll("#pixel"); pixels.forEach((pxl) => pxl.setAttribute("style", "background-color:'transparent';") ); } function sizeReset() { let size = parseInt(prompt("Enter grid size (MAX=100) : ")); if (size <= 100) { grid.length = 0; grid = createGrid(grid, size, size); gridCont.innerHTML = ""; gridCont = appendArray(gridCont, grid); } } function eventHandler(event) { if (event.target.id !== "pixel") { return; } changeColor(event); } function changeColor(event) { if (state === "eraser") { event.target.setAttribute("style", "background-color:'transparent';"); return; } 
event.target.setAttribute("style", "background-color:black;"); } })(); body { display: flex; flex-direction: column; align-items: center; justify-content: center; } #header { font-size: 2.5em; } .flex-container { display: flex; gap: 2em; } .buttons { display: flex; flex-direction: column; justify-content: center; gap: 2em; } .button { font-size: x-large; } .grid-container { display: flex; height: 30em; width: 30em; border: 1px solid black; } .grid-container > div { flex: 1; display: flex; flex-direction: column; } .grid-container > div > * { flex: 1; } <!DOCTYPE html> <html lang="en"> <head> <meta charset="UTF-8" /> <meta name="viewport" content="width=device-width, initial-scale=1.0" /> <meta http-equiv="X-UA-Compatible" content="ie=edge" /> <title>Etch-a-Sketch</title> <link rel="stylesheet" href="style.css" /> </head> <body> <script src="index.js" defer></script> <h1 id="header">Etch-a-sketch!</h1> <div class="flex-container"> <div class="buttons"> <button class="button" id="sketch">Sketch</button> <button class="button" id="eraser">Erase</button> <button class="button" id="reset">Reset</button> <button class="button" id="gridSize">Grid Size</button> </div> <div id="grid-container" class="grid-container"></div> </div> </body> </html> Answer: I'm going to only focus on the Javascript code in my answer. function createGrid(grid, rowSize, colSize) { for (let i = 0; i < rowSize; i++) { grid[i] = document.createElement("div"); grid[i].setAttribute("id", "container"); let pixels = []; for (let j = 0; j < colSize; j++) { pixels[j] = document.createElement("div"); pixels[j].setAttribute("id", "pixel"); grid[i].appendChild(pixels[j]); } } return grid; } I'm not sure why this takes a grid argument, given that the name is createGrid. Just create the array within the function. Secondly, ids are supposed to be unique. For a group of similar elements, you should use a class instead. See this SO question for more info. 
function appendArray(pDiv, cDiv) { cDiv.forEach((div) => pDiv.appendChild(div)); return pDiv; } There already is a function to append multiple children to an element: Element#append. The invocation is a bit different, as you'll need to use spread syntax to pass an array, ie. gridCont.append(...grid). gridCont.addEventListener("mouseover", eventHandler, false); eventHandler is an undescriptive name. I would rename it to something like gridMouseover. function changeState(event) { state = event.target.id; switch (state) { case "reset": sketchReset(); break; case "gridSize": sizeReset(); } } In my opinion, reset and gridSize shouldn't affect the global state, as they are one-time actions. As written, your code works, though restructuring the code will help for future modifications. function changeState(event) { switch (event.target.id) { case "reset": sketchReset(); break; case "gridSize": sizeReset(); break; case "sketch": case "reset": state = event.target.id; break; } } const pixels = document.querySelectorAll(".pixel"); (code modified to align with the createGrid change) There's no need for querySelectorAll here - just use document.getElementsByClassName("pixel") for better performance and more code clarity. function changeColor(event) { if (state === "eraser") { event.target.setAttribute("style", "background-color:'transparent';"); return; } event.target.setAttribute("style", "background-color:black;"); } This doesn't need to use the entire event object, just the pixel element that's being hovered, so only pass that as the argument. 
Full code: function createGrid(rowSize, colSize) { let grid = []; for (let i = 0; i < rowSize; i++) { grid[i] = document.createElement("div"); grid[i].setAttribute("class", "container"); let pixels = []; for (let j = 0; j < colSize; j++) { pixels[j] = document.createElement("div"); pixels[j].setAttribute("class", "pixel"); } grid[i].append(...pixels); } return grid; } (() => { const GRID_WIDTH = 16; const GRID_HEIGHT = 16; const sketchEl = document.querySelector("#sketch"); const eraseEl = document.querySelector("#erase"); const buttonsEl = document.querySelector(".buttons"); let gridCont = document.querySelector("#grid-container"); let state; let grid = createGrid(GRID_WIDTH, GRID_HEIGHT); gridCont.append(...grid); buttonsEl.addEventListener("click", changeState); gridCont.addEventListener("mouseover", gridMouseover, false); function changeState(event) { switch (event.target.id) { case "reset": sketchReset(); break; case "gridSize": sizeReset(); break; case "sketch": case "reset": state = event.target.id; break; } } function sketchReset() { const pixels = document.querySelectorAll(".pixel"); Array.from(pixels).forEach((pxl) => pxl.setAttribute("style", "background-color:'transparent';") ); } function sizeReset() { let size = parseInt(prompt("Enter grid size (MAX=100) : ")); if (size <= 100) { grid = createGrid(size, size); gridCont.innerHTML = ""; gridCont.append(...grid); } } function gridMouseover(event) { if (!event.target.classList.contains("pixel")) { return; } changeColor(event.target); } function changeColor(pixel) { if (state === "eraser") { pixel.setAttribute("style", "background-color:'transparent';"); return; } pixel.setAttribute("style", "background-color:black;"); } })(); body { display: flex; flex-direction: column; align-items: center; justify-content: center; } #header { font-size: 2.5em; } .flex-container { display: flex; gap: 2em; } .buttons { display: flex; flex-direction: column; justify-content: center; gap: 2em; } .button { font-size: 
x-large; } .grid-container { display: flex; height: 30em; width: 30em; border: 1px solid black; } .grid-container > div { flex: 1; display: flex; flex-direction: column; } .grid-container > div > * { flex: 1; } <!DOCTYPE html> <html lang="en"> <head> <meta charset="UTF-8" /> <meta name="viewport" content="width=device-width, initial-scale=1.0" /> <meta http-equiv="X-UA-Compatible" content="ie=edge" /> <title>Etch-a-Sketch</title> <link rel="stylesheet" href="style.css" /> </head> <body> <script src="index.js" defer></script> <h1 id="header">Etch-a-sketch!</h1> <div class="flex-container"> <div class="buttons"> <button class="button" id="sketch">Sketch</button> <button class="button" id="eraser">Erase</button> <button class="button" id="reset">Reset</button> <button class="button" id="gridSize">Grid Size</button> </div> <div id="grid-container" class="grid-container"></div> </div> </body> </html> Some suggestions for future improvements: only draw pixels while the mouse is held - check out the mousedown and mouseup events. display validation errors for sizeReset.
{ "domain": "codereview.stackexchange", "id": 43770, "tags": "javascript, beginner, html, css" }
Creating Packages of ros--package_name type
Question: Folks, How do I convert my catkin workspace into the equivalent of a, for instance, ros-indigo-openni2_camera type? By that I mean, how can I distribute my packages to other computers without having to share my code? I want to have the same effect as when I do, for instance, sudo apt-get install ros-indigo-openni2-launch on my terminal. Thanks. Originally posted by Pototo on ROS Answers with karma: 803 on 2016-11-01 Post score: 0
{ "domain": "robotics.stackexchange", "id": 26120, "tags": "ros, catkin" }
tf2 cannot lookup transform (Lookup would require extrapolation)
Question: I've seen a few posts on this but none have helped me solve the problem. I am trying to look up the TF between two links but keep getting the same extrapolation error. ros::Duration cache_(10); tf2_ros::Buffer buf_(cache_); tf2_ros::TransformListener listener_(buf_); /* other random stuff */ geometry_msgs::TransformStamped grasp; try { geometry_msgs::TransformStamped grasp = buf_.lookupTransform("base_footprint", "jaco_link_hand", ros::Time(0), ros::Duration(11)); } catch (tf2::TransformException &ex) { cout << ex.what() << endl; } Running this code always results in: Lookup would require extrapolation at time 1425582589.007301092, but only time 1425582589.107081890 is in the buffer, when looking up transform from frame [jaco_link_hand] to frame [base_footprint] My understanding was that ros::Time(0) should just give me the latest TF however that does not seem to be the case. This is all running on the robot's onboard computer (my thought was network latency at first), and I have also tried allowing the program to run a while to fill the buffer. Running tf_monitor results in: Node: unknown_publisher 108.674 Hz, Average Delay: -0.109925 Max Delay: 0.163927 UPDATE 1 Removing the cache time (the ros::Duration) from the constructor fixes the problem. This seems odd since 10 is the default value so nothing should change. Originally posted by rtoris288 on ROS Answers with karma: 1173 on 2015-03-05 Post score: 2 Answer: Time(0) will give you the latest common time. If this is during initialization(or a recently constructed buffer starting to listen) it's quite possible that there's not a complete set of transforms at any common time. This is a race condition on your query vs incoming data. If you get this error continuously there might be a different issue. Since it's a race condition your changing of the buffer size might be independent/from a side effect. 
Originally posted by tfoote with karma: 58457 on 2015-03-05 This answer was ACCEPTED on the original site Post score: 3 Original comments Comment by lr101095 on 2018-05-30: @tfoote I'm getting the same error and i've already tried your suggestion. Would you mind looking at my question and suggesting a fix to my issue? thanks
{ "domain": "robotics.stackexchange", "id": 21063, "tags": "ros, transforms, transform, tf2" }
Complex notation of monochromatic planar waves in Griffith's
Question: The definition of planar waves in Griffith's Electrodynamics textbook is given as: The waves are travelling in the z direction and have no x or y dependence; these are called plane waves, because the fields are uniform over every plane perpendicular to the direction of propagation. Along with this image: Now generally $\mathbf E(x,y,z,t) = \mathbf E_0(x,y,z)e^{i(\mathbf {k.r}-\omega t)}$ So that means for plane waves, $\mathbf E(z,t) = \mathbf E_0(z)e^{i(kz-\omega t)}$ But in the picture that's given, clearly the wave has x dependence. I mean how can a wave be only one dimensional like F(z)? Also, how is the field uniform? (as its changing with time?) Answer: But in the picture that's given, clearly the wave has $x$ dependence. When Griffiths says this, he means that the field value doesn't depend on $x$. So (for example), the value of the electric field at $x = 0$, $y = 12$, $z = 2$, $t = 0$ is always the same as the electric field at $x = 4$, $y = 12$, $z = 2$, $t = 0$, or the electric field at $x = -1000$, $y = 12$, $z = 2$, $t = 0$. Also, how is the field uniform? (as its changing with time?) The statement applies to a particular moment in time. You're correct that on each of the planes perpendicular to the direction of propagation, the field value will depend on time. What Griffiths is saying is that at a particular moment in time, the field is has the same value (direction & magnitude) at every point on a given plane. I mean how can a wave be only one dimensional like F(z)? If I understand what you're asking here correctly, the answer is that we've chosen to look for a solution to Maxwell's equations that varies only with respect to $z$ (and $t$.) A priori, we don't know whether such a solution exists; what Griffiths does in the chapter you're referring to is to show that such solutions do exist and to derive their properties.
{ "domain": "physics.stackexchange", "id": 76113, "tags": "electromagnetism, electromagnetic-radiation, maxwell-equations" }
What are "cycles of anomaly" and "cycles of longitude"?
Question: In several early (pre-1600) astronomical texts I read about "cycles of anomaly" and "cycles of longitude", but it is unclear to me what these terms mean. They were clearly familiar to authors at the time and are undefined in any of the texts I've looked through. How were these terms employed in pre-1600s solar system models? What modern astronomical terms or observable events do they correspond to?
{ "domain": "physics.stackexchange", "id": 4721, "tags": "astronomy, orbital-motion, history, definition" }
Lorentz covariance properties of energy and momentum of electromagnetic field
Question: In special relativity the 4-tuple of numbers, energy and 3 components of momentum, form a 4-vector with respect to Lorentz transformations. Is it true that the analogous 4-tuple of energy and momentum of electromagnetic field in vacuum is also a 4-vector with respect to Lorentz transformation? Answer: Yes, it is true. If $\Sigma$ is any Minkowskian time slice and $T_{EM}^{\mu\nu}$ is the stress-energy tensor of the EM field (assumed to be an isolated system), then $$P^\mu = \int_\Sigma T_{EM}^{\mu0} dx^1dx^2dx^3$$ (assuming the integral converges) turns out to be a four vector and has the meaning you said. Notice that the four vector $P^\mu$ does not depend on the Minkowski reference frame whose $\Sigma$ is a spatial section. Actually all that is valid for every isolated field or isolated continuous system that vanishes sufficiently fast at spatial infinity. Here is the shortest proof I am able to produce. So assume that, in Minkowski spacetime, $$\nabla_\nu T^{\mu \nu}=0\tag{1}$$ (isolated system!). Consider two time slices $\Sigma$ and $\Sigma'$ associated to a pair of generally different future-oriented Minkowskian reference frames $x^0,x^1,x^2,x^3$ and $x'^0,x'^1,x'^2,x'^3$ respectively. Finally, consider a $4D$ cylinder $C$ with bases in $\Sigma$ and $\Sigma'$ and lateral surface of timelike type. We henceforth consider the portion of the system contained in the cylinder and we suppose that this lateral surface of $C$ is sufficiently close to the spatial infinity that the system vanishes outside $C$ between $\Sigma$ and $\Sigma'$. 
If $\omega$ is a constant co-vector, (1) implies $$\nabla_\nu (\omega_\mu T^{\mu\nu})=0\:.$$ The divergence theorem applied to $C$, dropping the contribution of the lateral surface, leads to $$\int_{\Sigma} \omega_\mu T^{\mu0} dx^1dx^2dx^3= \int_{\Sigma'} \omega'_\alpha T'^{\alpha 0} dx'^1dx'^2dx'^3\:.$$ This means that the map $$\omega \mapsto \int_{\Sigma} \omega_\mu T^{\mu0} dx^1dx^2dx^3 = \int_{\Sigma'} \omega'_\alpha T'^{\alpha 0} dx'^1dx'^2dx'^3$$ does not depend on the Minkowski reference frame used nor on the instants of time used to define $\Sigma$ and $\Sigma'$ in each reference frame. The crucial observation is now that the above map (a) is linear (b) produces real numbers. A linear map from the space of co-vectors to $\mathbb{R}$ is an element of the dual space of co-vectors, i.e., it is a vector! We conclude that there must exist a constant four-vector $P$, such that $$\omega_\mu P^\mu =\int_{\Sigma} \omega_\mu T^{\mu0} dx^1dx^2dx^3 = \int_{\Sigma'} \omega'_\alpha T'^{\alpha 0} dx'^1dx'^2dx'^3 = \omega'_\alpha P'^\alpha\:,$$ for every co-vector $\omega$. Since $\omega$ is constant, it can be carried out of the integration symbol. Its arbitrariness eventually implies that $$P^\mu = \int_{\Sigma} T^{\mu0} dx^1dx^2dx^3$$ defines a four-vector (the same independently of the choice of the reference frame). In particular, $$P'^\alpha = \Lambda^\alpha_\mu P^\mu \tag{2}$$ when $$x'^\alpha = \Lambda^\alpha_\mu x^\mu + b^\alpha\:.$$ As a final remark, I stress that if we use the same reference frame so that $\Sigma$ and $\Sigma'$ are simply the rest spaces at different times, independence of $P^\mu$ from the time labeling the time slice just says that the components of the four-momentum are separately constant in time in that (generic) reference system. Energy and momentum are separately conserved in every fixed inertial reference frame. ADDENDUM: the simplest example of a consistent EM field + charges model. 
Coming back to the case of the EM field, when we add the source of the field to the picture and we consider the total system EM field + charges, the dynamically conserved stress-energy tensor is the total stress-energy tensor of the system. The conserved total quantities should be defined accordingly. As discussed in the last item of @Andrew Steane's excellent answer, a classical model of a finite-radius elementary electric charge gives rise to the insurmountable problem of the 4/3 factor. This happens when one tries to interpret the mass and the momentum of an elementary charge as of completely electromagnetic nature. One could try to add unknown Poincaré stresses without solving the physical problem (and the zero-radius model leads to the even worse issue of an infinite self-force). A posteriori, all that seems to be related to the physical fact that an electron is a quantum particle and any classical description is hopeless from scratch. However the quantum problem is affected by similar issues, those of renormalization. In any case, effective macroscopic models, for charged fluids or gases, can be constructed and used for the total system EM field + charges, where the charges are those of a macroscopic system. The simplest one is the model of a rarefied gas where the internal stresses of the gas can be approximately disregarded. This is nothing but the perfect-fluid stress-energy tensor with zero pressure. (The model can be extended to the presence of pressure and macroscopic stresses for a concrete macroscopic fluid.) Its stress-energy tensor reads $$T_M^{ab} := \mu V^aV^b\:.$$ Above, (a) $V$ is the field of four-velocities, (b) $\mu \geq 0$ is the density of mass (measured at rest along an integral line of $V$). (c) We also assume that the gas is charged with a density of electric charge $$\rho = k \mu$$ for some constant $k$ (it means that the microscopic charge carriers are identical). (d) We eventually associate a current $J^a = \rho V^a$ to the gas. 
In this situation, the total stress-energy tensor is $$T^{ab} = T_M^{ab}+ T_{EM}^{ab}$$ (in this concrete case no third interaction term shows up on the right-hand side in view of the physical simplicity of the system). We should also assume that the Maxwell equations are satisfied and that the field is completely generated by the said charges. $$\nabla_a F^{ab} = -J^b\:; \quad \epsilon^{abcd}\nabla_b F_{cd}=0\:.$$ Notice that from the former, since $F$ is antisymmetric, we infer both the conservation of the charge and the one of the mass ($k$ is constant!), simply taking the divergence of both sides. A quite lengthy computation proves that $$\nabla_a T^{ab} =0$$ is equivalent to $$\nabla_a T_M^{ab} = F_L^b\:, \quad F_L^b= - J_a F^{ab}$$ the latter density of four-force being just the Lorentz one. In turn, the former equation, taking the conservation of the mass into account, is equivalent to $$\mu V^a\nabla_a V^b = F^b_L\:.$$ This identity asserts that the integral lines of the field of four-velocities evolve according to the presence of the Lorentz force. Notice that, in the absence of the EM field, we would instead have the geodesic equation $$V^a\nabla_a V^b =0\:.$$
{ "domain": "physics.stackexchange", "id": 70739, "tags": "electromagnetism, special-relativity, stress-energy-momentum-tensor" }
What exactly does "Per Unit" refer to?
Question: I am currently reading about Gradient , curl and Divergence in Electromagnetics. Here I don't understand a term called " Per unit ". for example , Divergence is the net outward flux per unit volume . here what is "per unit volume" means ? Answer: Per unit volume is just 'one quantity' of whatever unit you are using to measure volume. If you are using cubic meters as units, then 'flux per unit volume' would be $flux/m^{3}$, meaning there is that much flux for every 'one unit of volume' (for every one cubic meter).
{ "domain": "physics.stackexchange", "id": 77165, "tags": "electromagnetism, vectors, vector-fields" }
Induced Voltage in a Radial Magnetic Field
Question: We have a radial magnetic field, say $$B= \frac{k}{\sqrt{x^2+y^2}}(\cos{\theta}\cdot\hat{i}+\sin\theta\cdot\hat{j})$$ and we have a conducting ring, with its axis along the $z$ axis. What happens if we give this ring a velocity in the $z$ direction? From Faraday's Law, there is no change in flux, and hence there should be no induced EMF. But on each element, there is a magnetic Lorentz Force acting on the electrons along the ring, so they should move, and there should be a current which implies that there is an EMF. Answer: From Faraday's Law, there is no change in flux The magnetic field you propose violates Maxwell's equation $$ \nabla \cdot \mathbf B = 0 $$ on the line $(x=0,y=0)$, which means flux cannot be uniquely assigned to the ring, only to some specific choice of surface attached to the ring. If that surface is plane disk, then flux is zero, but if that surface is a long cylinder hat, then flux is not zero and is increasing as the ring moves along $z$. and hence there should be no induced EMF. Correct, but for different reason: induced EMF in general is due to induced electric field, which is not present in this case at all (vanishes everywhere). What you meant to say is that due to zero change in flux, there should be no EMF at all . That is not true (because we can't say flux is not changing, because there isn't unique way to assign flux). In the hypothetical case the magnetic field was as you proposed (which would be a major discovery contradicting standard EM theory), there would be motional EMF $$ \oint_{ring} (\mathbf v \times \mathbf B) \cdot d\mathbf l \neq 0 $$ and Faraday's law wouldn't be obeyed (because there would be no unique flux).
{ "domain": "physics.stackexchange", "id": 81289, "tags": "electromagnetism, magnetic-fields, electric-current, electromagnetic-induction" }
How to come up with Simon's Algorithm circuit for this 3 qubit system? (given truth table & s)
Question: I have this truth table, knowing that the S = 110. However, how do I come up with a circuit by recognizing the pattern? It has taken me hours but my 2 brain cells don't seem to get it... I know there must be an intuitive guide to finding a pattern when implementing in circuits, but what is it? Is there a general rule of thumb for solving this? Answer: I'm not saying this is a good way of doing it, but if you're completely stumped, there is always a fallback: use a multi-controlled-not. For example, if you use controlled-controlled-controlled-not, you can choose which of the 3 target bits to set to 1 (the second) if all three input bits are 1. Then, you flip the third input bit and apply another controlled-controlled-controlled-not. This applies an X if the original input was 110, so we do two of those, one targeting the first qubit, and one targeting the third. And so on... (There are much better ways in this particular case. For instance, compare the xor of the 3 input bits with your intended first output bit. Also, compare your third input bit and the second output bit.)
{ "domain": "quantumcomputing.stackexchange", "id": 5007, "tags": "circuit-construction, quantum-circuit, simons-algorithm" }
Pythonic code to convert 3 digit number into all possible letter combinations
Question: Given a dictionary where 1:a , 2:b ... 26:z. I need to find all the possible letter combinations that can be formed from the three digits. Either each digit should translate to a letter individually or you can combine adjacent digits to check for a letter. You can't change the order of the digits. For example - 121 translates to aba, au, la; 151 translates to aea, oa; 101 translates to ja; I was able to get this working but I feel my code is not very "pythonic". I am trying to figure out a more efficient & python-like solution for this problem. # creating the dict that has keys as digits and values as letters root_dict = {} for num in range(0,26): root_dict[str(num+1)] = string.ascii_lowercase[num] # asking user for a three digit number sequence_to_convert = raw_input('Enter three digit number \n') # storing all possible permutations from the three digit number first_permutation = sequence_to_convert[0] second_permutation = sequence_to_convert[1] third_permutation = sequence_to_convert[2] fourth_permutation = sequence_to_convert[0]+sequence_to_convert[1] fifth_permutation = sequence_to_convert[1]+sequence_to_convert[2] # checking if the permutations exist in the dict, if so print corresponding letters if first_permutation in root_dict and second_permutation in root_dict and third_permutation in root_dict: print root_dict[first_permutation]+root_dict[second_permutation]+root_dict[third_permutation] if first_permutation in root_dict and fifth_permutation in root_dict: print root_dict[first_permutation]+root_dict[fifth_permutation] if fourth_permutation in root_dict and third_permutation in root_dict: print root_dict[fourth_permutation]+root_dict[third_permutation] Answer: First, Python 2 is going to be no longer supported in less than a year. If you are starting to learn Python now, learn Python 3. 
In your code the only differences are that print is a function now and no longer an expression (so you need ()) and that raw_input was renamed input (and the Python 2 input basically no longer exists). Your building of the root dictionary can be simplified a bit using a dictionary comprehension: from string import ascii_lowercase num_to_letter = {str(i): c for i, c in enumerate(ascii_lowercase, 1)} For the first three different permutations you can use tuple unpacking: first, second, third = sequence_to_convert Note that you are currently not validating if the user entered a valid string. The minimum you probably want is this: from string import digits digits = set(digits) sequence_to_convert = input('Enter three digit number \n') if len(sequence_to_convert) != 3: raise ValueError("Entered sequence not the right length (3)") if not all(x in digits for x in sequence_to_convert): raise ValueError("Invalid characters in input (only digits allowed)") (A previous version of this answer used str.isdigit, but that unfortunately returns true for digitlike strings such as "¹"...) Your testing and printing can also be made a bit easier by putting the possible permutations into a list and iterating over it: permutations = [(first, second, third), (first, fifth), (fourth, third)] for permutation in permutations: if all(x in num_to_letter for x in permutation): print("".join(map(num_to_letter.get, permutation))) However, in the end you would probably want to make this more extendable (especially to strings longer than three). For that you would need a way to get all possible one or two letter combinations, and that is hard. It is probably doable with an algorithm similar to this one, but it might be worth it to ask a question on Stack Overflow about this.
{ "domain": "codereview.stackexchange", "id": 34083, "tags": "python" }
Vacuum Tank Emptying Transient Equation
Question: I am trying to derive the differential equation for pulling vacuum on an air tank. The solution to the ODE: $$P(t)=P_2 \cdot exp(\frac{dV}{dt} \cdot \frac{t}{V_s})$$ Where $\frac{dV}{dt}$= flow rate out of tank in ACFM (assuming constant for all time t), t = time in minutes, $V_s$ = tank volume in cubic feet, $P_2$ = final pressure, $P_1$ = pressure at time t Assumptions: ideal gas, constant gas flow rate My derivation begins with writing the following differential equation: $$dP=P(1−\frac{dV}{V_s})$$ How do I get to the solution from this? Answer: I did get the same result but started from this: $$\frac{dP}{ P } = (\frac{dV}{dt} / Vs) * dt$$ Then integrating both sides $$ ∫\frac{1}{P} dP = ∫ (\frac{dV}{dt} / Vs) * dt$$ Integrating: $$ln|P| = (\frac{dV}{dt} / Vs) * t + C1$$ Now, we'll exponentiate both sides: $$P = exp((\frac{dV}{dt} / Vs) * t + C1)$$ we drop the absolute value for P: it's always positive. Combine the constants C1 and exp(C1) into one constant, let's call it C: $$P = C * exp((\frac{dV}{dt} / Vs) * t )$$ This is your answer by replacing C with its boundary value which is $P_2$. $$P = P_2 * exp((\frac{dV}{dt} / Vs) * t )$$
{ "domain": "engineering.stackexchange", "id": 5248, "tags": "fluid-mechanics, thermodynamics" }
In vertical circular motion of a mass attached to a rope, where does the elastic potential energy stored in the tension go?
Question: When a bob is attached to a rope where the rope has negligible mass, the bob undergoing circular motion will have its KE1+PE1 at any point of the circular motion equal to KE2+PE2 at another point of the circular motion; however what happened to the elastic potential energy stored in the tension? When the bob is at the bottom of the circular motion, tension will be maximum; whereas when the bob is at the top of the circular, tension will be minimum (or even zero). From my understanding, if there is a tension in a rope, then there should be elastic potential energy stored so then where did the elastic potential energy stored in the rope go when the bob is at the top of the circular motion? Answer: I think you are just missing a key point. From my understanding, if there is a tension in a rope, then there should be elastic potential energy stored so then where did the elastic potential energy stored in the rope go when the bob is at the top of the circular motion? Elastic energy occurs when objects are compressed or stretched.The presence of tension doesn't necessarily imply that there will be elastic potential energy.In general cases of vertical circular motion we assume that string is inextensible and therefore we don't have to consider any elastic energy. Note: It is a well-known fact that no body is perfectly rigid. So,theoretically there might be a little extension in the string but practically it doesn't matter in general cases. Reference: https://en.wikipedia.org/wiki/Elastic_energy
{ "domain": "physics.stackexchange", "id": 53076, "tags": "newtonian-mechanics, energy, energy-conservation, potential-energy" }
What state of matter is Lightning?
Question: I know of solid, liquid and gaseous states of matter but what state of matter is lightning? Is it gas, liquid or solid? Answer: Thunder is a sonic boom, generated by the rapid heating of the atmosphere by the lightning discharge. The heat front moves faster than sound, generating the sonic boom. Thus what you hear is a pressure wave, and it can be carried by plasma, gas, liquid, or solid: by all of the states of matter.
{ "domain": "physics.stackexchange", "id": 28287, "tags": "acoustics, plasma-physics, matter, lightning" }
Why do water particles appear to be exiting a stream of water from a faucet, instead of the stream being held together by surface tension?
Question: I recently noticed an interesting phenomenon while watching the tap running in my kitchen. At the time, there was bright sunlight coming in the kitchen window. As the stream was running out of the tap into the sink the sunlight was shining directly on to it. This allowed me to observe that there was a constant appearance of water particles appearing to exit the stream for about three or four inches (approx. 10 cm) before falling into the sink. This appeared to be happening throughout the full length of the stream. What could be causing this to happen? Shouldn't the surface tension hold the stream together? Answer: Did your faucet nozzle contain a bubbler (which mixes air into the water and produces a white, frothy stream down to the bottom of the sink)? Most kitchen faucets do, and when the bubbles closest to the surface of the stream pop, they spit out a tiny droplet of water with surprising speed.
{ "domain": "physics.stackexchange", "id": 98150, "tags": "fluid-dynamics, everyday-life, flow, surface-tension" }
lambda calculus as a type theory
Question: From the Introduction section of Homotopy Type Theory book: Type theory was originally invented by Bertrand Russell ... It was later developed as a rigorous formal system in its own right (under the name "$\lambda$-calculus"). Can anyone explain this sentence to me? I'm having trouble seeing $\lambda$-calculus as a type system. (also in the same paragraph, $\lambda$-calculus called as "Church's type system") Answer: In the very next paragraph of that book (see HoTT, p. 2) they give an informal characterization of what qualifies a language system (or logic) as a type system, namely that: in it all terms are typed. By "Church's type system" they're referring to the simply typed lambda calculus (introduced in 1940). In the 1930s, in particular while preparing his negative solution to the decision problem, Church had introduced a system of untyped lambda calculus. This, of course, was not a type theory. The 1940 system, however, is a type theory, because in it all terms are typed (see, for example (T&S), Section 1.2 for how exactly this is done). By "lambda-calculus as a type system" they're referring to this second, simply typed version of lambda calculus. (T&S) Troelstra, A.S. & Schwichtenberg, H. Basic Proof Theory, 1996.
{ "domain": "cs.stackexchange", "id": 1663, "tags": "lambda-calculus, type-theory" }
Advice to improve code in simple webpage
Question: I'm trying to make a few webpages that will allow us in the tech support team at the factory I work at to provide support faster by adding a simple interface to actions we perform every day directly on the database. The following is for a simple page where we will enter a list of serial numbers and select from a few choices of actions to perform on them (actions are mostly executing a stored procedure or query in order to change something in the database). It consists of a textbox where we will enter a list of serials, a dropdown list and a checkbox. These basically move the serials to a different location and remove their flag as a finished product in the database. It also has two gridviews to show the serial data before and after the change. All this is inside panels to make each visible/invisible easily depending on where we are in the execution. The actual calls to the database are in a different class which I'm not including here but are very straightforward parametrized queries and stored procedures. I'm by no means an expert programmer but very interested in moving my career in that direction. I would like to know how a more experienced professional would have done this. This is the code behind the webpage. Any comments are welcome, thank you very much. 
namespace UnpackTool { public partial class _Default : System.Web.UI.Page { protected void Page_Load(object sender, EventArgs e) { } #region ViewStates private List<string> CRIDS { get { if (ViewState["CRIDS"] != null ) { return (List<string>)ViewState["CRIDS"]; } return null; } set { ViewState["CRIDS"] = value; } } private List<string> SerialNumbers { get { if (ViewState["SerialNumbers"] != null) { return (List<string>)ViewState["SerialNumbers"]; } return null; } set { ViewState["SerialNumbers"] = value; } } private DataTable SerialData { get { if (ViewState["SerialData"] != null) { return (DataTable)ViewState["SerialData"]; } return null; } set { ViewState["SerialData"] = value; } } #endregion #region Helpers //Clear viewstate values and grids to start over private void ClearSerials() { CRIDS = null; SerialNumbers = null; SerialData = null; CRIDS = null; SearchGrid.DataSource = null; ResultsGrid.DataSource = null; SearchGrid.DataBind(); ResultsGrid.DataBind(); } // Switch view between panels according to current state public void SwitchView(string sender) { if (sender == "Search") { Panel1.Visible = false; Panel2.Visible = true; Panel3.Visible = false; SerialBox.Enabled = false; } if (sender == "Cancel") { Panel1.Visible = true; Panel2.Visible = false; Panel3.Visible = false; SerialBox.Enabled = true; UnpackCheck.Checked = false; ProcessList.SelectedValue = "NO CHANGE"; HideErrorMessage(); } if (sender == "Accept") { Panel1.Visible = false; Panel2.Visible = false; Panel3.Visible = true; SerialBox.Enabled = false; } if (sender == "Return") { Panel1.Visible = true; Panel2.Visible = false; Panel3.Visible = false; SerialBox.Enabled = true; SerialBox.Text = ""; UnpackCheck.Checked = false; ProcessList.SelectedValue = "NO CHANGE"; HideErrorMessage(); } } private void ShowErrorMessage(string Error) { ErrorLabel.Text = Error; ErrorLabel.ForeColor = System.Drawing.Color.Red; ErrorLabel.Visible = true; } private void HideErrorMessage() { ErrorLabel.Text = ""; 
ErrorLabel.ForeColor = System.Drawing.Color.Black; ErrorLabel.Visible = false; } #endregion #region Events //Read from textbbox and get serial info from db protected void SearchBtn_Click(object sender, EventArgs e) { GetSerials(); SwitchView("Search"); //Only get serial info from db if actual data is entered in textbox if (SerialNumbers.Count > 0) { GetSerialData(); GetModelProcess(); } } //Check if serials should be moved and/or unpacked, and perform actions protected void AcceptBtn_Click(object sender, EventArgs e) { //only do something if valid data is entered if (SerialNumbers.Count > 0) { try { //If Unpack is checked, serials are unpacked if (UnpackCheck.Checked) { UnpackSerials(); } //When value is NO CHANGE, serials arent moved if (ProcessList.SelectedValue.ToString() != "NO CHANGE") { MoveSerials(); } //Get serial info again to see results GetSerialData(); SwitchView("Accept"); HideErrorMessage(); } catch (Exception ex) { ShowErrorMessage(ex.Message); } } } //Reset view protected void CancelBtn_Click(object sender, EventArgs e) { SwitchView("Cancel"); ClearSerials(); } //Reset view protected void ReturnBtn_Click(object sender, EventArgs e) { SwitchView("Return"); ClearSerials(); } #endregion #region DataAccess //Get serial info from db private void GetSerials() { //If CrateID is selected search by CRID if (InputType.SelectedValue.ToString() == "CrateID") { //Read from textbox into CRID viewstate list CRIDS = new List<string>(SerialBox.Text.Split(new string[] { "\r\n" }, StringSplitOptions.RemoveEmptyEntries)); //Method received list of CRID and returns list of serials SerialNumbers = Dal.ExecuteCRIDSerialQuery(CRIDS).AsEnumerable().Select(x => x[0].ToString()).ToList(); } else { //Read from textbox into SerialNumber viewstate list SerialNumbers = new List<string>(SerialBox.Text.Split(new string[] { "\r\n" }, StringSplitOptions.RemoveEmptyEntries)); } } private void UnpackSerials() { //Unpack method receives list, returns nothing 
Dal.ExecuteUnpackQuery(SerialNumbers); } private void MoveSerials() { //Move serials, executes stored procedure, receives single serialnumber, iterate through list executing for each if (SerialData.Rows.Count > 0) { foreach (DataRow row in SerialData.Rows) { Dal.ExecuteMoveQuery(row["TSN"].ToString(), row["Model"].ToString(), ProcessList.SelectedValue.ToString()); } } } private void GetSerialData() { DataTable serialdata = new DataTable(); //Get serial data from db, method receives list of serials, returns data table SerialData = Dal.ExecuteResultsQuery(SerialNumbers); //Fill datagrids SearchGrid.DataSource = SerialData; ResultsGrid.DataSource = SerialData; SearchGrid.DataBind(); ResultsGrid.DataBind(); } //Gets processed used by the models listed in the serial data, can receive different models, binds to dropdownlist private void GetModelProcess() { if (SerialNumbers != null) { DataTable Processes = Dal.ExecuteProcessByModelQuery(SerialData.AsEnumerable().Select(x => x["Model"].ToString()).ToList()); ProcessList.DataSource = Processes; ProcessList.DataTextField = "Process_Name"; ProcessList.DataValueField = "Process_Name"; ProcessList.DataBind(); //When value is NO CHANGE, serials arent moved ProcessList.SelectedValue = "NO CHANGE"; } } #endregion } } Answer: A few examples of opportunities to: Be more concise with the flow of control to improve readability and reduce the likelyhood of introducing bugs later on Breaking out/abstracting some operations to prevent duplication Conditionals The SwitchView() method will pass though each if statement even though it will match either none or one. 
In some cases avoiding unnecessary conditionals might be a performance consideration, but here it's just to be concise and improve readability: You could use either a series of if() {} else if() {}'s: if (condition1) { //stuff } else if (condition2) { //stuff } else if (condition3) { //stuff } else { //nothing matched } Or in this case as you're checking only for string equality a switch as follows (generally worth thinking about case sensitivity here as well): public void SwitchView(string sender) { switch (sender) { case "Search": Panel1.Visible = false; Panel2.Visible = true; Panel3.Visible = false; SerialBox.Enabled = false; break; case "Cancel": Panel1.Visible = true; Panel2.Visible = false; Panel3.Visible = false; SerialBox.Enabled = true; UnpackCheck.Checked = false; ProcessList.SelectedValue = "NO CHANGE"; HideErrorMessage(); break; case "Accept": Panel1.Visible = false; Panel2.Visible = false; Panel3.Visible = true; SerialBox.Enabled = false; break; case "Return": Panel1.Visible = true; Panel2.Visible = false; Panel3.Visible = false; SerialBox.Enabled = true; SerialBox.Text = ""; UnpackCheck.Checked = false; ProcessList.SelectedValue = "NO CHANGE"; HideErrorMessage(); break; //optionally you could handle a situation of an unexpected sender as follows //default: // throw new Exception("Unexpected sender '" + sender + "' in SwitchView()"); } } To remove some of those strings you might also define an enum like this giving you a strongly typed method of indicating what kind of action is triggering the view change: public enum ActionViewStyle { Search, Cancel, Accept, Return }; public void SwitchView(ActionViewStyle sender) { switch (sender) { case ActionViewStyle.Search: Panel1.Visible = false; //etc... } } Magic strings/numbers It's often considered good practice to avoid the use of magic strings and numbers where practical. 
For example, the following is easier to read, you can find all references, and prevents bugs introduced by a typo in some instance of the string: private const string VS_KEY_CRIDS = "CRIDS"; private const string VS_KEY_SERIAL_NUMBERS = "SerialNumbers"; private const string VS_KEY_SERIAL_DATA = "SerialData"; private const string MSG_PROCS_NO_CHANGE = "NO CHANGE"; private const string COL_NAME_TSN = "TSN"; private const string COL_NAME_MODEL = "Model"; //etc... private List<string> CRIDS { get { if (ViewState[VS_KEY_CRIDS] != null) { return (List<string>)ViewState[VS_KEY_CRIDS]; } return null; } set { ViewState[VS_KEY_CRIDS] = value; } } You might also consider the approach for other strings reused throughout the class. There are a few approaches to storing strings/numbers, probably try to make a decision balancing the benefits abstracting the configuration, vs the complexity it introduces to what might otherwise be a trivial operation. Some options are: Local constants (above). Easy, simple, not so flexible Static class which can be referenced elsewhere External/independent configuration (file, database, etc) Splitting Strings I notice you're splitting up the user input like this in a few places: new List<string>(SerialBox.Text.Split(new string[] { "\r\n" }, StringSplitOptions.RemoveEmptyEntries)); Nothing wrong with this, but you might consider breaking out the delimiter definition to a single location you can refer to, and the operation of getting a list of values from a delimited string into it's own method if it's something you'll be doing a lot: //static string[] valueDelimiters = new string[] { Environment.NewLine }; static string[] valueDelimiters = new string[] { "\r\n", "\n" }; //if you want to handle LF and CRLF private IEnumerable<string> GetValuesAsList(string values, string [] delimiters) { return values.Split(valueDelimiters, StringSplitOptions.RemoveEmptyEntries).ToList(); } private void GetSerials() { //If CrateID is selected search by CRID if 
(InputType.SelectedValue.ToString() == "CrateID") { //Read from textbox into CRID viewstate list CRIDS = GetValuesAsList(SerialBox.Text, valueDelimiters); //Method received list of CRID and returns list of serials SerialNumbers = Dal.ExecuteCRIDSerialQuery(CRIDS).AsEnumerable().Select(x => x[0].ToString()).ToList(); } else { //Read from textbox into SerialNumber viewstate list SerialNumbers = GetValuesAsList(SerialBox.Text, valueDelimiters); } } Input Sanitising As you're taking user input from SerialBox and passing it into a data access layer which may be executing some sql query, it's worth checking to make sure that you're guarding against injection with something like parameterised queries Error Handling Looking at the calls into the data access layer, I'm just wondering if they might ever throw an exception (data repository unreachable, malformed query, bad data, etc). If so it would be a worthwhile investment in handling them and logging/display an appropriate message.
{ "domain": "codereview.stackexchange", "id": 5400, "tags": "c#" }
Is this is good way to check for null?
Question: I ran into the code below today, and my gut instinct tells me this is an expensive way to do a null check. The point the author was making was that if you changed the name of the object then you don't need to change the value of the string being thrown in the exception. The proposed use would be: string cantBeNull=...; Guard.IsNotNull(cantBeNull); //instead of if(cantBeNull == null) throw new ArgumentNullException("cantBeNull"); So Is this an acceptable way of checking for null? Is this overly expensive just to save you from not having the change the value passed to the exception? Code in question: public static void IsNotNull<T>(Expression<Func<T>> expression) where T : class { if (expression == null) throw new ArgumentNullException("expression"); var value = expression.Compile()(); var param = (MemberExpression)expression.Body; var paramName = param.Member.Name; if (value != null) return; throw new ArgumentNullException(paramName); } Answer: How often do you change parameter names? How often does the code run? Under any normal distribution of those, you should go with a minor code efficiency gain over a programmer efficiency gain. On the other hand, if you (for some bizarre reason) change the parameter name almost every time the code runs, the tradeoff is probably worth it. But just remember, that whether or not the string matches the actual parameter name, it should still let you find exactly which parameter was null so long as there isn't anywhere else in that function which throws an ArugmentNullException with the same string.
{ "domain": "codereview.stackexchange", "id": 2754, "tags": "c#" }
Are magnetic fields just modified relativistic electric fields?
Question: Feynman's Lectures, Volume 2, says that the electromagnetic force is invariant in any reference frame, and the magnetic force in one frame becomes the electric field in another. And Wikipedia says: That is, the magnetic field is simply the electric field, as seen in a moving coordinate system. Can we then say that the magnetic field is just a modified "relativistic" version of the electric field? Answer: In above Figure-02 an inertial system $\:\mathrm S'\:$ is translated with respect to the inertial system $\:\mathrm S\:$ with constant velocity \begin{equation} \boldsymbol{\upsilon}=\left(\upsilon_{1},\upsilon_{2},\upsilon_{3}\right)=\left(\upsilon \mathrm n_{1},\upsilon \mathrm n_{2},\upsilon \mathrm n_{3}\right)=\upsilon \mathbf n\,, \qquad \upsilon \in \left(-c,c\right) \tag{01} \end{equation} The Lorentz transformation is \begin{align} \mathbf{x}^{\boldsymbol{\prime}} & = \mathbf{x}+(\gamma-1)(\mathbf{n}\boldsymbol{\cdot} \mathbf{x})\mathbf{n}-\gamma \boldsymbol{\upsilon}t \tag{02a}\\ t^{\boldsymbol{\prime}} & = \gamma\left(t-\dfrac{\boldsymbol{\upsilon}\boldsymbol{\cdot} \mathbf{x}}{c^{2}}\right) \tag{02b} \end{align} in differential form \begin{align} \mathrm d\mathbf{x}^{\boldsymbol{\prime}} & = \mathrm d\mathbf{x}+(\gamma-1)(\mathbf{n}\boldsymbol{\cdot} \mathrm d\mathbf{x})\mathbf{n}-\gamma\boldsymbol{\upsilon}\mathrm dt \tag{03a}\\ \mathrm d t^{\boldsymbol{\prime}} & = \gamma\left(\mathrm d t-\dfrac{\boldsymbol{\upsilon}\boldsymbol{\cdot} \mathrm d\mathbf{x}}{c^{2}}\right) \tag{03b} \end{align} and in matrix form \begin{equation} \mathbf{X}^{\boldsymbol{\prime}}= \begin{bmatrix} \mathbf{x}^{\boldsymbol{\prime}}\vphantom{\dfrac{\gamma\boldsymbol{\upsilon}^{\boldsymbol{\top}}}{c}}\\ c t^{\boldsymbol{\prime}}\vphantom{\dfrac{\gamma\boldsymbol{\upsilon}^{\boldsymbol{\top}}}{c}} \end{bmatrix} = \begin{bmatrix} \mathrm I+(\gamma-1)\mathbf{n}\mathbf{n}^{\boldsymbol{\top}} & -\dfrac{\gamma\boldsymbol{\upsilon}}{c} 
\vphantom{\dfrac{\gamma\boldsymbol{\upsilon}^{\boldsymbol{\top}}}{c}}\\ -\dfrac{\gamma\boldsymbol{\upsilon}^{\boldsymbol{\top}}}{c} & \hphantom{-}\gamma \end{bmatrix} \begin{bmatrix} \mathbf{x}\vphantom{\dfrac{\gamma\boldsymbol{\upsilon}^{\boldsymbol{\top}}}{c}}\\ c t\vphantom{\dfrac{\gamma\boldsymbol{\upsilon}^{\boldsymbol{\top}}}{c}} \end{bmatrix} =\mathrm L\mathbf{X} \tag{04} \end{equation} where $\:\mathrm L\:$ the real symmetric $\:4\times 4\:$ matrix \begin{equation} \mathrm L \equiv \begin{bmatrix} \mathrm I+(\gamma-1)\mathbf{n}\mathbf{n}^{\boldsymbol{\top}} & -\dfrac{\gamma\boldsymbol{\upsilon}}{c} \vphantom{\dfrac{\gamma\boldsymbol{\upsilon}^{\boldsymbol{\top}}}{c}}\\ -\dfrac{\gamma\boldsymbol{\upsilon}^{\boldsymbol{\top}}}{c} & \hphantom{-}\gamma \end{bmatrix} \tag{05} \end{equation} and \begin{equation} \mathbf{n}\mathbf{n}^{\boldsymbol{\top}} = \begin{bmatrix} \mathrm n_{1}\vphantom{\dfrac{}{}}\\ \mathrm n_{2}\vphantom{\dfrac{}{}}\\ \mathrm n_{3}\vphantom{\dfrac{}{}} \end{bmatrix} \begin{bmatrix} \mathrm n_{1} & \mathrm n_{2} & \mathrm n_{3} \end{bmatrix} = \begin{bmatrix} \mathrm n_{1}^{2} & \mathrm n_{1}\mathrm n_{2} & \mathrm n_{1}\mathrm n_{3}\vphantom{\dfrac{}{}}\\ \mathrm n_{2}\mathrm n_{1} & \mathrm n_{2}^{2} & \mathrm n_{2}\mathrm n_{3}\vphantom{\dfrac{}{}}\\ \mathrm n_{3}\mathrm n_{1} & \mathrm n_{3}\mathrm n_{2} & \mathrm n_{3}^{2}\vphantom{\dfrac{}{}} \end{bmatrix} \tag{06} \end{equation} a matrix representing the vectorial projection on the direction $\:\mathbf{n}$. The electromagnetic field is an entity and this is more clear if we take a look at its transformation. 
So, for the Lorentz transformation (02), the vectors $\:\mathbf{E}\:$ and $\:\mathbf{B}\:$ are transformed as follows \begin{align} \mathbf{E}' & =\gamma \mathbf{E}\!-\!\left(\gamma\!-\!1\right)\left(\mathbf{E}\boldsymbol{\cdot}\mathbf{n}\right)\mathbf{n}+\,\gamma\left(\boldsymbol{\upsilon}\boldsymbol{\times}\mathbf{B}\right) \tag{07a}\\ \mathbf{B}' & = \gamma \mathbf{B}\!-\!\left(\gamma\!-\!1\right)\left(\mathbf{B}\boldsymbol{\cdot}\mathbf{n}\right)\mathbf{n}\!-\!\dfrac{\gamma}{c^{2}}\left(\boldsymbol{\upsilon}\boldsymbol{\times}\mathbf{E}\right) \tag{07b} \end{align} Now, let the Lorentz force 3-vector on a point particle with charge $\:q\:$ moving with velocity $\:\mathbf{u}\:$ with respect to $\:\mathrm S\:$ \begin{equation} \mathbf{f} = q\left(\mathbf{E}+\mathbf{u}\boldsymbol{\times}\mathbf{B}\right) \tag{08} \end{equation} This Lorentz force 3-vector with respect to $\:\mathrm S'\:$ is \begin{equation} \mathbf{f'} = q\left(\mathbf{E'}+\mathbf{u'}\boldsymbol{\times}\mathbf{B'}\right) \tag{09} \end{equation} Note that the value of the charge $\:q\:$ of a point particle is by hypothesis the same in all inertial systems (a scalar invariant), while the velocity 3-vector $\:\mathbf{u}\:$ is transformed as follows \begin{equation} \mathbf{u}^{\boldsymbol{\prime}} = \dfrac{\mathbf{u}+(\gamma-1)(\mathbf{n}\boldsymbol{\cdot} \mathbf{u})\mathbf{n}-\gamma \boldsymbol{\upsilon}}{\gamma \left(1-\dfrac{\boldsymbol{\upsilon}\boldsymbol{\cdot} \mathbf{u}}{c^{2}}\right)} \tag{10} \end{equation} equation proved by dividing equations (03a), (03b) side by side and setting $\:\mathbf{u}\equiv \mathrm d\mathbf{x}/\mathrm d t\:$, $\:\mathbf{u'}\equiv \mathrm d\mathbf{x'}/\mathrm d t'$. 
Now, if in (09) we replace $\:\mathbf{E'},\mathbf{B'},\mathbf{u'}\:$ by their expressions (07a),(07b) and (10) respectively, then we end up with the following relation between the force 3-vectors \begin{equation} \mathbf{f}^{\boldsymbol{\prime}} = \dfrac{\mathbf{f}+(\gamma-1)(\mathbf{n}\boldsymbol{\cdot} \mathbf{f})\mathbf{n}-\gamma \boldsymbol{\upsilon}\left(\dfrac{\mathbf{f}\boldsymbol{\cdot}\mathbf{u}}{c^{2}}\right)}{\gamma \left(1-\dfrac{\boldsymbol{\upsilon}\boldsymbol{\cdot}\mathbf{u}}{c^{2}}\right)} \tag{11} \end{equation} wherein the quantities of the electromagnetic field $\:\color{red}{\bf DISAPPEARED !!!}$ That's why in the early years of Special Relativity transformation (11) was believed to be valid for any force at least of the same type as the Lorentz force (more exactly for any force that doesn't change the rest mass of the particle). Following the same path by which we construct from (10) the velocity 4-vector $\:\mathbf{U}\:$ \begin{equation} \mathbf{U} =\left(\gamma_{\mathrm u}\mathbf{u}, \gamma_{\mathrm u}c\right) \tag{12} \end{equation} we construct also from (11) the force 4-vector $\:\mathbf{F}\:$ \begin{equation} \mathbf{F} =\left(\gamma_{\mathrm u}\mathbf{f}, \gamma_{\mathrm u}\dfrac{\mathbf{f}\boldsymbol{\cdot}\mathbf{u}}{c}\right) \tag{13} \end{equation} Lorentz transformed \begin{equation} \mathbf{F'} = \mathrm L \mathbf{F} \tag{14} \end{equation} or \begin{equation} \mathbf{F}^{\boldsymbol{\prime}}= \begin{bmatrix} \gamma_{\mathrm u'}\mathbf{f'}\vphantom{\dfrac{\gamma\boldsymbol{\upsilon}^{\boldsymbol{\top}}}{c}}\\ \gamma_{\mathrm u'}\dfrac{\mathbf{f'}\boldsymbol{\cdot}\mathbf{u'}}{c} \end{bmatrix} = \begin{bmatrix} \mathrm I+(\gamma-1)\mathbf{n}\mathbf{n}^{\boldsymbol{\top}} & -\dfrac{\gamma\boldsymbol{\upsilon}}{c} \vphantom{\dfrac{\gamma\boldsymbol{\upsilon}^{\boldsymbol{\top}}}{c}}\\ -\dfrac{\gamma\boldsymbol{\upsilon}^{\boldsymbol{\top}}}{c} & \hphantom{-}\gamma \end{bmatrix} \begin{bmatrix} \gamma_{\mathrm 
u}\mathbf{f}\vphantom{\dfrac{\gamma\boldsymbol{\upsilon}^{\boldsymbol{\top}}}{c}}\\ \gamma_{\mathrm u}\dfrac{\mathbf{f}\boldsymbol{\cdot}\mathbf{u}}{c} \end{bmatrix} =\mathrm L\mathbf{F} \tag{15} \end{equation}
{ "domain": "physics.stackexchange", "id": 49732, "tags": "electromagnetism, special-relativity, electromagnetic-radiation, magnetic-fields, inertial-frames" }
Gazebo fortress won't load once I add Contact plugin
Question: I have this sdf file: <world name="rl_world"> <include> <uri>path/to/plane.sdf</uri> </include> <include> <uri>/path/to/robot.urdf</uri> </include> <plugin filename="libignition-gazebo6-contact-system" name="ignition::gazebo::systems::Contact"> </plugin> </world> When I try to load it in Gazebo with ign gazebo world.sdf it shows a blank grey screen with no entities. It doesn't give an error to the console. If I remove the plugin line it loads fine. Is there something wrong with it? plane.sdf has a contact sensor on its only link and the TouchPlugin. Originally posted by Lestes on Gazebo Answers with karma: 3 on 2023-06-13 Post score: 0 Answer: This is a known issue (https://github.com/gazebosim/gz-sim/issues/796) where having any plugin in your world prevents default plugins from being loaded, so you'll have to add the Physics and ~SystemBroadcaster~ SceneBroadcaster plugins and any other plugins you might need to your world Originally posted by azeey with karma: 704 on 2023-06-13 This answer was ACCEPTED on the original site Post score: 0 Original comments Comment by Lestes on 2023-06-14: Thanks! Working now. Though for anyone in the future it's SceneBroadcaster. I think 'SystemBroadcaster' was a typo.
{ "domain": "robotics.stackexchange", "id": 4709, "tags": "ignition-fortress, contact" }
What is "a vector of $SO(n)$"?
Question: I'm watching (or trying to watch) this lecture from NPTEL on classical field theory. I've understood everything in the series up till this point, including the first half of the lecture on elementary group theory. However, at a certain point he begins talking about a "vector of $SO(d)$". He essentially defines such an object as a d-tuple $\begin{pmatrix}v^1\\v^2\\\dots\\v^d\end{pmatrix}$ such that it transforms as follows under matrices $R\in SO(d)$: $$(v')^i=R^i_{\,j}v^j = \sum_j R_{ij}v_j$$ This is where I began to get confused. Is this not the definition of matrix multiplication by a vector where $v'=Rv$? Because of this I essentially could not make heads or tails of the rest of the lecture, which generalized this idea to a "tensor of $SO(d)$", defining it as an object $T^{i_1i_2\dots i_n}$ which transforms like $$T'^{j_1\dots j_n} = R^{j_1}_{\,i_1}\dots R^{j_n}_{\,i_n}T^{i_1\dots i_n}$$ From here I am essentially lost. Googling relevant terms hasn't been much help - is this a standard notation that I'm not understanding or is it just the cryptic language of this particular lecturer? Answer: The equation you gave is indeed the definition of matrix multiplication, applied to a $d\times d$ matrix and a $d\times 1$ matrix. But the underlying concept is something more. The thing about vectors is that they exist, in some sense, independent of the numbers used to represent them. For example, an ordinary 3D displacement vector represents a physical length and a physical direction. These things are not numbers, they are abstract ideas. You only get the numbers when you choose a coordinate system and then compare the vector to the coordinate axes. Different coordinate systems will give you different sets of numbers for the same vector. Two coordinate systems can be related by transformations, such as rotation and reflection. 
In other words, given coordinate system A, you can identify some transformation that turns it into coordinate system B, and you can come up with a $d\times d$ matrix, $R_{d\times d}$, that represents that transformation. What makes a vector a vector is that the numbers that describe the vector in coordinate system A and the numbers that describe the vector in coordinate system B are related by the same matrix. $$\begin{pmatrix}v_B^1 \\ \vdots \\ v_B^d\end{pmatrix} = R_{d\times d}\begin{pmatrix}v_A^1 \\ \vdots \\ v_A^d\end{pmatrix}\tag{1}$$ The group of all possible transformations has some name. For example, $SO(3)$ is the group of all rotations in 3D space. Accordingly, anything that behaves as a vector (i.e. it follows equation 1) when you rotate the 3D coordinate system is called a vector of $SO(3)$, or an $SO(3)$ vector. In case this seems like it should be obvious, let me point out that there are sets of quantities which don't behave this way, especially when you start talking about other kinds of transformations besides 3D rotations. For example, all possible Lorentz transformations, including both rotations and boosts, form the group $SO(3,1)$. The energy and momentum (of a single particle) form a vector of $SO(3,1)$, because they change in accordance with equation (1) (with $R_{d\times d}$ being a Lorentz transformation matrix) when you change reference frames. But the electromagnetic field does not. You actually need two factors of $R_{d\times d}$ to account for how EM fields change between reference frames. That makes the EM field a rank-2 tensor of $SO(3,1)$. I would also refer you to this question of mine on Math about the meaning of a "physical vector space", which touches on the difference between a mathematical vector and a physical vector. Only the latter is subject to the requirement of equation (1).
{ "domain": "physics.stackexchange", "id": 18016, "tags": "terminology, vectors, group-theory, group-representations, tensor-calculus" }
A box with cooler and heater on opposite faces
Question: Suppose there's a box with one face cold, and the opposite face hot. So when the air molecules hit the cooler face, it will transfer its momentum and energy to the wall, bouncing back with less momentum. And when molecules hit the hotter face, it will bounce back faster. And the hotter side will be hit more frequently by air molecules. So will the box feel any force due to the temperature difference? Answer: You do not mention what is happening on the outside of the box. If the outsides are also cold one side hot the other, and it is in air, you will get a force. See Crooke's radiometer If this is all internal to the box, no net force: Remember the Dr Who episode, where The Doctor is adrift between his TARDIS and some spaceship? The Doctor throws a cricket ball at the spaceship. This gives him a small momentum towards the TARDIS. The cricket ball bounces off the spaceship (we do not see the small change in velocity of the spaceship) and (this being The Doctor) returns directly towards him. He catches the cricket ball, his velocity towards the TARDIS doubles, and he escapes, looking very smug. OK. Now, say, the Doctor is in a big box, still stranded. He throws his ball at the hot wall, and as a result gains a velocity towards the cold wall. No net motion of the box, until the ball strikes the hot wall. The whole box now has 1 cricket ball's worth of momentum towards the TARDIS. The ball bounces off the hot wall, with a bit of momentum added (perhaps a Vogon caught it and threw it back with force). The box now has two cricket ball's worth, plus the heat energy. The Doctor catches the returned ball, and drifts with double (and a bit of heat energy) towards the cold wall. Until he strikes the cold wall, the whole box will be moving towards the TARDIS. But when he strikes the cold wall, all his momentum, exactly opposite to the box momentum, is given back to the box, and it stops. No net change in the box momentum. 
event box ball Dr start 0 0 Throw 0 1 -1 StickHot 1(&ball) 0 -1 BounceHot 2+h -(1+h) -1 CatchHot 2+h 0 -(2+h){Dr&Ball} StickCold 0 = 2+h - (2+h) Too many other events to list. Does the Dr stick? Does he stick & toss? Does he toss back and forth? All, of course, will not result in an average net momentum change of the box. However, if the Dr changes position in the box, that changes the center of gravity, so the box will move a bit. In this way, he escapes by moving the box to where Ramona can grab it. She, being an outside force, can pull the whole contraption in to safety. Say it is just the ball, bouncing between hot and cold. Moving from hot to cold, it will have Mbounce + h. Moving from cold to hot, it will have Mbounce + c. Ah-ha! They are different! But it travels faster with Mbounce+h, T = D*(Mbounce+h)/Mass, so averaging based on time in each state gives Mbounce+h/(Mbounce+h) = Mbounce+c/(Mbounce+c). To translate this to air in a box, set Dr Who to the air mass, the cricket ball to an air molecule, the Vogon to the heat on the hot wall. Average over a billion different directions and positions.
{ "domain": "physics.stackexchange", "id": 8246, "tags": "thermodynamics, statistical-mechanics" }
Trouble with position operator in quantum mechanics
Question: I'm having some trouble with understanding the derivation of the action of the $X$ operator. It seems to be a result of the notation used and not a property of itself. The usual argument is to consider eigenfunctions of the $X$-operator: $X|x\rangle = x|x\rangle$ where $X$ is an operator, $|x\rangle$ is an eigenket of $X$ and $x$ is the corresponding eigenvalue. Then \begin{eqnarray*} \color{red}{\langle x'|}X|x\rangle &=& \color{red}{\langle x'|}x|x\rangle \\ \\ \langle x'|X|x\rangle &=& x\langle x'|x\rangle \\ \\ \langle x'|X|x\rangle &=& x\,\delta(x'-x) \end{eqnarray*} where $\delta$ is Dirac's $\delta$-"function". Then we ask what $X$ does to arbitrary kets like $|f\rangle$: \begin{eqnarray*} (Xf)(x) &=& \langle x | X | f\rangle \\ \\ &=&\int_{-\infty}^{+\infty} \langle x|X|x'\rangle\langle x'|f\rangle~\mathrm dx' \\ \\ &=& \int_{-\infty}^{+\infty}f(x')\color{red}{\langle x|X|x'\rangle}~\mathrm dx' \\ \\ &=& \int_{-\infty}^{+\infty} f(x')\,\color{red}{x'\,\delta(x-x')}~\mathrm dx' \end{eqnarray*} The defining property of the $\delta$-"function" is that $\int_{\mathbb R} f(y)\,\delta(x-y)~\mathrm dy=f(x)$, and so $$(Xf)(x) = x\,f(x)$$ However, if I do this with other symbols, then I can't get the same result. Let's say $X|x\rangle = \lambda |x\rangle$. Then following the same steps gives $\langle x'|X|x\rangle = \lambda \langle x'|x\rangle=\lambda \,\delta(x'-x)$, and whence $$(Xf)(x)=\lambda f(x)$$ This is to be expected: $Xf$ is just an eigenvalue multiple of $f$. It seems that the property that $X : f \mapsto xf$ comes from the fact that we used $x$ to denote the eigenvalue of $X$. What am I missing here? Perhaps because $x$ is a real number and the set of all kets $|x\rangle$ can be identified with the real line by $|x\rangle \mapsto x$, and that $x$ must be an eigenvalue of $|x\rangle$ under this construction? 
Answer: By saying $X|x\rangle = \lambda |x\rangle$ and then integrating over $x$ without allowing for the fact that $\lambda$ depends on $x$, you're essentially saying that the action of $X$ on all $|x\rangle$ states is the same, so that $X$ is a constant: $$ X = X\int|x\rangle\langle x|\mathrm dx = \int X|x\rangle\langle x|\mathrm dx = \int \lambda|x\rangle\langle x|\mathrm dx = \lambda\int|x\rangle\langle x|\mathrm dx =\lambda\mathbb I. $$ Allow for $\lambda$ to depend on $x$ (i.e. allow for $X$ to have different eigenvalues for different eigenvectors), say, via notation like $\lambda_x$ or similar, and you'll recover the initial behaviour in the case $\lambda_x = x$. That said, from what you've said in comments, the confusion is a bit more fundamental. To be clear: the property $$ (Xf)(x) = xf(x) $$ is the definition of the $X$ operator, at least when working on the explicit instantiation of the abstract Hilbert space in the position-based $L_2(\mathbb R)$ space; as a definition, it hardly needs any justification. Shankar's treatment arises because he starts using an explicit coordinate-based construction of finite-dimensional Hilbert spaces and then decides to keep on using the same notation without showing how the Hilbert-space basis is built, or the theorems that guarantee its existence; this is fine for a first pass at the subject but there are better ways to do it once you're accustomed to the basics. (And that means that you shouldn't take that section as representative of the rigorous ways to construct these mathematical structures.) Frankly, I find that part of Shankar's presentation to be rather confused (not necessarily wrong, as such, but I would encourage you to read other textbooks to get a better feeling for those topics). In particular, Shankar introduces the notation of the $X$ operator as the operator "responsible for the $|x⟩$ basis" - but this is not really necessary. 
You already have that basis, you don't really need an operator to generate it. (It exists, of course, but you don't need it, in the way Shankar implies.) Either way, at some point you need to commit to one or another definition of $X$: the simplest way is to define it via $(Xf)(x) = xf(x)$, but you can also define it, as Shankar does, as the unique operator whose action on the basis states is $$ X|x⟩=x|x⟩, $$ i.e. to multiply each state by its label (and one can then show, as Shankar does, that the two definitions are equivalent). Now why would you define something like that? because you can, and because it works. Is this purely a trick of the notation? No, there's clear physical content to the assertion that $X|x⟩=x|x⟩$, and if you want to just "change the labels" then you need to do so consistently: you'd need to say something like $X|\lambda⟩=\lambda|\lambda⟩$, while making clear that $|\lambda⟩$ is the same position basis state as $|x⟩$ but with altered notation. And if this was not enough to dig you out, then I would seriously recommend taking up a few more books for alternative perspectives on the topic: there's better ways to do it (or at least, ways that are better in this respect) but there's no space in a Q&A format to develop them fully. Read around. Seriously.
{ "domain": "physics.stackexchange", "id": 41526, "tags": "quantum-mechanics, hilbert-space, notation, eigenvalue, dirac-delta-distributions" }
Is a CFT quantized on flat space trivial?
Question: My understanding of CFT (as taught in an introductory class for example) is that we work in Euclidean signature and quantize on a sphere of radius $R$. The spectrum is given by the conformal weights $\Delta$ of the primary operators (via the state-operator mapping) and their descendants via $$E_n = \frac{\Delta_n}{R}.$$ If we were to instead quantize on a slice of flat space, we would get a trivial spectrum $E_n = 0$ since there is no energy scale. This can also be seen from taking $R\to \infty$ above. So the spectrum on flat space is trivial. Since, in almost all examples I've encountered, a quantum theory is specified by its spectrum, is it correct to say that CFT quantized on flat space is trivial? Are there any subtleties? Edit: There is a misstatement in this question, that the theory on flat space has a trivial spectrum, as per the comment. (The nonexistence of a mass scale only prohibits a gap, not a spectrum altogether.) This is a point I'd like to understand better. Answer: If you take the limit of $\frac{\Delta_n}{R}$ for $R \to \infty$ with fixed $n$ you do get zero, but keep in mind that you also have infinitely many $\Delta_n$. Thus it might be possible (and is in fact true) that by taking the limit $n \to \infty$ at the same time in an appropriate way you end up with a nonzero result for infinite $R$. Then you can immediately conclude that the spectrum is $[0, \infty)$, because by scaling symmetry for every state of energy $E$ there is a state of energy $\lambda E$ for any given $\lambda >0$. Let me give you a simple example: consider the Laplace equation on a circle of length $L$. Its eigenvalues are $\lambda_n(L):= \left( \frac{n}{L} \right)^2$ where $n$ is an integer. For any given $n$ the limit of $\lambda_n(L)$ for $L \to \infty$ is zero. On the other hand, for every finite $L$ the spectrum is not bounded above. It becomes more and more dense in the half-line $[0, \infty)$ as $L$ is increased. 
In this sense you do recover the correct spectrum of the Laplace operator on $\mathbb R$ in the limit $L \to \infty$.
{ "domain": "physics.stackexchange", "id": 67030, "tags": "quantum-mechanics, quantum-field-theory, conformal-field-theory" }
Why is the expectation value of Hermitian conjugate operators $RR^\dagger$ always real and non-negative?
Question: I've been reading through a derivation of the wavefunctions and energy levels for the quantum harmonic oscillator. It defines $$\hat R^\pm=\frac{1}{\sqrt{2}}[\hat p \pm \mathrm{i}\omega \hat q]$$ in mass-weighted coordinates $q=x\sqrt{\mu},$ such that $\hat p= -\mathrm{i}\hbar(\mathrm d/\mathrm dq)$ and $\hat q$ is the position operator. It then asserts that $\langle \psi|\hat R^+\hat R^-|\psi\rangle$ must be real and non-negative, since $\hat R^+$ and $\hat R^-$ are hermitian conjugates. I know that $(\hat A\hat B)^\dagger = \hat B^\dagger \hat A^\dagger$ and so $\hat R^+\hat R^-$ is a hermitian operator meaning that the bra-ket must be real, but I can't see why it should be non-negative. Could anyone shed some light on this? Answer: Let $\hat{R}^-|\psi\rangle = |\psi'\rangle$ (a new ket; we don't care what it is). If you take the adjoint of this equation you get $$\langle\psi'| = \langle\psi|(\hat{R}^-)^\dagger = \langle\psi|\hat{R}^+$$ and hence $$\langle\psi|\hat{R}^+\hat{R}^-|\psi\rangle = \langle\psi'|\psi'\rangle$$ which by definition of the inner product must be real and non-negative.
{ "domain": "chemistry.stackexchange", "id": 8874, "tags": "physical-chemistry, quantum-chemistry" }
Are Cellular Automata always computers?
Question: I was reading on Complex Systems journal and found a paper where the author states that a cellular automaton can be viewed as a computer. In the introduction part: Cellular automata can be viewed as either computers or logical universes where computers may be embbed. What i am trying to understand is, is it always a given Cellular Automaton is a computer ? like Elementary Cellular Automata {ECA} rule 110 and rule 90, i know that rule 110 is universal which makes it a computer but what about rule 90 ... what about other ECA rules are they also computers ? Suppose we have a cellular automaton (x), how do we know that it is a logical universe (as stated by the author), how can we know if it is a logical universe where computers can be embedd in or not and how can we know if it is just a computer ? I know that logical universes are Cellular Automata, but what dimensions these universes should be to implement a computer easily, 1D like ECA or 2D like Conway's Game of Life ? Answer: "... is it always a given Cellular Automaton is a computer?"... I interpret "is a computer" as "being capable of universal computation" or in other words "being capable of simulating an arbitrary Turing machine". The answer is clearly NO; a trivial example is a CA that doesn't modify the input: 0[0]0 -> 0 0[0]1 -> 0 0[1]0 -> 1 0[1]1 -> 1 1[0]0 -> 0 1[0]1 -> 0 1[1]0 -> 1 1[1]1 -> 1 (rule 204) According to Wolfram's (qualitative) classification, only type-4 CAs exhibit a behaviour that can potentially be "universal". But it is an informal statement (like many others in Wolfram's NKS) ... in order to say that a particular CA is/is not Universal you must prove it formally. For a more rigorous introduction to the argument I suggest you to read some research articles on the subject, for example: Kristian Lindgren, Mats G. Nordhal. Universal Computation in Simple One-Dimensional Cellular Automata. 
Complex Systems 4 (1990) 299-318 Or - for a good, more general, introduction to computational universality - read Chapter 7: The Grand Unified Theory of Computation of the (IMO excellent) book: Cristopher Moore, Stephan Mertens. The Nature Of Computation (2011)
{ "domain": "cs.stackexchange", "id": 5410, "tags": "computation-models, cellular-automata" }
Why are neural networks considered to be artificial intelligence?
Question: Why are we now considering neural networks to be artificial intelligence? Answer: Why are we now considering neural networks to be artificial intelligence? "We" aren't. It is generally due to reporting by media sources that simplify science and technology news. The definition of AI is somewhat fluid, and also contentious at times, but in research and scientific circles it has not changed to the degree that AI=NN. What has happened is that research into neural networks has produced some real advances in the last decade. These advances have taken research-only issues such as very basic computer vision, and made them good enough to turn into technology products that can be used in the real world on commodity hardware. These are game-changing technology advances, and they use neural networks internally. Research and development using neural networks is still turning out new and improved ideas, so has become a very popular area to learn. A lot of research using neural networks is also research into AI. Aspects such as computer vision, natural language processing, control of autonomous agents are generally considered parts of AI. This has been simplified in reporting, and used by hyped-up marketing campaigns to label pretty much any product with a NN in it as "Artificial Intelligence". When often it is more correctly statistics or Data Science. Data Science is another term which has been somewhat abused by media and technology companies - the main difference between use of AI and Data Science is that Data Science was a new term, so did not clash with pre-existing uses of it. The rest of AI as a subject and area of study has not gone away. Some of it may well use neural networks as part of a toolkit to build or study things. But not all of it, and even with the use of NNs, the AI part is not necessarily the neural network.
{ "domain": "ai.stackexchange", "id": 3132, "tags": "neural-networks, terminology, definitions, social" }
How to fix a corrupted (boilerplate) toplevel.cmake file?
Question: I accidentally replaced my CMakeLists.txt file in my catkin_ws/src directory and then ran catkin_make. This replaced the toplevel.cmake located in this directory /opt/ros/indigo/share/catkin/cmake. Resulting in the following error: CMake Error: File /home/user/projects/catkin_ws/src/package.xml does not exist. CMake Error at /opt/ros/indigo/share/catkin/cmake/stamp.cmake:10 (configure_file): configure_file Problem configuring file Call Stack (most recent call first): /opt/ros/indigo/share/catkin/cmake/catkin_package_xml.cmake:61 (stamp) /opt/ros/indigo/share/catkin/cmake/catkin_package_xml.cmake:39 (_catkin_package_xml) /opt/ros/indigo/share/catkin/cmake/catkin_package.cmake:95 (catkin_package_xml) CMakeLists.txt:37 (catkin_package) CMake Error at /opt/ros/indigo/share/catkin/cmake/catkin_package.cmake:112 (message): catkin_package() 'catkin' must be listed as a buildtool dependency in the package.xml Call Stack (most recent call first): /opt/ros/indigo/share/catkin/cmake/catkin_package.cmake:98 (_catkin_package) CMakeLists.txt:37 (catkin_package) This is the same question asked here, however I'm not able to follow the answer given. I tried of replacing the toplevel.cmake located in the directory the /opt/ros/indigo/share/catkin/cmake, however I couldn't find the original one (or how to generate it again). To be more specific, I would like to know what the best way to proceed is (should I reinstall ROS or is there a better solution?). Originally posted by jsanch2s on ROS Answers with karma: 136 on 2016-02-20 Post score: 0 Original comments Comment by gvdhoorn on 2016-02-20: For future questions: please use the Preformatted text button (the one with 101010 on it) for console copy/pastes instead of Block quote. As the name suggests, it preserves the formatting better (Block quote rearranges things in sometimes unpredictable ways). Answer: First try to unlink / rm the src/CMakeLists.txt in your workspace. 
Then try building your workspace again (just run catkin_make in /home/user/projects/catkin_ws). That should work, provided you did not actually edit the file in /opt/ros/indigo/.. (that would have required sudo, so you should know / remember that). If you did change toplevel.cmake itself, then you can just re-install the ros-$DISTRO-catkin package using apt-get: sudo apt-get install --reinstall ros-indigo-catkin for Indigo. That should replace the edited file with the original version. Building your workspace should now work again. Originally posted by gvdhoorn with karma: 86574 on 2016-02-20 This answer was ACCEPTED on the original site Post score: 2 Original comments Comment by jsanch2s on 2016-02-20: Thank you very much! That worked perfectly! Comment by gvdhoorn on 2016-02-20: Good to hear you got it fixed. Can you tell us which of the two possible fixes worked for you? Comment by jsanch2s on 2016-02-20: I had to do the re-install of catkin (i.e. sudo apt-get install --reinstall ros-indigo-catkin), since the toplevel.cmake file in the /opt directory was indeed the wrong one.
{ "domain": "robotics.stackexchange", "id": 23845, "tags": "cmake" }
How can a black hole rotate if time dilation stops time at the event horizon?
Question: How does a black hole rotate if time is dilated to infinity (e.g. stopped) at the event horizon? Note: this is relevant to this question, but different: How can a singularity in a black hole rotate if it's just a point? Edit: I am considering this from an external reference frame (e.g. like what we would see from Earth, or perhaps even an object somewhat close to the black hole, but not very close to its event horizon). Answer: The statement "time is dilated to infinity at the horizon" is a (very imprecise) way of saying that the event horizon is a null/lightlike surface. However, as is clear from light-rays, being null/lightlike is no impediment to moving. In particular it is possible for a null/lightlike surface to rotate. (Just as photons move, `despite' the fact that "time is dilated to infinity on a light-ray".)
{ "domain": "physics.stackexchange", "id": 45566, "tags": "black-holes, angular-momentum, event-horizon, kerr-metric, frame-dragging" }
How are the eigenvalues of $\rho=\frac12(|a\rangle\!\langle a| +|b\rangle\!\langle b|)$ derived?
Question: Let's say I have a density matrix of the following form: $$ \rho := \frac{1}{2} (|a \rangle \langle a| + |b \rangle \langle b|), $$ where $|a\rangle$ and $|b\rangle$ are quantum states. I saw that the eigenvalues of this matrix are: $$ \frac{1}{2} \pm \frac{|\langle a | b \rangle|}{2}. $$ I was just wondering how this is derived. It seems logical, i.e if $|\langle a | b \rangle| = 1$ then the eigenvalues are $0$ and $1$, otherwise if $|\langle a | b \rangle| = 0$ then they are half and half. This means that the entropy of the system would either be $0$ or $1$. But I was just wondering how to calculate the eigenvalues from $\rho$. Answer: For this it suffices to consider the two-dimensional subspace spanned by $|a\rangle$ and $|b\rangle$. Let $|0\rangle$ and $|1\rangle$ be an orthonormal basis of this subspace. Then $$\begin{align} |a\rangle =& a_0 |0\rangle + a_1 |1\rangle\\ |b\rangle =& b_0 |0\rangle + b_1 |1\rangle \end{align} $$ and $$\rho = \frac{1}{2}\left(\begin{array}{cc} a_0 a_0^*+b_0b_0^* & a_0a_1^* + b_0 b_1^*\\ a_1 a_0^*+ b_1b_0^* & a_1a_1^*+b_1b_1^* \end{array}\right).$$ That is, now you have a 2x2 Hermitian matrix and calculate its eigenvalues as usual. Hint: A Hermitian matrix $$\left(\begin{array}{cc} a & c + d i\\ c - d i & b \end{array}\right)$$ has eigenvalues $\frac{1}{2}(a + b \pm \sqrt{(a-b)^2+ 4 (c^2+d^2)})$.
{ "domain": "quantumcomputing.stackexchange", "id": 2401, "tags": "quantum-state, density-matrix, textbook-and-exercises, linear-algebra" }
RVIZ is unable to show the map from hector_slam
Question: I do not have any errors. I am able to see the laserscan and it is able to detect and show the movement of the sensor. However, I am unable to see the map. I have put in the respective topics and fixed frames as many tutorials have mentioned. Can anyone help me check if anything is wrong? Thank you. view_sick.launch file: <launch> <include file="$(find sick_tim)/launch/sick_tim551_2050001.launch" /> <node name="rviz" pkg="rviz" type="rviz" args="-d $(find sick_tim)/ rviz/sick.rviz"/> </launch> hector_mapping_demo.launch file: <launch> <node pkg="hector _mapping" type="hector_mapping" name="hector_mapping" output="screen"> <!-- Frame names --> <param name="pub_map_odom_transform" value="true"/> <param name="map_frame" value="map" /> <param name="base_frame" value="base_link" /> <param name="odom_frame" value="base_link" /> <!-- Tf use --> <param name="use_tf_scan_transformation" value="true"/> <param name="use_tf_pose_start_estimate" value="false"/> <!-- Map size / start point --> <param name="map_resolution" value="0.05"/> <param name="map_size" value="2048"/> <param name="map_start_x" value="0.5"/> <param name="map_start_y" value="0.5" /> <param name="laser_z_min_value" value = "-1.0" /> <param name="laser_z_max_value" value = "1.0" /> <param name="map_multi_res_levels" value="2" /> <param name="map_pub_period" value="2" /> <param name="laser_min_dist" value="0.4" /> <param name="laser_max_dist" value="5.5" /> <param name="output_timing" value="false" /> <param name="pub_map_scanmatch_transform" value="true" /> <!--<param name="tf_map_scanmatch_transform_frame_name" value="scanmatcher_frame" />--> <!-- Map update parameters --> <param name="update_factor_free" value="0.4"/> <param name="update_factor_occupied" value="0.7" /> <param name="map_update_distance_thresh" value="0.2"/> <param name="map_update_angle_thresh" value="0.06" /> <!-- Advertising config --> <param name="advertise_map_service" value="true"/> <param name**="scan_subscriber_queue_size" 
value="5"/> <param name="scan_topic" value="scan"/> </node> <node pkg="tf" type="static_transform_publisher" name="base_to_laser_broadcaster" args="0 0 0 0 0 0 /base_link /laser 100"/> <node pkg="rviz" type="rviz" name="rviz" args="-d $(find hector_slam_launch)/rviz_cfg/mapping_demo.rviz"/> </launch> Command: cd ~/catkin_make catkin_make roslaunch sick_tim view_sick.launch roslaunch sick_tim hector_mapping_demo.launch bag info: path: 2018-10-18-11-51-00.bag version: 2.0 duration: 31.8s start: Oct 18 2018 11:51:01.40 (1539834661.40) end: Oct 18 2018 11:51:33.15 (1539834693.15) size: 79.0 MB messages: 3253 compression: none [18/18 chunks] types: diagnostic_msgs/DiagnosticArray [60810da900de1dd6ddd437c3503511da] dynamic_reconfigure/Config [958f16a05573709014982821e6822580] dynamic_reconfigure/ConfigDescription [757ce9d44ba8ddd801bb30bc456f946f] geometry_msgs/PoseStamped [d3812c3cbc69362b77dc0b19b345f8f5] geometry_msgs/PoseWithCovarianceStamped [953b798c0f514ff060a53a3498ce6246] nav_msgs/MapMetaData [10cfc8a2818024d3248802c00c95f11b] nav_msgs/OccupancyGrid [3381f2d731d4076ec5c71b0759edbe4e] rosgraph_msgs/Log [acffd30cd6b6de30f120938c17c593fb] sensor_msgs/LaserScan [90c7ef2dc6895d81024acba2ac42f369] sensor_msgs/PointCloud [d8e9c3f5afbdd8a130fd1d2763945fca] tf2_msgs/TFMessage [94810edda583a504dfda3829e70d7eec] topics: /diagnostics 31 msgs : diagnostic_msgs/DiagnosticArray /map 17 msgs : nav_msgs/OccupancyGrid /map_metadata 1 msg : nav_msgs/MapMetaData /poseupdate 479 msgs : geometry_msgs/PoseWithCovarianceStamped /rosout 17 msgs : rosgraph_msgs/Log (5 connections) /rosout_agg 6 msgs : rosgraph_msgs/Log /scan 474 msgs : sensor_msgs/LaserScan /sick_tim571_2050101/parameter_descriptions 1 msg : dynamic_reconfigure/ConfigDescription /sick_tim571_2050101/parameter_updates 1 msg : dynamic_reconfigure/Config /slam_cloud 482 msgs : sensor_msgs/PointCloud /slam_out_pose 474 msgs : geometry_msgs/PoseStamped /tf 1269 msgs : tf2_msgs/TFMessage (2 connections) /tf_static 1 msg : 
tf2_msgs/TFMessage [image: image description]
<param name="laser_z_max_value" value = "1.0" /> <!-- Advertising config --> <param name="advertise_map_service" value="true"/> <param name="scan_subscriber_queue_size" value="$(arg scan_subscriber_queue_size)"/> <param name="scan_topic" value="$(arg scan_topic)"/> <!-- Debug parameters --> <!-- <param name="output_timing" value="false"/> <param name="pub_drawings" value="true"/> <param name="pub_debug_output" value="true"/> --> <param name="tf_map_scanmatch_transform_frame_name" value="$(arg tf_map_scanmatch_transform_frame_name)" /> </node> <node pkg="tf" type="static_transform_publisher" name="base_to_laser_broadcaster" args="0 0 0 0 0 0 base_link laser 30"/> </launch> This is my launch file. Maybe it can serve as a reference for your troubleshooting process. It is working fine on RPLIDAR running with Raspberry Pi 3. I am not sure this is your expected solution but due to the word limit in the comment section, I am unable to share with you my launch file. Therefore I reply as a solution. Hopefully, it is useful for you. Originally posted by KinWah with karma: 91 on 2018-10-22 This answer was ACCEPTED on the original site Post score: 1 Original comments Comment by rui on 2018-10-23: I am using raspberry pi 3 B+ may i ask which OpenGL config did you use? Comment by KinWah on 2018-10-24: I think our project is slightly different. my raspberry (slave) is just to publish laser data and the mapping is done on laptop (master). I am not sure how to check opengl config :3 i am new to this. but i use the default because as far as i remember, i did not change any of it.
{ "domain": "robotics.stackexchange", "id": 31942, "tags": "rviz, hector-slam, ros-kinetic" }
Name comparison using fuzzy string matching
Question: I'm somewhat new to python and wrote this piece of code to do a string comparison of accounts that are being requested for import into our data base against accounts that are already present. The issue is that the accounts currently in our DB is over 65K and I'm comparing over 5K accounts for import causing this code to take over 5 hours to run. I suspect this has to do with the loop I'm using but I'm not certain how to improve it. TLDR; I need help optimizing this code so it has a shorter run time. from fuzzywuzzy import fuzz from fuzzywuzzy import process accounts_DB = pd.read_csv("file.csv") #65,000 rows and 15 columns accounts_SF = pd.read_csv("Requested Import.csv") #5,000 rows and 30 columns def NameComparison(DB_account, choices): """Function uses fuzzywuzzy module to perform Levenshtein distance string comparison""" return(process.extractBests(DB_account, choices, score_cutoff= 95)) options = accounts_sf["Account Name"] a_list = [] for i in range(len(accounts_db)): a_list.append(NameComparison(accounts_db.at[i,"Company Name"], options)) b_list = pd.DataFrame(a_list) b_list.to_csv("Matched Accounts.csv") Answer: To apply the same function to each row of a dataframe column, you usually use pd.Series.map or pd.Series.apply. You can thus simplify your code to: from functools import partial from fuzzywuzzy import process accounts_DB = pd.read_csv("file.csv") #65,000 rows and 15 columns accounts_SF = pd.read_csv("Requested Import.csv") #5,000 rows and 30 columns best_matches = partial(process.extractBests, choices=accounts_SF['Account Name'], score_cutoff=95) accounts_DB['Company Name'].map(best_matches).to_csv("Matched Accounts.csv")
{ "domain": "codereview.stackexchange", "id": 33096, "tags": "python, performance" }
What is the name of this tiny creature?
Question: Place: South India, Metropolitan, Year: 2017; Night Time. Size: 1-2 cm (Very Tiny) Answer: It's 'Indian skipper'. Family :Hesperiidae. Source: http://abeautiful-butterflys.blogspot.in/2010/04/hesperiidae-butterflies-of-india.html
{ "domain": "biology.stackexchange", "id": 7181, "tags": "species-identification, entomology, lepidoptera" }
Finding gender affinity for businesses
Question: What are the different models I can use to find the gender affinity of businesses using yelp dataset-- https://www.kaggle.com/yelp-dataset/yelp-dataset . I need to find Probablity (Male buying from a merchant) and Probablity (Female buying from a merchant) where both probablities add to 1 . There is no information about gender so I can use the Genderize api to find gender using names of users. This is an unsupervised problem Answer: Since you're only interested in the gender affinity of businesses in your dataset, no model is needed. You can compute gender affinity directly: For each business, find the total number of customers, the total number of male customers, and the total number of female customers. Then $Pr(Male) = \frac{males}{total}$ and $Pr(Female) = \frac{females}{total}$
{ "domain": "datascience.stackexchange", "id": 7534, "tags": "nlp, machine-learning-model, probability" }
how to call node from overlay package from a package in hydro default installation
Question: Hi, I have installed husky_navigation which is the standard navigation package for the Clearpath Husky robot directly from the hydro installation ros-hydro-husky-navigation. I'm running a launchfile within that package that calls the slam_gmapping node of the gmapping package. However, I wanted to modify the behavior of slam_gmapping a bit and therefore created an overlay of the gmapping package and made the required changes. This node now functions as required. But when I launch the husky_navigation launchfile, it still launches the hydro default installation of slam_gmapping and NOT my overlay. How do I specify this in the launchfile so that it calls my overlayed package? Thank you. PS - Let me know if I wasn't clear with some of the terminology here. I don't know what a default debian package installation of a hydro package is called. Originally posted by 2ROS0 on ROS Answers with karma: 1133 on 2014-07-30 Post score: 0 Original comments Comment by BennyRe on 2014-07-30: Where does roscd slam_gmapping lead you to? Answer: This should happen automatically if you've sourced the overlay with your modified packages in it before you run roslaunch. Perhaps there's something unusual about the way you run the husky navigation launch file that is resetting the environment? Originally posted by ahendrix with karma: 47576 on 2014-07-30 This answer was ACCEPTED on the original site Post score: 1 Original comments Comment by 2ROS0 on 2014-07-31: I haven't changed anything in the default launch file. I sourced the overlay again and it worked but I know that I had sourced it earlier too. So there's got to be something I'm running that's resetting the environment. How would I find something that resets the environment? Thank you.
{ "domain": "robotics.stackexchange", "id": 18835, "tags": "slam, navigation, clearpath, husky, gmapping" }
Send .wav files to other ROS module
Question: Hi! I am TOTALLY new in ROS. I have a package consisting of two ROS modules where the first module takes input from a microphone and saves it as a ".wav" file. I want this module to publish this ".wav"-file to a topic so that the other ROS module is able to receive it(subscribe the topic). How do I do this? I have been looking at the ROS package called audio_common, but I cant figure out how to do it. Do I need to convert the ".wav"-file into bytes and stuff before sending? I have no idea! Any advices would help here! Originally posted by steinaak on ROS Answers with karma: 151 on 2016-03-05 Post score: 1 Original comments Comment by ahendrix on 2016-03-05: Are you trying to stream audio, or are you trying to capture short bursts? What are you trying to do with the audio samples in the receiving node? Comment by steinaak on 2016-03-06: Hey! Now, the microphone module captures small bursts(.wav files with duration up to 10 seconds). The other module will receive this audio(if I am able to solve that problem) and make a call to an API to translate the speech into text. What message type should I use? And how to do it? Answer: There isn't a good audio transport convention in ROS. audio_common_msgs provides the AudioData message, but there's no documented convention about what data it's supposed to carry, and no way to indicate which format the data is in. audio_capture usually uses it for transporting chunks of MP3 streams, but you could use it for wav data too. If you need any kind of metadata with your audio, you should create your own message. In practice, if you want to do speech recognition I'd recommend the ROS pocketsphinx package. It's what most ROS users use for speech recognition, and it's reasonably well maintained. The only place where it may not work is if you want to do the audio capture and speech processing on two different computers. 
Originally posted by ahendrix with karma: 47576 on 2016-03-06 This answer was ACCEPTED on the original site Post score: 1 Original comments Comment by steinaak on 2016-03-06: Thank you very much for your reply! I have had a look at the pocketsphinx package already, but it seems like you have to build the vocabulary of words to recognize yourself? Or does the pocketsphinx include a vocabulary so it is able to recognize speech already? Comment by ahendrix on 2016-03-06: There are two varieties of speech recognition - dictation and vocabulary-based recognizers. Vocabulary-based recognizers are usually good for commands. Dictation recognizers use a much larger vocabulary are are usually good for "speech typing". Comment by steinaak on 2016-03-07: Thank you! Is there a dictation-recognizer in pocketsphinx that works well? Comment by ahendrix on 2016-03-07: I'm not aware of anyone using pocketsphinx for dictation.
{ "domain": "robotics.stackexchange", "id": 24003, "tags": "ros, audio, audio-common" }
Class for locking shared disk directory
Question: I'm writing an application to sync files between two directories. In order to prevent simultaneous access to the shared directory from several computers, I implemented blocking of the shared directory. The class below implements this lock. The application is built and at first glance works without errors. But I'm new to writing multithreaded and file system code and I'm not sure I did everything right. Please look at my directory locker class. Is everything right there? Are there any problems? If so, how can they be corrected? Can it be made better? Update from 12.11: I forgot to write: The application is platform independent and I prefer not to use platform specific functions. It is advisable to use functions from the standard library. The shared directory may be located on a network drive and system functions such as shlock or flock may not be available there. Header file: #pragma once #include <filesystem> #include <future> class DirectoryLocker { public: DirectoryLocker(); ~DirectoryLocker(); bool tryToLock(const std::wstring& dirPath); void freeLock(); bool isLockValid() const; static constexpr const wchar_t* LockFileName = L".momentasync.lock"; private: std::filesystem::path m_pathToLockFile; std::atomic<bool> m_stopLockFileRefresh; std::future<void> m_lockFileRefreshFuture; }; CPP file: #include "DirectoryLocker.h" #include <chrono> #include <fstream> using namespace std::chrono_literals; const auto LockLifeTime = 15min; const auto LockRefreshReserve = 1min; static void writeExpirationTimeToLockFile(FILE* lockFile); static std::chrono::system_clock::time_point getLockFileExpirationTime(const std::filesystem::path& pathToLockFile); static std::future<void> runLockFileRefreshTask(const std::atomic<bool>& stopLockFileRefresh, const std::filesystem::path& pathToLockFile); DirectoryLocker::DirectoryLocker() {} DirectoryLocker::~DirectoryLocker() { // According to Effective C++ Item 8: Prevent Exceptions from Leaving Destructors // we provide separate 
function freeLock because freing lock can throw exceptions. try { if (isLockValid()) freeLock(); } catch (...) {} } bool DirectoryLocker::tryToLock(const std::wstring& dirPath) { m_pathToLockFile = dirPath; m_pathToLockFile /= LockFileName; FILE* fp = std::fopen(m_pathToLockFile.string().c_str(), "wx"); if (!fp) { if (getLockFileExpirationTime(m_pathToLockFile) >= std::chrono::system_clock::now()) return false; // Lock file is expired. Remove it and try to create again. std::filesystem::remove(m_pathToLockFile); fp = std::fopen(m_pathToLockFile.string().c_str(), "wx"); if (!fp) // Someone else created a new lock file before us. return false; } writeExpirationTimeToLockFile(fp); std::fclose(fp); m_stopLockFileRefresh.store(false, std::memory_order_seq_cst); m_lockFileRefreshFuture = runLockFileRefreshTask(m_stopLockFileRefresh, m_pathToLockFile); return true; } void DirectoryLocker::freeLock() { m_stopLockFileRefresh.store(true, std::memory_order_seq_cst); if (m_lockFileRefreshFuture.valid()) m_lockFileRefreshFuture.wait(); std::filesystem::remove(m_pathToLockFile); std::fstream f; f.close(); } bool DirectoryLocker::isLockValid() const { return m_lockFileRefreshFuture.valid() && m_lockFileRefreshFuture.wait_for(0ms) == std::future_status::timeout; } static void writeExpirationTimeToLockFile(FILE* lockFile) { auto expirationTime = std::chrono::system_clock::now() + LockLifeTime; std::ostringstream out; out << std::chrono::duration_cast<std::chrono::seconds>(expirationTime.time_since_epoch()).count() << std::endl; out << expirationTime; fputs(out.str().c_str(), lockFile); } static void writeExpirationTimeToLockFile(const std::filesystem::path& pathToLockFile) { std::ofstream file(pathToLockFile); if (!file) throw std::runtime_error("Could not open lock file: " + pathToLockFile.string()); auto expirationTime = std::chrono::system_clock::now() + LockLifeTime; std::ostringstream out; file << 
std::chrono::duration_cast<std::chrono::seconds>(expirationTime.time_since_epoch()).count() << std::endl; file << expirationTime; } static std::chrono::system_clock::time_point getLockFileExpirationTime(const std::filesystem::path& pathToLockFile) { std::ifstream file(pathToLockFile); if (!file) throw std::runtime_error("Could not open lock file: " + pathToLockFile.string()); std::string line; if (!std::getline(file, line)) throw std::runtime_error("Lock file is empty: " + pathToLockFile.string()); try { long long sec = std::stol(line); auto timePoint = std::chrono::time_point<std::chrono::system_clock>(std::chrono::seconds(sec)); auto diffToFuture = timePoint - std::chrono::system_clock::now(); if (diffToFuture > LockLifeTime || sec <= 0) { std::ostringstream out; out << "Invalid expiration time in the lock file." << std::endl; out << "Expiration time: " << timePoint << std::endl; out << "Expiration time in seconds sicne epoch: " << sec << std::endl; out << "First line of the lock file: " << line << std::endl; throw std::runtime_error(out.str()); } return timePoint; } catch (const std::invalid_argument&) { throw std::runtime_error("Invalid first line in the lock file.\nFirst line: \"" + line + "\".\nLock file: " + pathToLockFile.string()); } catch (const std::out_of_range&) { throw std::runtime_error("Number in the first line of the lock file is out of range.\nFirst line: \"" + line + "\".\nLock file: " + pathToLockFile.string()); } } static std::future<void> runLockFileRefreshTask(const std::atomic<bool>& stopLockFileRefresh, const std::filesystem::path& pathToLockFile) { return std::async(std::launch::async, [&stopLockFileRefresh, pathToLockFile]() { while (!stopLockFileRefresh.load(std::memory_order_seq_cst)) { std::this_thread::sleep_for(std::chrono::milliseconds(500)); if (std::chrono::system_clock::now() > getLockFileExpirationTime(pathToLockFile) - LockRefreshReserve) { writeExpirationTimeToLockFile(pathToLockFile); } } }); } Usage example: 
DirectorySyncronizer::SyncResult DirectorySyncronizer::syncronizeFiles( const std::wstring& sharedPath, const std::atomic<bool>& stopSync, const std::function<void(const std::wstring&)>& localSyncProgress, const std::function<void(const std::wstring&)>& sharedSyncProgress) { try { DirectoryLocker dirLocker; if (!dirLocker.tryToLock(sharedPath)) return { false, "The shared directory already locked by another computer."}; // Imitation of file copying for (int i = 0; i < 360; ++i) { if (stopSync.load(std::memory_order_seq_cst)) { dirLocker.freeLock(); return { false, "Syncronization was stopped by the user." }; } if (!dirLocker.isLockValid()) break; std::this_thread::sleep_for(500ms); localSyncProgress(std::format(L"File updated: {}", i)); sharedSyncProgress(std::format(L"File updated: {}", i)); } dirLocker.freeLock(); return { true, "" }; } catch (const std::exception& error) { return { false, error.what() }; } } Answer: This is not safe Let's consider two threads that each call tryToLock() at the same time, and there is an expired lockfile on the disk: FILE* fp = std::fopen(m_pathToLockFile.string().c_str(), "wx"); if (!fp) { if (getLockFileExpirationTime(m_pathToLockFile) >= std::chrono::system_clock::now()) return false; // Lock file is expired. Remove it and try to create again. Both threads are now right at that comment. Now consider that one thread runs ahead of the other, and calls: std::filesystem::remove(m_pathToLockFile); fp = std::fopen(m_pathToLockFile.string().c_str(), "wx"); That works. But now the second thread catches up and it too executes: std::filesystem::remove(m_pathToLockFile); fp = std::fopen(m_pathToLockFile.string().c_str(), "wx"); So it will remove the file opened by the first thread. But that means the second fopen() will succeed. Both threads will now think they have the lock. 
Either you should use the file locking mechanisms of your operating system, like for example flock() (which might not be portable and/or not work on shared drives), or rely on rename() being atomic, like J_H mentioned in his answer. It is not robust There are lots of things that can go wrong that you are not handling in your code. What if someone renames the directory you are in while you are holding the lock? What if daylight saving time kicks in? What if the two processes run on different computers, and the two computers have the clock set to a different time? What if the lock file could be opened but writing to it fails because the disk is full? Part of these issues can be solved by adding some more checking to the code, but some problems are unsolvable without using a completely different way to do locking. Again, the operating system might have better functions available to do proper file locking. It also depends on the protocol used to share the drives. For example, flock() works on NFS, but other protocols might behave differently, and/or have different ways to lock files. Improve the interface Use the [[nodiscard]] attribute for tryToLock(); this will catch mistakes like calling tryToLock() but not using its return value. I would also suggest that you let the constructor take the path to the directory, and rename tryToLock() to try_lock(), and freeLock() to unlock(), and perhaps add a lock() that waits indefinitely until it can get the lock. That way it has the same interface as std::mutex, which also means you can use std::lock_guard, std::try_lock() and other utilities that work on lockable objects.
{ "domain": "codereview.stackexchange", "id": 45217, "tags": "c++, c++11, multithreading, file-system" }
O(NlogN) sorting algorithms in C++
Question: The following code is my lab which focused on implementing any 3 known sorting algorithms out there. Since the professor gave us a choice to pick any of them and compare their execution time I chose Quick Sort, Selection Sort, and Heap Sort. I had the option to choose bogo or bubble sort but I think that's boring and doesn't present a challenge. I had to measure the execution time for every algorithm and print the size = 10 one, I used the chrono library to measure the execution time of the algorithms. Is there a way to speed some of the algorithms up? Would anyone recommend different design choices? I know the heap sort could be implemented with STL using std::make_heap() and then use std::sort() but when I thought about it, it felt like it defies the purpose of the lab (own implementation). I used a random pivot since I read that QS is very slow if the elements are sorted/partially sorted/ or all the same. I was using rand() which made a system call every iteration and really slowed down performance. Would the median of three be better in this case? Restrictions: C++ 11 standard Flags: -Werror -Wall -pedantic No templates Can't use std::vector or std::array or std::list... I had to pass a new, random, non-sorted array (not a copy of the original) into every single algorithm separately I find the last one stupid, since it offers no "control" over the time measurements, especially for quicksort. The odds of getting a size = 10 sorted array out of 100000 numbers are slim but still there. Edit: in the merge function I used i, j, and k as my variable names, which could go in the "bad practice" basket...This is due to the lack of MS I was following my professor's flowchart that she made in class. Also I know that C++ prefers to use camelCase for variables over snake_case, I prefer snake_case and I hope that's not an issue. 
Perfect timing lol: I got my grade back (92/100), and I got downgraded (-4) for readability of code and I quote "Comments could be better"(-4). Code: /** * @author Jakob Balkovec * @file lab5.cpp [Driver Code] * @note Driver code for lab5 * * @brief This assignment focuses on using sorting algorithms such as: * - Heap Sort * - Quick Sort * - Merge Sort * @note use of function pointers */ #include <iostream> #include <chrono> #include <random> #include <iomanip> /** * @brief Maintains the max heap property of a subtree rooted at index 'root'. * @param arr The array to be sorted. * @param size The size of the heap/subtree. * @param root The index of the root of the subtree. */ void heapify(int *arr, int size, int root) { int largest = root; //largest is the root of the heap int left = 2 * root + 1; // L child int right = 2 * root + 2; // R child // if left child is larger than root if (left < size && arr[left] > arr[largest]) { largest = left; } // if right child is larger than current largest if (right < size && arr[right] > arr[largest]) { largest = right; } // if largest is not root if (largest != root) { std::swap(arr[root], arr[largest]); heapify(arr, size, largest); //recursive call } } /** * @brief Performs heap sort on an array. * @param arr The array to be sorted. * @param size The size of the array. 
*/ void heap_sort(int *arr, int size) { // build a max heap for (int i = size / 2 - 1; i >= 0; i--) { heapify(arr, size, i); } // extract elements from heap one by one for (int i = size - 1; i >= 0; i--) { // move current root to the end std::swap(arr[0], arr[i]); // call max heapify on the reduced heap heapify(arr, i, 0); } } /** * @brief Merges two subarrays of arr[] * @param arr The array to be sorted * @param p Starting index of the first subarray * @param q Ending index of the first subarray * @param r Ending index of the second subarray */ void merge(int *arr, int p, int q, int r) { int n1 = q - p + 1; // size of the first subarray int n2 = r - q; // size of the second subarray //temp arrays int* left_sub = new int[n1]; int* right_sub = new int[n2]; //copy elements for(int i = 0; i < n1; i++) { left_sub[i] = arr[p+i]; } //copy elements for(int j = 0; j < n2; j++) { right_sub[j] = arr[q+1+j]; } int i = 0; int j = 0; int k = p; // merge the elements from the temporary arrays back into arr[] in sorted order while(i < n1 and j < n2) { if(left_sub[i] < right_sub[j]) { arr[k] = left_sub[i]; i++; } else { arr[k] = right_sub[j]; j++; } k++; } //copy elements over if any while (i < n1) { arr[k] = left_sub[i]; i++; k++; } //copy elements over if any while (j < n2) { arr[k] = right_sub[j]; j++; k++; } delete[] left_sub; //free memory delete[] right_sub; } /** * @brief Sorts an array using merge sort algorithm * @param arr The array to be sorted * @param p Starting index of the array * @param r Ending index of the array */ void merge_sort_helper(int *arr, int p, int r) { if (p < r) { int q = (p + r) / 2; merge_sort_helper(arr, p, q); merge_sort_helper(arr, q + 1, r); merge(arr, p, q, r); } } /** * @brief Sorts an array using merge sort algorithm * @param arr The array to be sorted * @param size The size of the array */ void merge_sort(int *arr, int size) { merge_sort_helper(arr, 0, size - 1); } /** * @brief Generates a random pivot index between low and high (inclusive) 
* @param low Starting index of the array * @param high Ending index of the array * @return Random pivot index */ int random_pivot(int low, int high) { return low + rand() % (high - low + 1); } /** * @brief Partitions the array and returns the partition index * @param arr The array to be partitioned * @param low Starting index of the partition * @param high Ending index of the partition * @return Partition index */ int partition(int* arr, int low, int high) { int pivotIndex = random_pivot(low, high); int pivot = arr[pivotIndex]; std::swap(arr[pivotIndex], arr[high]); int i = low - 1; // Index of the smaller element for (int j = low; j <= high - 1; j++) { // If current element is smaller than or equal to the pivot if (arr[j] <= pivot) { i++; // Increment index of smaller element std::swap(arr[i], arr[j]); // Swap current element with the smaller element } } std::swap(arr[i + 1], arr[high]); // Swap the pivot with the element at the partition index return i + 1; // Return the partition index } /** * @brief Sorts an array using the QuickSort algorithm * @param arr The array to be sorted * @param low Starting index of the array * @param high Ending index of the array */ void quick_sort_helper(int* arr, int low, int high) { if (low < high) { int partition_index = partition(arr, low, high); // partition the array and get the partition index quick_sort_helper(arr, low, partition_index - 1); // recursively sort the left subarray quick_sort_helper(arr, partition_index + 1, high); // recursively sort the right subarray } } /** * @brief Sorts an array using the QuickSort algorithm * @param arr The array to be sorted * @param size The size of the array */ void quick_sort(int* arr, int size) { quick_sort_helper(arr, 0, size - 1); } /** * @brief * @param arr */ void print_arr(int *arr, int size) { std::cout << "["; for(int i = 0; i < size; i++) { if(i == size-1) { std::cout << arr[i]; //drop comma if last element } else { std::cout << arr[i] << ", "; } } std::cout << "]" << 
std::endl; } /** * @brief Checks if the array is sorted by going through every element in the array * @param arr Array of integers * @param size Size of the Array * @return Boolean, True if it's sorted and False if not */ bool sorted(int *arr, int size) { for (int i = 1; i < size; i++) { if (arr[i] < arr[i - 1]) { return false; } } return true; } /** * @brief Measures the execution time of a sorting algorithm on arrays of different sizes. * @param sorting_function The sorting function to be measured. */ void measure_sort(void (*sorting_function)(int*, int)) { int sizes[] = {10, 100, 1000, 10000, 100000}; // sizes of the array int const MAX = 100000; int const SMALL = 10; std::random_device rd; // a seed source for the random number engine std::mt19937 gen(rd()); // mersenne_twister_engine seeded with rd() std::uniform_int_distribution<> distrib(1, MAX); for (auto i = 0; i < 5; i++) { int* arr = new int[sizes[i]]; for(auto j = 0; j < sizes[i]; j++) { //fill array with random numbers arr[j] = distrib(gen); } if (sizes[i] == SMALL) { //print og array before sorting std::cout << "\n[Original]: "; // << std::setw(2); print_arr(arr, sizes[i]); } //{ /** * @note Measure execution time * @typedef std::chrono::high_resolution_clock::time_point as clock for better readability * @typedef std::chrono::microseconds as ms for better readability */ //} typedef std::chrono::high_resolution_clock::time_point clock; typedef std::chrono::microseconds ms; clock start = std::chrono::high_resolution_clock::now(); sorting_function(arr, sizes[i]); clock end = std::chrono::high_resolution_clock::now(); ms duration = std::chrono::duration_cast<ms>(end - start); long long durationCount = duration.count(); if(sizes[i] == SMALL) { std::string const SPACE = " "; //width const to align output std::cout << std::setw(4) << "[Sorted]:" << SPACE; print_arr(arr, sizes[i]); std::cout << std::endl << std::endl; } int const SIZE_W = 9; int const TIME_W = 8; int const W = 6; std::cout << std::left << 
std::setw(SIZE_W) << "[size]: " << std::setw(W+1) << sizes[i] << std::left <<std::setw(TIME_W) << "[time]: " << std::setw(W) << durationCount << " [ms]" << std::endl; // Clean up dynamically allocated memory delete[] arr; } } /** * @brief Brains of the program, handles the logic * @return void-type */ void run() { /** @note srand seed */ std::cout << std::endl; std::cout << "Measuring Sorting Algorithms" << std::endl; std::cout << "\n[***** [Merge Sort] *****]" << std::endl; measure_sort(merge_sort); std::cout << "\n[***** [Quick Sort] *****]" << std::endl; measure_sort(quick_sort); std::cout << "\n[***** [Heap Sort] *****]" << std::endl; measure_sort(heap_sort); std::cout << std::endl; } /** * @brief Main function of the program, calls run() * @return EXIT_SUCCESS upon successful execution */ int main() { std::srand(static_cast<unsigned int>(std::time(nullptr))); run(); return EXIT_SUCCESS; } Please excuse some typos and spelling errors, English is not my first language and I'm really trying my best. Oh, and also, I am aware that the typedef statements are sort of useless especially when you give them a name like ms, in my mind it seemed right and I thought it improved readability. Answer: I will mainly review your algorithms (except the quicksort, though). Namely, you can go faster with them. Advice 1 - heapify In your heapify, when you sift down an element, you make 3 assignments in std::swap, so we have \$3n\$ assignments to sift \$n\$ times. 
You can do better: \$n + 1\$ assignments, and here is how: void coderodde_sift_down(int* arr, int index, int heap_size) { int left_child_index = index * 2 + 1; int right_child_index = left_child_index + 1; int maximum_index = index; int target = arr[index]; while (true) { if (left_child_index < heap_size && arr[left_child_index] > target) { maximum_index = left_child_index; } if (maximum_index == index) { if (right_child_index < heap_size && arr[right_child_index] > target) { maximum_index = right_child_index; } } else if (right_child_index < heap_size && arr[right_child_index] > arr[left_child_index]) { maximum_index = right_child_index; } if (maximum_index == index) { arr[maximum_index] = target; return; } arr[index] = arr[maximum_index]; index = maximum_index; left_child_index = index * 2 + 1; right_child_index = left_child_index + 1; } } Advice 2 - Mergesort As most novices do while implementing mergesort, you keep allocating memory for left and right runs at each recursion level worth \$n\$ ints. (In total of \$\Theta(n \log n)\$ worth memory allocations.) One trick you could do is to allocate (only once) a buffer array with exactly the same content as the input array, and keep alternating their roles: at the bottom recursion level, you take two adjacent runs from a source array and merge them into a target array. Then, at the next recursion level, you swap the roles of the two arrays and keep doing that until you merge two topmost runs (covering the entire array) from the source array to the target array. (Using recursion magic, we can ensure that at that point the target array is the input array we want to sort; not the buffer.) (The entire program for running all you sorts and my mergesort + heapsort is behind this link.) Advice 3 - sorted You have defined the sorted function, yet you don't use it. Since the data on each run is random, to me it seems sensible to deploy sorted to make sure that algorithms don't fail.
{ "domain": "codereview.stackexchange", "id": 44766, "tags": "c++, c++11, homework" }
What software is being used in this image recognition system?
Question: I was wondering if anyone knew which piece of software is being used in this video? It is an image recognition system that makes the training process very simple. http://www.ted.com/talks/jeremy_howard_the_wonderful_and_terrifying_implications_of_computers_that_can_learn#t-775098 The example is with car images, though the video should start at the right spot. Answer: I'm pretty sure that the software you're referring to is a some kind of internal research project software, developed by Enlitic (http://www.enlitic.com), where Jeremy Howard works as a founder and CEO. By "internal research project software" I mean either a proof-of-concept software, or a prototype software.
{ "domain": "datascience.stackexchange", "id": 175, "tags": "classification" }
does a DFA converted from NFA always contain 2^Q states?
Question: when converting an NFA to DFA, we create sub-sets of states in the NFA. does it mean that every DFA-converted-from-NFA contains 2^Q states? or if some sub-sets are unreachable then they are not included in it? Answer: As per suggestion, I'm posting this as an answer. Any DFA already is an NFA. Determinizing it will not change the number of states it has, so there are NFA that do not have fewer states than the equivalent minimal DFA. Maybe also a non-trivial example: Take the NFA with states $\{q_0, q_1\}$, alphabet $\Sigma = \{a\}$, initial state $q_0$, transitions $\delta(q_0, a) = \{q_0,q_1\}$ and final state $q_1$. It generates the same language as the DFA with the same set of states and alphabet, but transitions $\delta(q_0,a)=q_1$ and $\delta(q_1,a)=q_1$.
{ "domain": "cs.stackexchange", "id": 2088, "tags": "automata, finite-automata" }
Efficiency of wireless charging for moving cars
Question: The UK is about to start trials of wireless charging for moving cars. What kind of power transfer efficiency is likely to be achievable in such a system, compared to just plugging in the car directly? Answer: This is not exactly a comprehensive answer, but I found an interesting article on The Institution of Engineering and Technology's website. It is fairly interesting, if not incredibly technically detailed, and I encourage you to check it out for projects to learn more about. I'll talk about the most interesting parts here. It has many interesting projects listed. Some are for bus routes, and rather than charging along the whole route, they use 'opportunistic charging' to charge at key places (presumably bus stops and traffic lights) to rapidly transfer energy. This is intended to minimize the disruption and possibly cost of construction (imagine having to dig up an entire busy road to add charging coils). This is called "semi-dynamic charging" by Transport Scotland, who are working on one such project. However, it sounds like in Korea, a more full route system has been operating. [T]he Korea Advanced Institute of Science and Technology (KAIST) is running two online electric vehicle (OLEV) buses on a 12km continuous charging route in the city of Gumi. It claims 85 per cent maximum efficiency in power transfer. You're question is focused on the pure efficiency of the charging capacity, but I think also interesting to consider (or possibly implied in the question) is for what cases these technological limitations make actual use a practical exercise. On that note, I will add that this technology may prove of more usefulness on highways, where range is of particular concern in the arena of electric vehicles, leading to projects going to great lengths to provide very close charging stations (25 to 50 miles) like West Coast Green Highway.
{ "domain": "engineering.stackexchange", "id": 400, "tags": "power-engineering, inductive-charging" }
HashMap implementation, is this good enough for interviews?
Question: After understanding HashMaps, I decided to implement my own using 3 classes HashMap, LinkedList, and Node. I knew how to implement LinkedList from before. Can you please give me feedback on this. Will this be an acceptable implementation for a technical interview? Also do we actually need to implement the data structure needed to solve the problem or use the language library? public class HashMap<K, V> { LinkedList<K, V>[] buckets; HashMap(int size) { buckets = new LinkedList[size]; } public void insert(K key, V value) { Node<K, V> newNode = new Node<>(key, value); int bucket = getIndex(key); //Create bucket if it does not exist. if(buckets[bucket] == null) { buckets[bucket] = new LinkedList<>(); } buckets[bucket].insert(newNode); } public void delete(K key) { int bucket = getIndex(key); if(buckets[bucket] == null) { System.out.println("Hashmap is empty."); return; } buckets[bucket].delete(key); } public Node find(K key) { int bucket = getIndex(key); return buckets[bucket].find(key); } public int getIndex(K key) { return key.hashCode() % buckets.length; } @Override public String toString() { String data = ""; for(int i = 0; i < buckets.length; i++) { if(buckets[i] != null) { data += "[" + i + "]" + " = " + buckets[i].toString() + " null"; data += "\n"; } } return data; } } public class LinkedList<K, V> { Node<K, V> head; public void insert(Node<K, V> newNode) { if(head == null) { head = newNode; return; } Node<K, V> current = head; while(current.getNext() != null) current = current.getNext(); current.setNext(newNode); } public void delete(K key) { if(head == null) return; //If head to be deleted. if(head.getKey() == key) { head = head.getNext(); return; } Node current = head; while(current.getNext().getKey() != key) current = current.getNext(); current.setNext(current.getNext().getNext()); } public Node find(K key) { Node current = head; //While not reached end or not found keep going. 
while(current != null && current.getKey() != key) { current = current.getNext(); } //Returns null if not found. return current; } @Override public String toString() { String data = ""; Node current = head; while(current != null) { data += "(" + current.getKey() + ", " + current.getValue() + ")" + " -> "; current = current.getNext(); } return data; } } public class Node<K, V> { private K key; private V value; private Node<K, V> next; Node(K key, V value) { this.key = key; this.value = value; } public K getKey() { return key; } public V getValue() { return value; } public Node getNext() { return next; } public void setNext(Node next) { this.next = next; } } public class HashMapTesting { public static void main(String[] args) { HashMap<String, Integer> ages = new HashMap<>(5); //Testing insertion ages.insert("Reda", 22); ages.insert("Mike", 34); ages.insert("Tom", 55); ages.insert("Daniel", 32); ages.insert("Leonardo", 42); System.out.println(ages.toString()); //Testing search System.out.println(ages.find("Daniel").getValue() + "\n"); //Testing deletion ages.delete("Mike"); System.out.println(ages.toString()); } } Output: [0] = (Reda, 22) -> (Mike, 34) -> null [2] = (Leonardo, 42) -> null [4] = (Tom, 55) -> (Daniel, 32) -> null 32 [0] = (Reda, 22) -> null [2] = (Leonardo, 42) -> null [4] = (Tom, 55) -> (Daniel, 32) -> null Answer: Visibility: I find the visibility of your variables strange. Why is "buckets" a package protected variable in your HashMap class? Do you need to use it outside of the class in the package? Your code doesn't indicate that, so it should be private. Same with LinkedList.head. Additionally, why are the constructors package private? I assume your IDE gave you that tip because your main method lies in the same package, but you should ignore that. Make them public. Node.next: Your HashMap returns Nodes and those Nodes have a public getter and setter for the next variable. 
Using them, one can get a node, set another next and mutate the HashMap from outside. That's certainly not what you (should) want. "getNext" and "setNext" should be package private and your main method should be outside the package to demonstrate that you really understand that these methods shouldn't be accessed by the client. I guess that's what vnp meant. Node<K, V>: As Serverfrog mentioned, your returns miss the generic type information for Node. Imagine you create a HashMap with a String key and integer values. Now you want to find a certain integer and multiply it by two. The problem with this is that your HashMap returns a Node which doesn't know which types are stored in it. So it gives you the value as an Object. Now you have to cast it just because you forgot to add <K, V> to your return type.
{ "domain": "codereview.stackexchange", "id": 32653, "tags": "java, linked-list, interview-questions, reinventing-the-wheel, hash-map" }
Can all mammals swim?
Question: When I checked it seemed trivial to answer: yes, all mammals can swim. But research on the internet provided different information. I found: people and primates cannot swim, but can be taught how to swim giraffes can't swim someone claimed elephants can't swim, but this video ad shows the reverse neither porcupines nor rhinos can swim at least some bats can swim, but according to this source there's insufficient data. Of each hit, I found other hits that claimed the reverse, sometimes with proof. Common sense tells me all mammals can swim, but is this true? Answer: With respect to the giraffe claim, this article seems relevant: D. M. Henderson, D. Naish, Predicting the buoyancy, equilibrium and potential swimming ability of giraffes by computational analysis, J Theoretical Biology 265 (2010) 151-159. It cites several non-"random person on the internet" claims that giraffes cannot swim: 
They conclude: In summary, the results and speculations of this study show that it is not impossible that a giraffe could propel itself in water, but in terms of energy efficiency relative to that of the horse, it would appear that the costs of aquatic locomotion might be too high. It is reasonable to expect that giraffes would be hesitant to enter water knowing that they would be at a decided disadvantage compared to being on solid ground.
{ "domain": "biology.stackexchange", "id": 1748, "tags": "zoology, locomotion" }
Corrupted ROS install on Debian Jessie after update
Question: Hi! I'm trying to get my nodes to run on my Debian Jessie. I have developed those with Kubuntu 14.04. Both systems have the same ROS version - Indigo - however some libs are not the same. For example libopencv on Debian is 2.4.9 but on Ubuntu 14.04 it is 2.4.8. I have built and installed the Desktop version (rqt, rviz etc. included) on my Debian. It seems that something has gone really wrong. I can't run rviz due to *** Error in `rviz': realloc(): invalid pointer: 0x00007f97b2fad860 *** Aborted My nodelet throws fatal erros at me: [FATAL] [1442423922.398321597]: Service call failed! [camera_nodelet_manager-2] process has died [pid 2652, exit code -11, cmd /opt/ros/indigo/lib/nodelet/nodelet manager __name:=camera_nodelet_manager __log:=/home/rosuser/.ros/log/f7737d60-5c96-11e5-b957-78dd08f4756e/camera_nodelet_manager-2.log]. log file: /home/rosuser/.ros/log/f7737d60-5c96-11e5-b957-78dd08f4756e/camera_nodelet_manager-2*.log [camera/driver-3] process has died [pid 2653, exit code 255, cmd /opt/ros/indigo/lib/nodelet/nodelet load pmd_camboard_nano/driver /camera_nodelet_manager __name:=driver __log:=/home/rosuser/.ros/log/f7737d60-5c96-11e5-b957-78dd08f4756e/camera-driver-3.log]. log file: /home/rosuser/.ros/log/f7737d60-5c96-11e5-b957-78dd08f4756e/camera-driver-3*.log [camera/rectify_depth-5] process has died [pid 2655, exit code 255, cmd /opt/ros/indigo/lib/nodelet/nodelet load image_proc/rectify /camera_nodelet_manager image_mono:=depth/image camera_info:=depth/camera_info image_rect:=depth/image_rect __name:=rectify_depth __log:=/home/rosuser/.ros/log/f7737d60-5c96-11e5-b957-78dd08f4756e/camera-rectify_depth-5.log]. 
log file: /home/rosuser/.ros/log/f7737d60-5c96-11e5-b957-78dd08f4756e/camera-rectify_depth-5*.log [camera/rectify_amplitude-6] process has died [pid 2656, exit code 255, cmd /opt/ros/indigo/lib/nodelet/nodelet load image_proc/rectify /camera_nodelet_manager image_mono:=amplitude/image camera_info:=amplitude/camera_info image_rect:=amplitude/image_rect __name:=rectify_amplitude __log:=/home/rosuser/.ros/log/f7737d60-5c96-11e5-b957-78dd08f4756e/camera-rectify_amplitude-6.log]. log file: /home/rosuser/.ros/log/f7737d60-5c96-11e5-b957-78dd08f4756e/camera-rectify_amplitude-6*.log [camera/rectify_distance-4] process has died [pid 2654, exit code 255, cmd /opt/ros/indigo/lib/nodelet/nodelet load image_proc/rectify /camera_nodelet_manager image_mono:=distance/image camera_info:=distance/camera_info image_rect:=distance/image_rect __name:=rectify_distance __log:=/home/rosuser/.ros/log/f7737d60-5c96-11e5-b957-78dd08f4756e/camera-rectify_distance-4.log]. log file: /home/rosuser/.ros/log/f7737d60-5c96-11e5-b957-78dd08f4756e/camera-rectify_distance-4*.log This is followed by an endless restarting and starting of the separate processes involved and throwing the same errors over and over again. All I have done in addition to my installation is to add to /usr/local my custom build of PCL and OpenCV (same versions as in upstream Debian Jessie but with some features enabled), which my no means interfere with the same libs but installed from upstream and in different locations. I have test with and without my custom installations - the result is the same. The update here refers to the building and installation of a bunch of additional packages (that I require or might require at some point in the future) among which is the image_pipeline set, tf, nodelet set etc. None gave any indication of being broken during the building process and the following installation. 
Also it seems to be impossible to use custom OpenCV install because no matter what I do (tried checkinstall and tested the installation with multiple different names for this library) rosdep continues don't give a damn and download and install the upstream version anyway. The only rviz-related issue that I can think of was during the installation. On Debian rosdep continuously does weird stuff with libogre. Debian's repos have both version 1.8 and 1.9. However for some reason rosdep continues to try installing 1.9 even though I have found out that 1.9 is a no go (rosinstall fails). So I have to reinstall 1.8 every single time I update my ROS space where I keep my sources and builds for my custom installation. Beside that I haven't had any issues during installation. I have tried several other things in ROS and they seem to run without a problem. I've checked for example the actionlib_tutorials and they run without any issues. All ros* commands (rosrun, roscore, roslaunch etc.) are there and working. I have to mention also that both compilation and linking of my nodes works without any errors. I have noticed one thing in particular in the errors I quoted above: Error string: Could not load library (Poco exception = libopencv_viz.so.2.4: cannot open shared object file: No such file or directory) The problem is I don't have such shared object on my computer. I've googled a little bit but all I got were 2 search results only one of which was handling this error in particular and it was said that it has something to do with image_geometry referencing an old version of OpenCV where this file was present but that is no longer the case. The CMakeLists.txt for my project is: cmake_minimum_required(VERSION 2.8.3) project(pmd_camboard_nano) #set(CMAKE_CXX_FLAGS "--std=gnu++11 ${CMAKE_CXX_FLAGS}") # Congrats! 
No C++11 :-/ http://stackoverflow.com/questions/32119437/weird-segmentation-fault-after-converting-a-ros-pointcloud2-message-to-pcl-point/32120491#32120491 find_package(catkin REQUIRED COMPONENTS roscpp sensor_msgs image_transport nodelet dynamic_reconfigure tf) generate_dynamic_reconfigure_options(cfg/PMD.cfg) catkin_package( INCLUDE_DIRS include CATKIN_DEPENDS roscpp sensor_msgs image_transport nodelet dynamic_reconfigure tf pcl_conversions pcl_msgs pcl_ros DEPENDS boost_system PCL ) # Set PMDSDK Requirements set(PMDSDK_ROOT_DIR ${PROJECT_SOURCE_DIR}/PMDSDK) # Change this if the path is different set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${PROJECT_SOURCE_DIR}/cmake/Modules/") # Register PMDSDK find_package(PMDSDK REQUIRED) # Register Boost find_package(Boost REQUIRED COMPONENTS system) # Register PCL find_package(PCL 1.7 REQUIRED COMPONENTS common io filters kdtree surface) # Includes, link directories and definitions # Place PCL includes before those of Catkin to detect the custom PCL install include_directories(include ${PCL_INCLUDE_DIRS} ${catkin_INCLUDE_DIRS} ${PMDSDK_INCLUDE_DIR} ${Boost_INCLUDE_DIRS}) #include_directories(include ${catkin_INCLUDE_DIRS} ${PCL_INCLUDE_DIRS} ${PMDSDK_INCLUDE_DIR} ${Boost_INCLUDE_DIRS}) link_directories(${PCL_LIBRARY_DIRS}) add_definitions(${PCL_DEFINITIONS}) add_definitions(-DPMD_PLUGIN_DIR="${PMDSDK_PLUGIN_DIR}/") ######### # BUILD # ######### # make sure configure headers are built before any node using them #set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}) message(${CATKIN_DEVEL_PREFIX}) add_library(${PROJECT_NAME}_nodelet src/driver_nodelet.cpp src/pmd_camboard_nano.cpp) target_link_libraries(${PROJECT_NAME}_nodelet ${catkin_LIBRARIES} ${PMDSDK_LIBRARIES}) add_dependencies(${PROJECT_NAME}_nodelet ${PROJECT_NAME}_gencfg) add_executable(${PROJECT_NAME}_node src/driver_node) target_link_libraries(${PROJECT_NAME}_node ${catkin_LIBRARIES} ${PMDSDK_LIBRARIES}) add_dependencies(${PROJECT_NAME}_node 
${PROJECT_NAME}_gencfg) add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS}) # TODO # Create a pipeline consisting of nodes for each processing step # Each node takes a point cloud message, processes it and then publishes the results # This will allow a very dynamic coupling between the processess working on the data from the PMD device # and will allow testing various combinations in order to achieve as good results as possible # The cloud handler receives point clouds either directly from the PMD publishing node (directly controlling # the PMD device) or from the cloud relay. Once a point cloud message is received, it is further processed add_executable(${PROJECT_NAME}_cloud_statistical_outlier_removal src/pmd_camboard_nano_cloud_statistical_outlier_removal) target_link_libraries(${PROJECT_NAME}_cloud_statistical_outlier_removal ${catkin_LIBRARIES} boost_system pthread) target_link_libraries(${PROJECT_NAME}_cloud_statistical_outlier_removal ${PCL_LIBRARIES}) #<---UNTIL LINKING ORDER IS RESOLVE USE ALL PCL LIBS # pcl_common pcl_io pcl_filters pcl_surface pcl_kdtree) add_executable(${PROJECT_NAME}_cloud_surface_smoothing_normal_estimation src/pmd_camboard_nano_cloud_surface_smoothing_normal_estimation) target_link_libraries(${PROJECT_NAME}_cloud_surface_smoothing_normal_estimation ${catkin_LIBRARIES} boost_system pthread) target_link_libraries(${PROJECT_NAME}_cloud_surface_smoothing_normal_estimation ${PCL_LIBRARIES}) add_executable(${PROJECT_NAME}_cloud_nurbs_mesh_generation src/pmd_camboard_nano_cloud_nurbs_mesh_generation) target_link_libraries(${PROJECT_NAME}_cloud_nurbs_mesh_generation ${catkin_LIBRARIES} boost_system pthread) target_link_libraries(${PROJECT_NAME}_cloud_nurbs_mesh_generation ${PCL_LIBRARIES}) Any ideas why it's so broken? EDIT: I decide to rebuild everything (custom installs stayed this time). ROS automatically detected and used my custom PCL (nice!) during the build process. 
This also pointed me in the more or less right direction - OpenCV or to be more precise - my custom build. Upon executing rviz now I get a much clearer message: rviz: error while loading shared libraries: libopencv_viz.so.2.4: cannot open shared object file: No such file or directory I have looked for this libopencv_viz but managed to find a small set of posts relating to it but in OpenCV 3.0. It is said that this module (which I've enabled when building my OpenCV - it uses VTK) is supposedly backported to the 2.4.9 branch from 3.0.0 but there seems to be some misplaced headers here and there. During the compilation and linking process of my OpenCV VTK didn't cause any problems. However due to the above mentioned hiccup it seems best to leave it out of my final build. EDIT2: The error seems not to be coming from libopencv_viz but from the overall location of the libs (that is why it's not a good idea to mix things). I cleaned my system from my custom OpenCV, rebuild it without the VTK support, rebuild all that required OpenCV (including RViz) and I this time I got that rviz: error while loading shared libraries: libopencv_nonfree.so.2.4: cannot open shared object file: No such file or directory I remembered that I also had to export my path and after doing that (ldconfig -v) I got the same invalid pointer error as above. It seems that all this might be coming from all the OpenCV-related packages. I guess I have to download those, modify them and then rebuild. Christ, building ROS from source even with little tweaks is such a tedious work... Originally posted by rbaleksandar on ROS Answers with karma: 299 on 2015-09-16 Post score: 1 Answer: Fixed it by doing a clean build with ONLY my custom built OpenCV and PCL inside my catkin workspace and then installing the packages. Originally posted by rbaleksandar with karma: 299 on 2015-09-21 This answer was ACCEPTED on the original site Post score: 0
{ "domain": "robotics.stackexchange", "id": 22647, "tags": "ros, librviz, debian, nodelet" }
Error : rospy.init_node() has already been called with different arguments:
Question: In my project i have one publisher file ( publish rpm for a wheel) and one subscriber file (calculate the speed of the wheel based on rpm value ) the code works fine until i try to publish the speed to another node called "speed" and it gives me this error rospy.init_node() has already been called with different arguments: the subscriber file code as below: import rospy from std_msgs.msg import String , Float64 import math def calculate_speed(rpm): radius = 5.1 speed = rpm.data*2.0*(math.pi)*radius print(speed) def rpm_subscriber(): rospy.init_node('rpm_Subscriber_Node') rospy.Subscriber('rpm' , Float64 , calculate_speed) def speed_publish(): rospy.init_node("speed_Publisher_Node") pub = rospy.Publisher("speed" , Float64 , queue_size=10) rate = rospy.Rate(5) pub.publish(speed) if __name__ == '__main__': rpm_subscriber() speed_publish() print('DONE') please help i have been stuck here for an hours Originally posted by tq1992 on ROS Answers with karma: 1 on 2021-02-12 Post score: 0 Answer: Well, the error is pretty clear. You have two calls to rospy.init_node() in your code. One executable equals one ROS node, and should thus only have one call to rospy.init_node(). You'd need to split your code into two executables, if this should be two nodes... Originally posted by mgruhler with karma: 12390 on 2021-02-12 This answer was ACCEPTED on the original site Post score: 1 Original comments Comment by tq1992 on 2021-12-21: thanks man
{ "domain": "robotics.stackexchange", "id": 36078, "tags": "ros" }
Different versions of Schwinger parameterization
Question: One commonly used trick when calculating loop integrals is Schwinger parameterization. And I have seen two versions among wiki, arxiv and lecture notes. $$\frac{1}{A}=\int_0^{\infty} \mathrm{d}t \ e^{-tA}$$ or, $$\frac{-i}{(-i)A}=-i\int_0^{\infty} \mathrm{d}t \ e^{itA}$$ where $A=p^2-m^2+i\epsilon$. I know the latter is surely true since its real part $Re(-iA)=\epsilon\gt0$ and thus applicable for the equation $$\frac{1}{a}=\int_0^{\infty} \mathrm{d}t \ e^{-at}\ \text{ for } Re(a)\gt0.$$ But as for the former, it doesn't hold true for space-like momenta, which the loop momentum may well take, i.e. $p^2-m^2\lt0$. I am very confused why so many people still use the first one and any explanation will be appreciated! Answer: The Schwinger parameter itself is manifestly positive. In particular, it is not Wick-rotated, so there are not different versions of it. Rather it is OP's $A$ operator that is Wick-rotated. OP lists a few references in above comments. Ref. 1 works in Euclidean signature, so it's well-defined. Ref. 2 & 3 only use the Schwinger parameter to derive the Feynman parametrization. They, on the other hand, want to work in Minkowskian signature. In practice one would then have to argue (presumably case by case) if one can analytically continue/Wick-rotate from Euclidean signature to Minkowskian signature (thereby introducing the Feynman $i\epsilon$-prescription). References: H. Kleinert & V. Schulte-Frohlinde, Critical Properties of $\phi^4$-Theories; chapter 8, p. 106. J.A. Shapiro, Schwinger trick and Feynman Parameters, 2007 lecture notes; p. 1. S. Weinzierl, The Art of Computing Loop Integrals, arXiv:hep-ph/0604068; p. 11.
{ "domain": "physics.stackexchange", "id": 78478, "tags": "quantum-field-theory, metric-tensor, conventions, propagator, wick-rotation" }
Chemistry from a physical perspective
Question: I'm currently learning chemistry for the first time, and loving it. I have a reasonably good physics and maths background and it's great to see things like spherical harmonics in quantum mechanics "take shape" through chemistry in the form of orbitals. Overall, I am seeing a lot of physics and some mathematics present. However my source is not entirely rigorous, at least by my standards. Could anyone recommend a text or source that explains chemistry topics from a physics point of view rigorously? Examples of questions I am struggling with (from a physical viewpoint) are Why do bondings occur? What is the distance of seperation? Why is a water molecule bent? Any help appreciated! Answer: A classic book is "Molecular Quantum Mechanics" by Atkins. Possibly also "The Chemical Bond" by Murrell, Kettle and Tedder, though I think Atkins is better.
{ "domain": "physics.stackexchange", "id": 9628, "tags": "soft-question, resource-recommendations, education, physical-chemistry, quantum-chemistry" }
Restriction Forces in Lagrangian Mechanics
Question: I was recently preparing for a test on Classical Mechanics and a friend of mine started wondering if there was any method through which we could obtain the restriction forces acting on a certain particle without using the Lagrange Multipliers method. At first glance my feeling would be that since Lagrangian Mechanics deals primarily with action and energies and not with forces, the answer would be no. But I'm really curious, any help would be appreciated. Answer: If the constraints are holonomic one doesn't need Lagrange multipliers to solve Lagrange equations $$ \frac{d}{dt}\frac{\partial T}{\partial \dot{q}^j}-\frac{\partial T}{\partial q^j}~=~Q^a_j,\qquad j~\in~\{1,\ldots, n\},\tag{1}$$ where $$Q^a_j~=~\sum_{i=1}^N {\bf F}^a_i\cdot \frac{\partial {\bf r}_i}{\partial q^j},\qquad j~\in~\{1,\ldots, n\},\tag{2}$$ is the $j$'th applied generalized force. The constraint force ${\bf F}^c_{i}$ on the $i$'th point particle with position $${\bf r}_i(q^1, \ldots, q^n,t),\qquad i~\in~\{1,\ldots, N\},\tag{3}$$ can then in principle be reconstructed via Newton's 2nd law $${\bf F}^c_{i}~=~\dot{\bf p}_i-{\bf F}^a_i,\tag{4}$$ where ${\bf F}_i^a$ is the applied force from eq. (2). See also this related Phys.SE post. References: H. Goldstein, Classical Mechanics, Chapters 1 & 2.
{ "domain": "physics.stackexchange", "id": 53021, "tags": "forces, classical-mechanics, lagrangian-formalism, coordinate-systems, constrained-dynamics" }
Phasor representation of voltage in frequency domain
Question: In a text on the application of electromagnetism to transmission lines, a phasor for the voltage (in frequency domain) is introduced: $$\tilde{V}(x) = V^+e^{-i\beta x} + V^-e^{i\beta x}.$$ Here $V^+$ and $V^-$ are the amplitudes of the incoming wave and reflected wave. My question is: the exponentials $e^{-i\beta x}$ and $e^{i\beta x}$ are already complex, so should $V^+$ and $V^-$ be real or complex, and why? Answer: Complex $V^+$ and $V^-$ (or in practice, just the reflected wave amplitude) represent the relative phase shift between the two signals.
{ "domain": "physics.stackexchange", "id": 7586, "tags": "electromagnetism, electromagnetic-radiation, waves, complex-numbers" }
Quantum entanglement correlations - role of probability
Question: Suppose in an entanglement experiment, angles of measurement are fixed. QM predicts that $50$% pairs will measure same spin. Ignoring other details, just consider QM predicted value is $50$%. Then does QM also predict the distribution of same spin pairs? If so, is that predicted distribution any different from that of any other event with $50$% probability, like tossing a coin? To clarify further - say probability of getting a head in a coin toss is $50$%. If we toss the coin $100$ times, probability says - expected number of $10$ consecutive heads is $(100 - 9)*(.5)^{10}$. Would this also be the expected number of $10$ consecutive same spin pairs out of $100$ pairs with same spin pair prediction at $50$%? Please ignore the fact that $100$ is a small number for entanglement. The question is just about the concept. Further clarification - As far as state is concerned, you may consider/assume any state. Only thing is that the number of same spin pairs is predicted at 50%. I said 50 because it can be compared with a coin toss. The real question is - given a prediction of 50%, is expected number of m consecutive same spin pairs, same as the expected number of m consecutive heads in a coin toss. The probability of getting a same spin pair and that of getting a head being same. Total number of entangled pairs measured also same as total number of coin tosses. Answer: The answer to the question is yes. It is not possible to derive this result from within quantum theory, rather it is stated as a postulate (the Born rule) that the mod square of the coefficients in wave function expansions should be interpreted as a probability, with all the usual implications of probability from statistical physics. 
As such strictly the wave function of a system has no meaning for a single measurement but can only be given meaning in the context of measurements of an ensemble of identically prepared systems, in which case the probability is defined as a frequency of results of a large sample of runs. The Statistical (or Ensemble) Interpretation of quantum mechanics formalises this idea, and can be viewed as the minimalist interpretation consistent with the mathematical formalism. It is an open question whether there is an underlying theory from which outcomes of individual experiments could be predicted. Edit: There have been attempts to derive the Born rule, for example from Envariance (http://arxiv.org/abs/quant-ph/0405161) or from the Many Worlds Interpretation. It is debatable to what extent these arguments are circular.
{ "domain": "physics.stackexchange", "id": 33199, "tags": "quantum-mechanics, quantum-entanglement" }
Calculate questions per day on CodeGolf.SE
Question: I wrote a short script in Python 3 that connects to the Stack Exchange API, gets all questions on Programming Puzzles & Code Golf over the past two weeks, and determines the average number of questions per day as well as the average number of answers per question. The number of questions per day is intended to match that on Area 51, which it does. Obviously it's much easier to just scrape Area 51 directly, but I wanted to figure it out myself for practice. I'm not an expert with Python or with web APIs, so I was hoping you fine Code Review folks can help me improve my practices. import requests, datetime, time def seconds_since_epoch(dt): epoch = datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc) return int((dt - epoch).total_seconds()) today = datetime.datetime.now(datetime.timezone.utc) params = { "site": "codegolf", "fromdate": seconds_since_epoch(today - datetime.timedelta(days=14)), "todate": seconds_since_epoch(today), "pagesize": 100, "page": 1 } base_url = "https://api.stackexchange.com/2.2" results = [] while True: req = requests.get(base_url + "/questions", params=params) contents = req.json() results.extend(contents["items"]) if not contents["has_more"]: break if "backoff" in contents: time.sleep(contents["backoff"]) params["page"] += 1 questions_per_day = len(results) / 14 answers_per_question = sum([q["answer_count"] for q in results]) / len(results) print("Over the past 2 weeks, PPCG has had...") print(round(questions_per_day, 1), "questions per day") print(round(answers_per_question, 1), "answers per question") My approach is to build the query using a dict and make the request to the API using the requests module. I set the page size to the maximum to reduce the number of requests made so that the daily quota isn't exhausted quite so fast. The code is hosted on GitHub, should you want to fork and adapt it for your own purposes, assuming it isn't too terrible. 
Answer: Your seconds_since_epoch function has a built-in Python equivalent, datetime.timestamp. Your namespaces would be cleaner if you did from datetime import datetime, timezone. You use a base_url variable, but do not use urllib.parse.urljoin. Either use a hardcoded URL, or properly join the base URL with the fragment. results is better named as questions. In sum([q["answer_count"] for q in results]) the [] is superfluous and inefficient. Instead of printing 3 times in a row create a multiline format string and print once. You never create a function that returns the questions, and do not define a main function. I suggest printing in the main function, that calls a function that gets and returns the question information. This is how I would program it: import requests import time from datetime import datetime, timezone, timedelta def get_question_info(site, start, stop): API_URL = "https://api.stackexchange.com/2.2/questions" req_params = { "site": site, "fromdate": int(start.timestamp()), "todate": int(stop.timestamp()), "pagesize": 100, "page": 1 } questions = [] while True: req = requests.get(API_URL, params=req_params) contents = req.json() questions.extend(contents["items"]) if not contents["has_more"]: break req_params["page"] += 1 if "backoff" in contents: time.sleep(contents["backoff"]) return questions def get_area51_estimate(site): now = datetime.now(timezone.utc) fortnight_ago = now - timedelta(days=14) questions = get_question_info(site, fortnight_ago, now) avg_questions = len(questions) / 14 avg_answers = sum(q["answer_count"] for q in questions) / len(questions) return avg_questions, avg_answers if __name__ == "__main__": msg = """Over the past 2 weeks, PPCG has had... {:.1f} questions per day {:.1f} answers per question""" print(msg.format(*get_area51_estimate("codegolf")))
{ "domain": "codereview.stackexchange", "id": 18598, "tags": "python, python-3.x, stackexchange" }