anchor stringlengths 0 150 | positive stringlengths 0 96k | source dict |
|---|---|---|
Parcel size classifier | Question: I submitted a technical assignment for a job interview process and I am stressing out wondering if my submission was any good. I would like to learn and be a better developer, so please give me pointers about what I could improve on.
GitHub link
using System;
namespace ParsetheParcel
{
/// <summary>
/// Contract for producing a calculation result for a parcel.
/// NOTE(review): "Parcel" here is a generic type parameter that shadows the
/// Parcel class below — it is not a reference to that class.
/// </summary>
interface ICalculatable<Parcel>
{
    // Returns a user-facing message describing the calculation result.
    string Calculator(Parcel p);
}
/// <summary>
/// Holds a parcel's measurements plus the shipping price constants and the
/// maximum dimensions for each available package size.
/// </summary>
public class Parcel
{
    // Shipping cost per package size.
    // (Fun fact: store currency as decimal and not float/double.)
    public const decimal CostSmall = 5M;
    public const decimal CostMedium = 7.5M;
    public const decimal CostLarge = 8.5M;

    // Upper limit placed on the weight, in kg (inputs in grams are divided
    // by 1000 before being compared against this — see Converter).
    public const double PackageWeight = 25;

    // Parcel measurements: dimensions in mm, weight in kg (Converter
    // normalises user input to these units before constructing a Parcel).
    public double Weight { get; set; }
    public double Length { get; set; }
    public double Height { get; set; }
    public double Breadth { get; set; }

    // Maximum dimensions (mm) for each package size.
    // NOTE(review): enums are a questionable fit here — these are really
    // named constants, and enum members should be PascalCase.
    public enum SizeSmall
    {
        length = 200,
        breadth = 300,
        height = 150,
    }
    public enum SizeMedium
    {
        length = 300,
        breadth = 400,
        height = 200,
    }
    public enum SizeLarge
    {
        length = 400,
        breadth = 600,
        height = 250
    }
}
/// <summary>
/// Console front-end: gathers dimensions/weight from the user, normalises the
/// units, and reports which package size the parcel fits in and its cost.
/// </summary>
class Program : ICalculatable<Parcel>
{
    static void Main(string[] args)
    {
        // Show welcome message.
        Console.WriteLine("Kia ora, welcome to the trademe parcel calculator!\n");

        // Gather the three dimensions plus the unit they were entered in.
        Console.WriteLine("Please enter the dimensions of your parcel and the unit (mm, cm or m) when prompted:\n");
        Console.Write("Enter your dimensions for length: ");
        string length = GetInput();
        Console.Write("\nEnter your dimensions for breadth: ");
        string breadth = GetInput();
        Console.Write("\nEnter your dimensions for height: ");
        string height = GetInput();
        Console.Write("\nEnter the unit used for length, breadth and height (valid inputs: mm or cm or m): ");
        string dimenunit = GetUnitDimen();

        // Gather the weight plus its unit.
        Console.WriteLine("\n Please Enter the weight of your parcel and the unit (g, kg) when prompted: \n");
        Console.Write("Enter your weight: ");
        string weight = GetInput();
        Console.Write("\nEnter the unit used for weight (valid inputs: kg or g): ");
        string weightunit = GetUnitWeight();

        // Normalise everything to mm / kg, then classify the parcel.
        Parcel myparcel = Converter(length, breadth, height, dimenunit, weight, weightunit);
        Program p = new Program();
        Console.WriteLine("\n" + p.Calculator(myparcel));

        // Keep the console window open until the user presses Return.
        Console.WriteLine("\nPress Return key (enter) to Exit");
        Console.Read();
    }

    /// <summary>
    /// Determines which package type (if any) the parcel fits in and returns a
    /// user-facing message including the shipping cost.
    /// Expects dimensions in mm and weight in kg (see Converter).
    /// </summary>
    public string Calculator(Parcel parcel)
    {
        // Reject parcels over the maximum 25 kg weight limit first.
        // (Original comment said "minimum"; the check is an upper bound.)
        if (parcel.Weight > Parcel.PackageWeight)
        {
            return "Sorry your Parcel is too heavy for us to ship! Our Limit: " + Parcel.PackageWeight + " Your Parcel Weight: " + parcel.Weight;
        }

        // Reject parcels larger than the largest package so the remaining
        // checks only need to find the smallest fitting size.
        if (parcel.Length > (double)Parcel.SizeLarge.length ||
            parcel.Breadth > (double)Parcel.SizeLarge.breadth || parcel.Height > (double)Parcel.SizeLarge.height)
        {
            return "Sorry your Parcel is too large for us to ship!" +
                "\nOur Largest Package: " + (double)Parcel.SizeLarge.length + "mm x" + (double)Parcel.SizeLarge.breadth + "mm x" + (double)Parcel.SizeLarge.height + "mm" +
                "\nYour Dimensions:" + parcel.Length + "mm x" + parcel.Breadth + "mm x" + parcel.Height + "mm";
        }

        // Check the smallest package first.
        if (parcel.Length <= (double)Parcel.SizeSmall.length &&
            parcel.Breadth <= (double)Parcel.SizeSmall.breadth && parcel.Height <= (double)Parcel.SizeSmall.height)
        {
            return "Your Parcel can fit in our small package type! The cost would be: " + Parcel.CostSmall.ToString("C");
        }
        if (parcel.Length <= (double)Parcel.SizeMedium.length &&
            parcel.Breadth <= (double)Parcel.SizeMedium.breadth && parcel.Height <= (double)Parcel.SizeMedium.height)
        {
            return "Your Parcel can fit in our Medium package type! The cost would be: " + Parcel.CostMedium.ToString("C");
        }

        // Not oversize, and does not fit small or medium, so large must fit.
        return "Your Parcel can fit in our Large package type! The cost would be: " + Parcel.CostLarge.ToString("C");
    }

    /// <summary>
    /// Reads a numeric value (dimension or weight) from the console,
    /// re-prompting until the input parses as a double. Returns the raw string.
    /// </summary>
    private static string GetInput()
    {
        double result;
        while (true)
        {
            string value = Console.ReadLine();
            if (!string.IsNullOrEmpty(value) && double.TryParse(value, out result))
            {
                return value;
            }
            Console.WriteLine("Empty or incorrect input, please try again by entering a number only");
        }
    }

    /// <summary>
    /// Reads the unit for the dimensions, accepting only "mm", "cm" or "m"
    /// (case-insensitive); re-prompts on anything else.
    /// BUG FIX: the original mixed &amp;&amp; and || without parentheses, so the
    /// null/empty guard did not cover the "mm"/"cm" comparisons, and the
    /// do-while exit condition could dereference a null ReadLine result.
    /// </summary>
    private static string GetUnitDimen()
    {
        while (true)
        {
            string value = Console.ReadLine();
            if (!string.IsNullOrEmpty(value) &&
                (value.Equals("mm", StringComparison.OrdinalIgnoreCase) ||
                 value.Equals("cm", StringComparison.OrdinalIgnoreCase) ||
                 value.Equals("m", StringComparison.OrdinalIgnoreCase)))
            {
                return value;
            }
            Console.WriteLine("Empty or incorrect input, please try again");
        }
    }

    /// <summary>
    /// Reads the unit for the weight, accepting only "kg" or "g"
    /// (case-insensitive); re-prompts on anything else.
    /// BUG FIX: same operator-precedence problem as GetUnitDimen — the guard
    /// now covers every comparison.
    /// </summary>
    private static string GetUnitWeight()
    {
        while (true)
        {
            string value = Console.ReadLine();
            if (!string.IsNullOrEmpty(value) &&
                (value.Equals("kg", StringComparison.OrdinalIgnoreCase) ||
                 value.Equals("g", StringComparison.OrdinalIgnoreCase)))
            {
                return value;
            }
            Console.WriteLine("Empty or incorrect input, please try again");
        }
    }

    /// <summary>
    /// Parses the raw input strings and converts the dimensions to millimetres
    /// and the weight to kilograms, returning a populated Parcel.
    /// </summary>
    private static Parcel Converter(string length, string breadth, string height, string dimenunit, string weight, string weightunit)
    {
        double result;
        double[] parcel = new double[4];
        // These parses should always succeed because GetInput has already
        // validated the strings; a failed parse would leave the slot at 0.
        if (double.TryParse(length, out result))
        {
            parcel[0] = result; // length
        }
        if (double.TryParse(breadth, out result))
        {
            parcel[1] = result; // breadth
        }
        if (double.TryParse(height, out result))
        {
            parcel[2] = result; // height
        }
        if (double.TryParse(weight, out result))
        {
            parcel[3] = result; // weight
        }

        // Convert dimensions to mm if they were entered in m or cm.
        if (dimenunit.Equals("m", StringComparison.OrdinalIgnoreCase))
        {
            parcel[0] = parcel[0] * 1000; // length convert to mm
            parcel[1] = parcel[1] * 1000; // breadth convert to mm
            parcel[2] = parcel[2] * 1000; // height convert to mm
        }
        else if (dimenunit.Equals("cm", StringComparison.OrdinalIgnoreCase))
        {
            parcel[0] = parcel[0] * 10; // length convert to mm
            parcel[1] = parcel[1] * 10; // breadth convert to mm
            parcel[2] = parcel[2] * 10; // height convert to mm
        }
        // Convert grams to kg for comparison against the kg weight limit.
        if (weightunit.Equals("g", StringComparison.OrdinalIgnoreCase))
        {
            parcel[3] = parcel[3] / 1000;
        }

        Parcel p = new Parcel()
        {
            Length = parcel[0],
            Breadth = parcel[1],
            Height = parcel[2],
            Weight = parcel[3]
        };
        return p;
    }
}
}
Answer: Usability
Your design is pretty string-heavy. If you want to build reusable functions, you should try to avoid this and work with more appropriate types given the context.
One such example is your interface design:
interface ICalculatable<Parcel>
{
string Calculator(Parcel p);
}
What can consumers do with the returned string? This interface can only be used to render results to a UI like the console. Consider returning a decimal or a response class that stores all the required data for consumers to act upon.
return "Your Parcel can fit in our small package type! The cost would be: " + Parcel.CostSmall.ToString("C");
could be:
return Parcel.CostSmall;
Another example of returning the wrong type is GetInput. Internally you parse the data to double only to return the raw string. This seems a bit nuts to me.
string Value; double result;
// ..
if (!string.IsNullOrEmpty(Value) && double.TryParse(Value, out result))
{
return Value; // should return result instead
}
// ..
Design
ICalculatable<Parcel>
interface ICalculatable<Parcel>
{
string Calculator(Parcel p);
}
Start by defining proper names. The interface is used to calculate the cost price of a Parcel. The interface is hence a ICalculator, not ICalculatable. The parcel would be the latter.
The name of the operation should be a verb specifying the action. CalculateCostPrice is a verb and is more specific than the generic Calculator.
If using generics, use a type T both as generic class type and input parameter for the method. But since this interface is specific to calculating the cost price, I would not use a generic class, unless you define an interface for types that have a cost price. This would be an exercise left for you to explore.
As already discussed, return a more useful return type. Let's assume all you want is the cost price, so a decimal would do.
/// <summary>
/// Performs calculations on Parcel instances.
/// </summary>
interface ICalculator
{
/// <summary> Calculates the cost price of a Parcel in <unit>. </summary>
decimal CalculateCostPrice(Parcel parcel);
}
General
Consider making Parcel immutable. This ensures better encapsulation of its state.
Enum names should be PascalCased, not camelCased.
Don't let the entrypoint Program implement an interface. Create a specific ParcelCalculator class.
Make sure to split API logic from end-user rendering. The string-based API is an anti-pattern for usability and reusability.
I like the fact you put all user input parsing in Program, not in the API.
Conclusion
I would focus on making a good design, think good about the arguments, return types and names of interfaces and methods. And don't forget to document interfaces to provide a clear Specification for consumers that focuses on Usability.
Don't let rendering pollute the API. Remove all string-based messages from the API and put them in the presentation layer. This is called Separation of Concerns. | {
"domain": "codereview.stackexchange",
"id": 36110,
"tags": "c#, .net, interview-questions, console, interface"
} |
What determines the color of the light emitted in a Tokamak? | Question: We see images of Tokamak plasma with all sorts of colours from red to purple. Why do we see any light at all, since the plasma should be so hot to have dissociated all its electrons? It is all from contamination or unwanted cooling?
Answer: As Maury Markowitz mentioned, the color of the edge of the plasma (the core is colorless and transparent) is determined by the composition of the partially-ionized gas that is recombining with plasma electrons and undergoing transitions at the edge of the plasma. Sometimes this gas is purposely injected (as was the case with DCX and xenon gas), and sometimes it is simply residual gas in the chamber. For the tokamak MAST, it is the latter; since the plasma is made of deuterium, the optical radiation consists of the spectral lines of the deuterium atom, which gives a pinkish glow to the edges of the plasma.
This is consistent with the explanation given by the ITER tokamak's public pages here: https://www.iter.org/newsline/258/1512 | {
"domain": "physics.stackexchange",
"id": 54050,
"tags": "visible-light, plasma-physics, photon-emission"
} |
From the local Hooke's law to the global one | Question: My system consist of a cylinder with axis Z that can contract and dilate along this axis. It obeys microscopically Hooke's law of elasticity:
$${\cal{L}}=\frac12\rho\dot{u_z}^2-\frac12C_{zzzz}(\partial_z u_z)^2$$
with $\cal{L}$ the Lagrangian density, $u_z(z,t)$ the local displacement to the rest position, $C_{zzzz}$ the unique component of the stiffness tensor (equal to the Young Modulus $Y$) relevant here considering the unique degree of freedom of my system, and $\rho$ of course for density.
Supposing that the stretch is uniform in my cylinder $u_z(z,t)$ obeys the following equation:
$$u_z(z,t)=\frac{z}{l}\delta z(t)$$ with $l$ the rest length of the cylinder, $\delta z$ the total stretch at its top, and the base being static by exterior constraint and defined as $z=0$.
My goal is to rederive from there the global Hooke law, by integrating the Lagrangian density over x,y,z;
But from there I arrive at:
$$L=\frac12m\delta\dot{z}^2-\frac12(\frac{3C_{zzzz} S}{l})\delta z^2$$
My problem is that I get a wrong value for the global stiffness of my bar: $K=3\frac{YS}{l}$, the factor 3 shouldn't be there. Is it purely geometrical or is there a misunderstanding in my reasoning? It may seem dull at first to derive this result, but I'll be way more confident in seeing the right result emerging from this calculus.
Plus, if I had added a dissipative term to my first Lagrangian density, would it be possible to derive a damping term for my global oscillations. Should I need to work it out from the wave equations? (because dissipative terms and Lagrangian description don't seem quite compatible for me).
Answer: Your erroneous assumption is that $C_{zzzz}$ corresponds directly to the Young's modulus. In fact, it does not, but in the case of your example is 1/3 of the Young's modulus. Therefore, the global stiffness is, in fact, $\frac{Y S}{l}$, as expected. To understand the relationship between the Young's modulus and the coefficient $C_{zzzz}$, take a look at Landau and Lifshitz, paying particular attention to equations (4.1) through (4.3), (5.3) and (5.5). (If you aren't convinced these equations apply to your problem, take a look at equation (10.3)... if it still doesn't make sense, look at equation (1.3).) | {
"domain": "physics.stackexchange",
"id": 6592,
"tags": "solid-state-physics, harmonic-oscillator, elasticity"
} |
How to simulate vacuum clamping? | Question:
I want to simulate a robot with vacuum cups. The robot moves the cups to touch a light part. When the cups touch the part, vacuum is activated and the part is clamped by the cups.
While vacuum is activated, the part follows the movement of the vacuum cups.
Could it be simulated with a plugin?
Originally posted by jalfonso on Gazebo Answers with karma: 31 on 2014-05-28
Post score: 2
Answer:
Hi,
you could check for contacts between the cups and the object, when contact is detected you can dynamically create a fixed joint between the two links to simulate the vacuum, thus the object will follow the movement of the vacuum cup.
If the idea suits you, check out the gazebo/physics/Gripper.cc example where fix joints are created. For detecting the contact use a contact sensor, here is a tutorial.
Cheers,
Andrei
Originally posted by AndreiHaidu with karma: 2108 on 2014-05-29
This answer was ACCEPTED on the original site
Post score: 3
Original comments
Comment by mehdi on 2015-06-11:
Is there any ROS interface for dynamically spawning such a joint?
Comment by Abdullah on 2016-08-29:
Thank you so much for the answer, I was just thinking the same thing for the same problem. Yet when I see Gripper class I would have to use the following code right?
Comment by Abdullah on 2016-08-29:
<grasp_check>
<attach_steps>5</attach_steps>
<detach_steps>10</detach_steps>
<min_contact_count>2</min_contact_count>
</grasp_check>
<gripper_link>gripper_active_finger_link</gripper_link>
<palm_link>gripper_static_finger_link</palm_link>
Comment by Abdullah on 2016-08-29:
So what if we do not use any other link but only one link palm link , would it work? | {
"domain": "robotics.stackexchange",
"id": 3594,
"tags": "gazebo-all-versions"
} |
Internal energy of ideal gas in adiabatic process | Question:
In the above diagram it can be seen that the curve for adiabatic compression is steeper than the curve for adiabatic expansion. The difference between p1 and p4 is more than p2 and p3 - it means that the weight lifted from the piston during adiabatic expansion is less than the weight added to the piston in adiabatic compression. This implies that the change in internal energy in the overall Carnot cycle is greater than zero since the internal energy which decreased in adiabatic expansion is less than the energy gained during adiabatic compression. Kindly tell me where I am wrong.
Answer: The fact that the difference between $p_1$ and $p_4$ is greater than the difference between $p_2$ and $p_3$ does not actually imply that the increase of the internal energy during the adiabatic compression is greater than the decrease of the internal energy during the adiabatic expansion. They are the same, since the internal energy of an ideal gas is determined by its temperature and the temperature difference is the same in both cases.
Your conclusion is based on the assumption that the work is determined by the pressure only, but the work is also determined by the volume change, which is greater during the adiabatic expansion than during the adiabatic compression. | {
"domain": "physics.stackexchange",
"id": 50691,
"tags": "thermodynamics, heat-engine"
} |
Move_base doesn't subscribe to "/scan" topic from RpLidar | Question:
Greetings,
I'm trying to configure move_base on a Jetson Nano board (Ubuntu 18.04, ROS Melodic) equipped with an RpLidar and ZED2 camera. The RpLidar provides the "/scan" topic which updates the local_costmap. Unfortunately, I can't get the "/scan" topic to get linked to move_base and I can't figure out how to link them both together.
I am following the tutorial on this page: move_base - ROS Wiki
Here are the config files for move_base on the jetbot:
Launch File:
<launch>
<!-- NODE1: ZED2 NODE CREATION -->
<arg name="svo_file" default="" /> <!-- <arg name="svo_file" default="path/to/svo/file.svo"> -->
<arg name="stream" default="" /> <!-- <arg name="stream" default="<ip_address>:<port>"> -->
<arg name="node_name" default="zed_node" />
<arg name="camera_model" default="zed2" />
<arg name="publish_urdf" default="true" />
<arg name="camera_name" default="zed2" />
<arg name="base_frame" default="base_link" />
<arg name="cam_pos_x" default="0.0" /> <!-- Position respect to base frame (i.e. "base_link) -->
<arg name="cam_pos_y" default="0.0" /> <!-- Position respect to base frame (i.e. "base_link) -->
<arg name="cam_pos_z" default="0.0" /> <!-- Position respect to base frame (i.e. "base_link) -->
<arg name="cam_roll" default="0.0" /> <!-- Orientation respect to base frame (i.e. "base_link) -->
<arg name="cam_pitch" default="0.0" /> <!-- Orientation respect to base frame (i.e. "base_link) -->
<arg name="cam_yaw" default="0.0" /> <!-- Orientation respect to base frame (i.e. "base_link) -->
<group ns="$(arg camera_name)">
<include file="$(find zed_wrapper)/launch/include/zed_camera.launch.xml">
<arg name="camera_name" value="$(arg camera_name)" />
<arg name="svo_file" value="$(arg svo_file)" />
<arg name="stream" value="$(arg stream)" />
<arg name="node_name" value="$(arg node_name)" />
<arg name="camera_model" value="$(arg camera_model)" />
<arg name="base_frame" value="$(arg base_frame)" />
<arg name="publish_urdf" value="$(arg publish_urdf)" />
<arg name="cam_pos_x" value="$(arg cam_pos_x)" />
<arg name="cam_pos_y" value="$(arg cam_pos_y)" />
<arg name="cam_pos_z" value="$(arg cam_pos_z)" />
<arg name="cam_roll" value="$(arg cam_roll)" />
<arg name="cam_pitch" value="$(arg cam_pitch)" />
<arg name="cam_yaw" value="$(arg cam_yaw)" />
</include>
</group>
<remap from="/odom" to="/zed2/zed_node/odom"/>
<!-- NODE 2: RP_Lidar -->
<node name="rplidarNode" pkg="rplidar_ros" type="rplidarNode" output="screen">
<param name="serial_port" type="string" value="/dev/ttyUSB0"/>
<param name="serial_baudrate" type="int" value="115200"/><!--A1/A2 -->
<param name="frame_id" type="string" value="laser_frame"/>
<param name="inverted" type="bool" value="false"/>
<param name="angle_compensate" type="bool" value="true"/>
</node>
<!-- NODE 3: ROS Jetbot -->
<node name="jetbot_control_node" pkg="ros_jetbot" type="controll.py"/>
<!-- NODE 4: Move Base Wrapper (Configuration) -->
<node pkg="move_base" type="move_base" respawn="false" name="move_base" output="screen">
<rosparam file="$(find zed_move_base)/config/costmap_common_params.yaml" command="load" ns="global_costmap" />
<rosparam file="$(find zed_move_base)/config/costmap_common_params.yaml" command="load" ns="local_costmap" />
<rosparam file="$(find zed_move_base)/config/local_costmap_params.yaml" command="load" ns="local_costmap" />
<rosparam file="$(find zed_move_base)/config/global_costmap_params.yaml" command="load" ns="global_costmap" />
<rosparam file="$(find zed_move_base)/config/base_local_planner_params.yaml" command="load" />
</node>
</launch>
Costmap_common_params.yaml:
obstacle_range: 2.5
raytrace_range: 3.0
footprint: [[0.05, 0.05], [0.05, -0.05], [-0.05, -0.05], [-0.05, 0.05]]
inflation_radius: 0.3
footprint_padding: 0.05
observation_sources: laser_scan_sensor
laser_scan_sensor: {sensor_frame: laser_frame, data_type: LaserScan, topic: scan, marking: true, clearing: true}
Local_costmap_params.yaml:
global_frame: odom
robot_base_frame: base_link
update_frequency: 6.0
publish_frequency: 5.0
static_map: false
rolling_window: true
width: 10.0
height: 10.0
resolution: 0.05
plugins:
- {name: obstacles_laser, type: "costmap_2d::ObstacleLayer"}
- {name: inflation, type: "costmap_2d::InflationLayer"}
Global_costmap_params.yaml:
global_frame: map
robot_base_frame: base_link
update_frequency: 6.0
static_map: false
rolling_window: true
track_unknown_space: true
width: 20.0
height: 20.0
resolution: 0.05
plugins:
- {name: obstacles_laser, type: "costmap_2d::ObstacleLayer"}
- {name: inflation, type: "costmap_2d::InflationLayer"}
Base_local_planner_params.yaml:
controller_frequency: 5.0
recovery_behaviour_enabled: true
clearing_rotation_allowed: false
TrajectoryPlannerROS:
max_vel_x: 0.5
min_vel_x: 0.3
max_vel_theta: 1.0
min_in_place_vel_theta: 0.5
acc_lim_theta: 3.2
acc_lim_x: 2.5
acc_lim_y: 2.5
holonomic_robot: false
meter_scoring: true
How can I get move_base to publish to the "/scan" topic? Is there anything missing/wrong in the config files?
Originally posted by Hlezzaik on ROS Answers with karma: 3 on 2021-12-14
Post score: 0
Answer:
It may be that observation_sources and plugins are not connected. You can probably subscribe with the following
Local_costmap_params.yaml:
global_frame: odom
robot_base_frame: base_link
update_frequency: 6.0
publish_frequency: 5.0
static_map: false
rolling_window: true
width: 10.0
height: 10.0
resolution: 0.05
plugins:
- {name: obstacles_laser, type: "costmap_2d::ObstacleLayer"}
- {name: inflation, type: "costmap_2d::InflationLayer"}
# add
obstacles_laser:
observation_sources: laser_scan_sensor
laser_scan_sensor: {sensor_frame: laser_frame, data_type: LaserScan, topic: scan, marking: true, clearing: true}
Originally posted by miura with karma: 1908 on 2021-12-14
This answer was ACCEPTED on the original site
Post score: 1
Original comments
Comment by Hlezzaik on 2021-12-15:
Thanks, I just tried that and it actually worked!!
Comment by miura on 2021-12-15:
@Hlezzaik I'm glad it worked out.
Could you please click on the collect symbol to indicate that it is the correct answer?
Comment by Hlezzaik on 2021-12-16:
Thank you, just did that.
I'm having a new problem now though, with the config files as above and adding the obstacles laser to the local costmap, I sense there's something missing for the global costmap.
I'm getting the following bugs whenever I tell the robot to go to a specific point:
Could not transform the global plan to the frame of the controller
The origin of the sensor at (1.68, 0.10) is out of map bounds. So, the costmap cannot raytrace for it.
The goal sent to the navfn planner is off the global costmap. Planning will always fail to this goal.
Any suggestions?
Comment by miura on 2021-12-16:
Thank you for clicking.
I think that creating map information with gmapping or map_server and reflecting it to the global cost map may solve the problem.
If you have additional questions, could you please create a new question page? | {
"domain": "robotics.stackexchange",
"id": 37251,
"tags": "navigation, ros-melodic, rplidar, costmap, move-base"
} |
Proving that the water leaving a vertical pipe is exponential (decay) | Question: How can I prove that the rate of which water leaves a vertical cylindrical container (through a hole at the bottom) is exponential of the form :
$$Ae^{kx}$$
I know that Torricelli's law is:
$$\sqrt{2gh}$$
But this only proves a square root relationship.
I have data points every 10 seconds and graphed it suggests a decay function.
I know the distance between the pipe is 1.5M and the internal diameter is 5cm. The hole diameter is 0.25cm, if this helps.
I need to prove that the water leaving the pipe is exponentially decaying.
Answer: We don't give solutions to homework problems, but we can explore the physics a bit.
You need to set up a differential equation for the change in height with time. The way to do this is to note that if the speed of the water flowing out through the hole is $v$, and the area of the hole is $a$, then the volume flowing out per second is just $av$. The outflow per second is just the change in the volume in the pipe, so we get the differential equation:
$$ \frac{dV}{dt} = -av $$
where $V$ is the volume in the pipe. The next step is to note that if the area of the pipe is $A$ then the volume in the pipe is $V = Ah$ and because $A$ is constant:
$$ \frac{dV}{dt} = A\frac{dh}{dt} $$
So our equation for $dV/dt$ turns into:
$$ \frac{dh}{dt} = -\frac{a}{A}v $$
You mention Torricelli's law. If you use this to substitute for $v$ it's going to give you:
$$ \frac{dh}{dt} = -\frac{a\sqrt{2g}}{A}\,h^{1/2} \tag{1} $$
We solve differential equations like this by rearranging to give:
$$ \frac{dh}{h^{1/2}} = -\frac{a\sqrt{2g}}{A}\,dt $$
and then just integrate:
$$ \int\frac{dh}{h^{1/2}} = -\frac{a\sqrt{2g}}{A}\int dt $$
But this isn't going to give you an exponential decay of $h$ with time. If you measure an exponential decay (you don't say whether this is what you measure in the experiment) then Torricelli's law cannot apply.
Torricelli's law is based on inertial forces i.e. it balances the potential energy lost as the liquid falls with the kinetic energy gained at the outflow. However in fluid dynamics there are always two effects to consider - inertial forces and viscous forces. Torricelli's law considers only inertial forces and ignores viscosity of the liquid, so it applies only when inertial forces dominate. When viscous forces dominate the flow velocity will be determined by the Hagen-Poiseuille equation and that gives a different dependence of the velocity on the pressure:
$$ v \propto P \propto h $$
This is going to change our differential equation to:
$$ \frac{dh}{dt} \propto -C\,h \tag{2} $$
for some constant $C$ that will depend on the geometry of the hole and the viscosity of the fluid. Without going into the details, this will give you an exponential decay of $h$ with time.
So how $h$ varies with time in your experiment is going to depend on the details i.e. whether inertial or viscous forces dominate. I think you need to graph $\ln(h)$ against time and $\sqrt{h}$ against time and see which gives you a straight line. | {
"domain": "physics.stackexchange",
"id": 33162,
"tags": "fluid-dynamics"
} |
Nothing published to /scan under Hydro | Question:
After updating to Hydro, executing:
roslaunch turtlebot_bringup 3dsensor.launch
no longer seems to work correctly.
The expected topics appear, and it is possible to get some data off the Kinect: images and pointcloud data can be visualized in rviz. However, nothing is being published to the /scan topic.
Everything seems to work fine if I grab a copy of the Groovy version of 3dsensor.launch and launch that.
The issue is the same whether I use a Kinect or Asus Xtion.
Has anyone else had luck with 3dsensor.launch under Hydro?
Originally posted by nsprague on ROS Answers with karma: 228 on 2013-12-20
Post score: 1
Answer:
/scan topic may not work with the default 3dsensor.launch setup.
Try this.
3dsensor.launch depth_registration:=false
Originally posted by jihoonl with karma: 634 on 2013-12-22
This answer was ACCEPTED on the original site
Post score: 0
Original comments
Comment by nsprague on 2013-12-30:
Thanks! That solves the problem. Is there any reason this defaults to true? According to the documentation for openni_launch: "If true, use OpenNI's factory-calibrated depth->RGB registration", but it defaults to false for openni_launch. What is the practical impact of leaving it off?
Comment by tfoote on 2013-12-30:
It changes the type of output you get. With false, you get just depth clouds which are lower bandwidth and come out on a different topic. Changing it will break anyone using the old settings.
Comment by nsprague on 2014-01-02:
Good to know. I think this should be considered a bug in 3dsensor.launch. It will be confusing for users if different topics are "broken" based on a configuration setting. It's also inconsistent with the behavior in Groovy.
Comment by Daniel Stonier on 2014-01-02:
Create a ticket on github and when people have time they can check. I remember originally setting up 3dsensor.launch in groovy so everything was on and 'viewable' from rviz (but you shouldn't use it like this because it's cpu hungry), so it might be a regression.
Comment by nsprague on 2014-01-03:
https://github.com/turtlebot/turtlebot/issues/123
Comment by SHPOWER on 2014-01-15:
I tried the solution above. when I use depth_registration=false, I don't get the PointCloud2 output on rviz. | {
"domain": "robotics.stackexchange",
"id": 16507,
"tags": "kinect, turtlebot, ros-hydro"
} |
Maximum number of configurations of Turing machine after $n$ moves | Question: I came across following question:
What is the maximum number of configurations of a Turing machine after $n$ moves?
The answer given was:
$k^n$, where $k$ is a branching factor.
And that "branching factor" left me confused. So I gave some thoughts: Given $Q$ be total number of states, $\Gamma$ be a tape alphabet and two moves, left and right $\{L,R\}$, for every transition function, we have $2^{Q\times \Gamma \times 2}$ possible transitions at each of those $n$ moves. Thus, $k$ must be $2^{Q\times \Gamma \times 2}$. So, total number of configurations of Turing machine after $n$ moves must be ${(2^{Q\times \Gamma \times 2})}^n$. Am I correct with this?
Answer: Note that you are bounding the maximal number of configurations, that the machine can be in, from above. To see it more clearly, when a TM $M$ runs on a finite word $w$ it induces a configuration tree $G = \langle V, E\rangle$ where:
$V$ is the set of configurations that $M$ can be in when it runs on $w$,
$q_0w$ is the initial configuration of $M$ on $w$ and it is the root of the configuration tree,
and the edges in $E$ are defined such that $u$ is a child of $v$ whenever $u$ a consecutive configuration of $v$ (by consecutive we mean that it is possible to reach $u$ from $v$ without violating the transition function of $M$).
As you have mentioned, $k$ is a branching factor, and we usually mean by that the maximal branching degree in the configuration tree. In this aspect you're not correct, $k$ is at most $|{Q\times\Gamma\times \{L, R\}}| = {2|Q|\cdot |\Gamma|}$, and this follows from the definition of the transition function. Indeed, $k$ bounds from above the number of consecutive configurations (which is a subset of configurations) and not the number of all possible subsets of configurations of the TM.
Considering the configuration tree, it is a tree of height ($n = |w|$) with branching degree $k \leq {2|Q|\cdot |\Gamma|}$. Now its not hard to see that such a tree has at most $k^n$ leaves which bounds from above the number of configurations that $M$ can be in when it runs on a word $w$ of length $n$.
A thing worth mentioning is that, $k$ is a constant that does not depend on the input word $w$ (on $n = |w|$) or on a specific configuration tree, and we know that it exists. This sometimes makes life a bit easier as some algorithms/proofs rely on its existence to conclude some upper bounds. | {
"domain": "cs.stackexchange",
"id": 14978,
"tags": "formal-languages, turing-machines, automata"
} |
Is a black hole's 'size' a measure of its gravity? | Question: How can we talk about a black hole's size if it is a sizeless point of density striving for infinity, and only the radius of the optical manifestation of its extremely high gravity is measured?
Can I assume that 'size' of black hole is measure of its gravity?
Answer: The gravity of a black hole is the measure by which a frame is transported. This is measured by the symmetries of the spacetime, that is, with the Killing vectors. The Killing vector for the Schwarzschild metric is $K_t~=~(1~-~2m/r)^{1/2}\partial_t$, with $m~=~GM/c^2$. The horizon radius is of course $r_s~=~2m$. The surface gravity is then
$$
\nabla^\mu(K_\nu K^\nu)~=~-2gK^\mu,
$$
which with the application of $\nabla_\mu$ it is not hard to see that the gravity $g$ is
$$
g^2~=~-\frac{1}{2}\nabla^\mu K^\nu \nabla_\mu K_\nu.
$$
For the Schwarzschild metric this
$$
g~=~\frac{1}{1~-~2m/r}\frac{m}{r^2}.
$$
Clearly a tiny black hole can have a huge gravity close to the horizon. | {
"domain": "physics.stackexchange",
"id": 39880,
"tags": "black-holes, mass, event-horizon, singularities"
} |
If Proxima Centauri is only 4.25 light years away, why is there no attempt at communication? | Question: It is known that the Proxima Centauri star system is merely $\approx 4.25$ light years away from Earth. Moreover, an Earth-like planet has been discovered around Proxima B that is within the habitable zone. Given that there is such a short distance for potentially establishing communication with a hypothetical civilization, why is it that:
There have been no serious attempts at sending signals to that system - even if there is a very slim chance of receiving a response, surely the fact that there might be one in only $\approx 9.5$ years is a scientifically motivating one?
I understand that some people, like Hawking, argue that contact with such civilizations should be avoided. Still, I see no significant debates about sending a signal to Proxima. Surely, there would be a number of scientists who support the idea enough to initiate a debate on the subject?
So, what gives?
Answer: According to Wikipedia, it's unlikely that the planet you reference is habitable for many reasons, most notably the likely lack of an atmosphere due to intense solar winds. Still, it has been seen as an interesting subject of exploration, as evidenced by the Starshot Breakthrough Initiative. Additionally, trying to send signals has been proposed, as seen here.
As for the case against sending signals to this specific star, maybe people are nervous that its proximity is more of a liability than an asset; in the minuscule chance that an intelligent civilization exists there and would pose a threat to us, then it would be much more dangerous than one at a distant location, which may make people more reluctant to target it.
"domain": "physics.stackexchange",
"id": 41319,
"tags": "astronomy, stars, space-travel, biology"
} |
Bash script to download sequentially numbered images | Question: This is a pretty basic bash script (3.2 on Mac). I am downloading 584 images from a site in order to create an album.
#!/bin/bash
# Download 584 sequentially numbered images from a djatoka image server,
# saving each one locally as NNN.jpeg.
# The request URL is split in two halves; the zero-padded image number is
# spliced between urlFirst and urlSecond.
urlFirst="http://150.216.68.252:8080/adore-djatoka/resolver?url_ver=Z39.88-2004&rft_id=http://150.216.68.252/ncgre000/00000012/00011462/00011462_ac_0"
urlSecond=".jp2&svc_id=info:lanl-repo/svc/getRegion&svc_val_fmt=info:ofi/fmt:kev:mtx:jpeg2000&svc.format=image/jpeg&svc.level=6"
for i in {1..584}
do
# Zero-pad the counter to three digits (1 -> 001) to match the remote naming.
printf -v j "%03d" $i
url=$urlFirst$j$urlSecond
# -O names the downloaded file after the padded counter, e.g. 001.jpeg.
wget $url -O $j".jpeg"
done
What improvements should be made to this?
I understand that I could remove the variables in most cases but for debug purposes it was helpful to have them, as I could add basic echo statements.
I'm not very well versed in Bash scripting though (lots of basic scripts) - any improvements or best practices from Bash I'm missing would be appreciated.
Answer: I would write it this way:
#!/bin/bash
# Same downloader, restructured: a single printf-style URL template whose
# %04d placeholder is filled in once per iteration.
url_template="http://150.216.68.252:8080/adore-djatoka/resolver?url_ver=Z39.88-2004&svc_id=info:lanl-repo/svc/getRegion&svc_val_fmt=info:ofi/fmt:kev:mtx:jpeg2000&svc.format=image/jpeg&svc.level=6&rft_id=http://150.216.68.252/ncgre000/00000012/00011462/00011462_ac_%04d.jp2"
for i in {1..584}; do
# The local filename uses the 3-digit counter; the URL gets the 4-digit form.
wget -O $(printf '%03d.jpeg' $i) "$(printf "$url_template" $i)"
done
Specifically,
At first glance, I thought that $urlFirst and $urlSecond were two URLs, until I figured out what was happening.
If there is another leading 0, then %04d is more appropriate than %03d.
The web server isn't going to care about the order of the parameters in the query string. For the benefit of humans, though, it would be nicer to put the parameter that changes either first or last.
Fewer variables is nice. I do everything by interpolation. You can get a feel for what the wget command does, without tracing which variable came from where. | {
"domain": "codereview.stackexchange",
"id": 17582,
"tags": "bash, http, network-file-transfer"
} |
Idea for the Double Slit Experiment/Quantum Eraser | Question: I wanted to ask about an idea I got about the Double Slit Experiment.
As you may know, you can use the Double Slit Experiment to have particles form an interference pattern with themselves, however, observation of what slit they went through causes this pattern do disappear, even if that observation happened after the particle landed on the detector...
What would happen if you did this same experiment, but for example 4 at the same time. Lets say you wanted to send back the information 1001, so you decide that an interference pattern is 1, and no interference pattern is 0, you send the particles through, but delay observation of what slit they went through until a time after you observed where they landed. Then, you would remove the detectors from the 1st and 4th experiment in the line to cause it to be unknown what slit they went through, in effect causing an interference pattern, but for the 2nd and 3rd you did not remove the detectors, causing no interference pattern. Would this allow you to send information back in time? If not, what would stop it from happening? What would the results of what patterns appeared be?
Thank you for taking time to read this, and replying if you do so.
Answer: I think you are a little unclear about the quantum eraser experiment. If you read the wikipedia page for quantum eraser, you will see that their is no interference pattern visible in either case. The interference only emerges when you trace back the photons from both the detectors individually and then compare them with the previous done experiment to obtain two mutually exclusive interference patterns which in effect has no interference. | {
"domain": "physics.stackexchange",
"id": 44789,
"tags": "double-slit-experiment, quantum-eraser"
} |
Lorentz group and classification of fields by their transformation under Lorentz transformations | Question: Let's have Lorentz group with generators of 3-rotations, $\hat {R}_{i}$, and Lorentz boosts, $\hat {L}_{i}$. By introducing operators
$\hat {J}_{i} = \frac{1}{2}\left(\hat {R}_{i} + i\hat {L}_{i}\right), \quad \hat {K}_{i} = \frac{1}{2}\left(\hat {R}_{i} - i\hat {L}_{i}\right)$
we makes algebra of the Lorentz group the same as SU(2) (or SO(3)) group. So each irreducible representation of the Lorentz group can be built as
$$
\hat {\mathbf S}^{(j_{1}, j_{2})} = \hat {\mathbf S}^{j_{1}}\times \hat {\mathbf S}^{j_{2}},
$$
where $j_{1}, j_{2}$ are the max eigenvalues of $\hat {J}_{i}, \hat {K}_{i}$,
and it has dimention $(2j_{1} + 1)\times (2j_{2} + 1)$. The type of object, transforming via boosts and 3-rotations, is depend on $(j_{1}, j_{2})$:
$$
\Psi_{\alpha \beta} = S^{j_{1}}_{\alpha \mu}S^{j_{2}}_{\beta \nu}\Psi_{\mu \nu}.
$$
For $(0, 0)$ we have scalar, for $\left(\frac{1}{2}, 0 \right), \left(0, \frac{1}{2}\right)$ we have spinor (left- and right-handled) etc. The value $j_{1} + j_{2}$ corresponds to the maximum value of $\hat {J}_{i} + \hat {K}_{i} = \hat {R}_{i}$, so it is an eigenvalue of irreducible rep of 3-rotation operator and corresponds to the spin number.
But the irreducible rep of Lorentz group isn't unitary.
So, the question: how can we classify the objects via transformations by using non-unitary reps?
Answer: Note that particles correspond to irreductible unitary representations of the Poincaré group (alias inhomogeneous Lorentz group), not the Lorentz group alone.
In these Poincaré representations, states are represented by $|p, \lambda \rangle$.
$p$ is the momentum.
Let's consider positive massive representations ($p^2 = m^2, p^o >0$)
Let $\pi=(m,\vec 0)$. We see that we have a freedom to choose polarization, which corresponds to an $SO(3)$ symmetry. Looking at unitary representations of $SO(3)$ is the same thing as looking at representations of $SU(2)$
Here, $\lambda$ is a state basis for a little group $SU(2)$ representation $s$.
For a translation, we have :
$$U(a)|p, \lambda \rangle = e^{ iP.a}|p, \lambda \rangle$$
For a member $R$ of the little group $SU(2)$ , we have :
$$U(R)|\pi, \lambda \rangle = \sum_{\lambda'} D^{(s)}_{\lambda' \lambda}(R)|\pi, \lambda' \rangle$$
For any $SL(2,C)$ matrix $A$ , and for any $p$, it is possible to write an expression :
$$U(A)|p, \lambda \rangle = \sum_{\lambda'} D^{(s)}_{\lambda' \lambda}(W(p, A))| \Lambda_A p, \lambda' \rangle$$
where $W(p,A)$ is a $SU(2)$ little group element (see formula $18$ in the reference cited below for details)
With all this, you get an unitary representation of the Poincaré group.
The "Fock space" is the quantum version of these representations, that is it allows several-particles states.
See Reference pages 4 and 5
[EDIT]
"For fields isn't important to have lorentz-invariant positive definite norm?"
No. Take for instance the Dirac equations for the bi-spinor field. The representation is $(1/2,0) + (0,1/2)$. This is not a unitary representation. There is a left and a right spinor. The transformation could be written :
$$\psi_{L,R} \rightarrow e^{\frac{1}{2}(i\vec \sigma \cdot \vec \theta \mp \vec \sigma \cdot \vec \phi)}\psi_{L,R},$$
The parameters $\vec \theta$ correspond to rotations, the parameters $\vec \phi$ correspond to boosts.
Because the boost part is not unitary, we see clearly that the representation is not unitary.
So, this means that the bispinor bilinear expression $\psi^* \psi = \psi^*_{L}\psi_{L} + \psi^*_{R}\psi_{R}$ is not conserved in a Lorentz transformation [in fact, separarely, the spinor bilinear expressions $\psi^{*}_{L} \psi_{L}$ or $\psi^{*}_{R} \psi_{R}$ are not conserved too]. Remember here that the $ \psi,\psi_{L}, \psi_{R}$ are fields, not "wave function".
Is this a problem ? No.
What is $\psi^*(x) \psi(x)$? It is just (multiplied by $e$) the charge density of the fields, that is, $j^0(x)$
So, of course, $j^0(x)$ is not an invariant for a Lorentz transformation, because it is the time component of a Lorentz vector.
The real Lorentz invariant is here : $\overline \psi(x) \psi(x)= \psi^*(x) \gamma^0 \psi(x)$ | {
"domain": "physics.stackexchange",
"id": 8952,
"tags": "quantum-field-theory, group-theory, group-representations"
} |
How many iterations does the Bellman-Ford algorithm need for directed and undirected graphs | Question: The Bellman-Ford algorithm on a graph with $n$ vertices, normally includes a loop executed $n-1$ times. Each time through the loop we iterate over the list of edges $(u,v)$ and relax $v$. Note that we don't relax $u$ and $v$ on each iteration through the edges.
What I don't understand is that if $G$ is an undirected graph with $n$ vertices, then it is equivalent to a directed graph with $2n$ vertices. We simply think of the edge between $u$ and $v$ as a set $\{u,v\}$ for an undirected graph, and as the ordered pair $(u,v)$ for a directed graph.
I don't understand why the Bellman-Ford algorithm needs only $n-1$ repetitions for both a directed and undirected graph. It seems like it should take $n-1$ repetitions for directed graph, and $2n-1$ repetitions for undirected graphs or we should relax both vertices of an edge on each iteration.
Otherwise stated, why does running Bellman-Ford on a directed graph, also find the shortest paths of the undirected graphs?
Answer: The Bellman-Ford algorithm only needs $n-1$ iterations, regardless of the number of edges. The number of iterations needed depends only on the number of vertices, not on the number of edges.
When you talk about converting a directed graph to an undirected graph, that conversion increases the number of edges but does not change the number of vertices. Thus, it doesn't change the number of iterations needed for the Bellman-Ford algorithm. | {
"domain": "cs.stackexchange",
"id": 13450,
"tags": "algorithms, graphs, algorithm-analysis, shortest-path"
} |
Snake Game with Swing | Question: I put my code below to ask you for a review and ask you if this can be considered a good and clean code?
The game works just fine; I have encountered one issue though - the condition that prevents new food from being created on the current snake position is not working. To be honest, I had been trying to solve it but forgot about it, and now that I am posting this it is on my mind once more.
I have added some buttons to stop/pause the game (can be done by pressing Space as well) and choose speed. Score is related to speed value (score for food is the same as the number of speed).
What is most interesting, I guess, is the fact that almost all snake tutorials around the internet allow you (if you are fast enough) to bug the game with these simple steps: assume the snake moves to the right and the timer is set to some amount of time; if you manage to press the Up or Down arrow and then very quickly Left (before the timer has gone off), the snake will move backwards and finally eat itself. Most games don't allow you to change from Right to Left (or Up to Down, etc.) in one step, but this would be allowed. I prevent players from doing that by implementing a temporary direction that reads the current direction at the beginning of each time period (or refresh rate - it is the same here).
I would like to hear your comments, because I am not quite sure if I get (and if I can implement that) all the Object-oriented programming right. Thank you!
package com.RGuSnake;
public class Main {
    /** Application entry point: builds the game window via the Snake singleton. */
    public static void main(String[] args) {
        Snake game = Snake.getInstance();
        game.createBoard();
    }
}
package com.RGuSnake;
import javax.swing.*;
import java.awt.*;
import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Random;
/**
 * Central game object (singleton). Owns the snake segments, the food,
 * score/speed state, the Swing timer that drives the game loop, and the
 * top-level window wiring.
 *
 * NOTE(review): this class mixes window construction, input state and game
 * rules in one place ("god object"); splitting those responsibilities would
 * improve the design, but the public interface is kept unchanged here.
 */
public class Snake extends JPanel implements ActionListener {
    // Playing-field geometry, copied from the Board instance in createBoard().
    private int sizeWidth;
    private int sizeHeight;
    private int offsetWidth;
    private int offsetHeight;
    private int scale;
    // Segment positions; index 0 is the head.
    private ArrayList<Point> snakeLocation;
    private static Point food;
    // direction is what the player last requested; tmpDirection is the
    // direction that was in effect when the current tick started. Reversals
    // are checked against both, which blocks the "up then immediately left"
    // 180-degree turn exploit within a single timer period.
    private String direction = "RIGHT";
    private String tmpDirection = "RIGHT";
    private static final Snake snake = new Snake();
    private Integer delay;
    private Boolean isPaused = false;
    private Boolean isAlive = false;
    private Timer timer;
    private Board board;
    private Buttons buttons;
    private JFrame frame;
    private Integer score = 0;
    private int speed = 5;

    private Snake() {
        // Singleton: construction only through getInstance().
    }

    /** Returns the single shared game instance. */
    public static Snake getInstance() {
        return snake;
    }

    /** Builds the main window: the board in the centre, control buttons below. */
    public void createBoard() {
        frame = new JFrame("Typical Snake Game");
        snakeLocation = new ArrayList<>();
        // Park the snake and the food off-screen until the game starts.
        snakeLocation.add(new Point(-100, -100));
        food = new Point(-100, -100);
        board = new Board();
        sizeWidth = board.getSizeWidth();
        sizeHeight = board.getSizeHeight();
        offsetHeight = board.getOffsetHeight();
        offsetWidth = board.getOffsetWidth();
        scale = board.getScale();
        buttons = new Buttons();
        frame.getContentPane().add(BorderLayout.CENTER, board);
        frame.getContentPane().add(BorderLayout.SOUTH, buttons);
        frame.setPreferredSize(new Dimension(sizeWidth + 2 * offsetWidth, sizeHeight + 2 * offsetHeight + 50));
        frame.setResizable(false);
        frame.setVisible(true);
        frame.pack();
        frame.setLocationRelativeTo(null);
        frame.setFocusable(true);
        frame.requestFocus();
        frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
    }

    /** Resets all game state and starts (or restarts) the timer-driven loop. */
    public void startGame() {
        // Higher speed means a shorter tick: speed 5 -> 100 ms, speed 1 -> 160 ms.
        // (Debug println calls removed.)
        delay = 100 + (5 - speed) * 15;
        timer = new Timer(delay, this);
        if (frame == null) {
            snake.createBoard();
        }
        score = 0;
        direction = "RIGHT";
        snakeLocation.clear();
        // Six segments in a horizontal row, roughly centred on the board.
        for (int i = 0; i < 6; i++) {
            snakeLocation.add(new Point(Math.round((sizeWidth + offsetWidth) / (2 * 10)) * 10 - i * 10,
                    Math.round((sizeHeight + offsetHeight) / (2 * 10)) * 10));
        }
        newFood();
        buttons.blockButtons();
        isAlive = true;
        isPaused = false;
        timer.start();
    }

    /** Live list of segment positions (index 0 = head); callers must not mutate it. */
    public ArrayList<Point> getSnakeLocation() {
        return snakeLocation;
    }

    /** Current food position. */
    public Point getFoodLocation() {
        return food;
    }

    /** Whether a game is currently in progress. */
    public Boolean getIsAlive() {
        return isAlive;
    }

    /** Sets the direction requested by the player ("UP"/"DOWN"/"LEFT"/"RIGHT"). */
    public void setDirection(String dir) {
        snake.direction = dir;
    }

    public String getDirection() {
        return snake.direction;
    }

    /** Direction that was in effect when the current tick began. */
    public String getTmpDirection() {
        return snake.tmpDirection;
    }

    /** Space key / start button: starts a new game, or toggles pause while alive. */
    public void spacePressed() {
        if (!isAlive) {
            snake.startGame();
        } else {
            isPaused ^= true;
        }
    }

    public Boolean getPause() {
        return isPaused;
    }

    /** Advances the head one cell in the current direction (tail is handled in checkPosition). */
    public void move() {
        switch (direction) {
            case "RIGHT":
                snakeLocation.add(0, new Point(snakeLocation.get(0).x + 10, snakeLocation.get(0).y));
                break;
            case "LEFT":
                snakeLocation.add(0, new Point(snakeLocation.get(0).x - 10, snakeLocation.get(0).y));
                break;
            case "UP":
                snakeLocation.add(0, new Point(snakeLocation.get(0).x, snakeLocation.get(0).y - 10));
                break;
            case "DOWN":
                snakeLocation.add(0, new Point(snakeLocation.get(0).x, snakeLocation.get(0).y + 10));
                break;
        }
    }

    /** Timer callback: one game tick. */
    public void actionPerformed(ActionEvent arg0) {
        if (!isPaused && isAlive) {
            // Latch the direction for this tick so a queued reversal cannot apply.
            tmpDirection = direction;
            snake.move();
            snake.checkPosition();
            board.repaint();
        } else if (!isAlive) {
            timer.stop();
            buttons.enableButtons();
        }
    }

    /**
     * Places a new piece of food on a random cell not occupied by the snake.
     *
     * Bug fix: the original test was Arrays.asList(getSnakeLocation()).contains(point),
     * which wraps the whole ArrayList as the single element of a new list, so
     * contains(point) was always false and food could spawn on the snake.
     * Checking the point list directly restores the intended behaviour.
     */
    public void newFood() {
        Random random = new Random();
        Point point = new Point(random.nextInt(sizeWidth / scale) * scale + offsetWidth,
                random.nextInt(sizeHeight / scale) * scale + offsetHeight);
        while (snakeLocation.contains(point)) {
            point = new Point(random.nextInt(sizeWidth / scale) * scale + offsetWidth,
                    random.nextInt(sizeHeight / scale) * scale + offsetHeight);
        }
        food = point;
    }

    /** Food is worth the current speed setting. */
    public void increaseScore() {
        score = score + speed;
    }

    public int getScore() {
        return score;
    }

    /** Raises the speed setting (capped at 10). */
    public void increaseSpeed() {
        if (speed < 10) {
            speed += 1;
        }
    }

    /** Lowers the speed setting (floored at 1). */
    public void decreaseSpeed() {
        if (speed > 1) {
            speed -= 1;
        }
    }

    public int getSpeed() {
        return speed;
    }

    /** Repaints the board (used by the speed buttons to refresh the banner). */
    public void refresh() {
        board.repaint();
    }

    /**
     * Collision handling after a move: self-collision and wall collision end
     * the game; landing on food grows the snake, otherwise the tail is removed.
     */
    public void checkPosition() {
        // Self-collision; the final index is skipped — presumably because that
        // cell is the tail being vacated this tick (TODO confirm intent).
        for (int j = 1; j < snakeLocation.size() - 1; j++) {
            if (snakeLocation.get(0).equals(snakeLocation.get(j))) {
                isAlive = false;
            }
        }
        // Wall collision: the head has stepped exactly one cell outside the field.
        if (snakeLocation.get(0).x == offsetWidth - scale || snakeLocation.get(0).x == sizeWidth + offsetWidth
                || snakeLocation.get(0).y == offsetHeight - scale || snakeLocation.get(0).y == sizeHeight + offsetHeight) {
            isAlive = false;
        }
        if (snakeLocation.get(0).equals(food)) {
            newFood();
            increaseScore();
        } else {
            // No food eaten: drop the tail so the length stays constant.
            snakeLocation.remove(snakeLocation.size() - 1);
        }
    }
}
package com.RGuSnake;
import javax.swing.*;
import java.awt.*;
import java.util.ArrayList;
/**
 * Rendering surface for the game: draws the bordered play field, the snake,
 * the food, the score banner, and the "choose speed" prompt between games.
 * Also exposes the field geometry used throughout the game.
 */
public class Board extends JPanel {
    private int sizeWidth = 300;
    private int sizeHeight = 300;
    private int offsetWidth = 30;
    private int offsetHeight = 30;
    private int scale = 10;
    Snake snake = Snake.getInstance();

    public void paintComponent(Graphics g) {
        super.paintComponent(g);
        ArrayList<Point> segments = snake.getSnakeLocation();

        // Black border first, then the light-gray play area painted over it.
        g.setColor(Color.BLACK);
        g.fillRect(offsetWidth - scale, offsetHeight - scale, sizeWidth + 2 * scale, sizeHeight + 2 * scale);
        g.setColor(Color.LIGHT_GRAY);
        g.fillRect(offsetWidth, offsetHeight, sizeWidth, sizeHeight);

        // Body segments in black; the head (index 0) is painted red afterwards.
        g.setColor(Color.BLACK);
        for (int i = 1; i < segments.size(); i++) {
            Point segment = segments.get(i);
            g.fillRect(segment.x, segment.y, scale, scale);
        }
        g.setColor(Color.RED);
        g.fillRect(segments.get(0).x, segments.get(0).y, scale, scale);

        // Food cell.
        g.setColor(Color.BLUE);
        g.fillRect(snake.getFoodLocation().x, snake.getFoodLocation().y, scale, scale);

        // Score/speed banner centred above the field.
        g.setColor(Color.RED);
        Font bannerFont = new Font("Verdana", Font.BOLD, 12);
        g.setFont(bannerFont);
        FontMetrics metrics = g.getFontMetrics();
        String score = "Score: " + snake.getScore() + " Speed: " + snake.getSpeed();
        g.drawString(score, (offsetWidth * 2 + sizeWidth) / 2 - metrics.stringWidth(score) / 2, offsetHeight / 2);

        // Between games, prompt the player to pick a speed before restarting.
        if (!snake.getIsAlive()) {
            g.setFont(new Font("Verdana", Font.BOLD, 12));
            String gameOver1 = "CHOOSE THE SPEED";
            String gameOver2 = "BEFORE STARTING NEW GAME";
            metrics = g.getFontMetrics();
            g.setColor(Color.RED);
            g.drawString(gameOver1, (offsetWidth * 2 + sizeWidth) / 2 - metrics.stringWidth(gameOver1) / 2, (offsetHeight + sizeHeight) / 2);
            g.drawString(gameOver2, (offsetWidth * 2 + sizeWidth) / 2 - metrics.stringWidth(gameOver2) / 2, (offsetHeight + sizeHeight + 40) / 2);
        }
    }

    public int getSizeWidth() {
        return sizeWidth;
    }

    public int getOffsetWidth() {
        return offsetWidth;
    }

    public int getSizeHeight() {
        return sizeHeight;
    }

    public int getOffsetHeight() {
        return offsetHeight;
    }

    public int getScale() {
        return scale;
    }
}
package com.RGuSnake;
import javax.swing.*;
import java.awt.*;
import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;
import java.awt.event.KeyEvent;
/**
 * Bottom control strip: start/pause button and speed up/down buttons, plus
 * the window-wide keyboard bindings (arrow keys and space).
 */
public class Buttons extends JPanel {
    Snake snake = Snake.getInstance();
    // NOTE(review): this is a second Board instance used only for its size
    // getters; it is never displayed. Sharing the visible Board would be cleaner.
    Board board = new Board();
    JButton startGame;
    JButton speedDown;
    JButton speedUp;

    public Buttons() {
        JPanel buttonsPanel = new JPanel();
        buttonsPanel.setPreferredSize(new Dimension(board.getSizeWidth() + board.getOffsetWidth(), 20));
        startGame = new JButton("START!");
        startGame.setBackground(Color.red);
        speedDown = new JButton("Speed Down");
        speedUp = new JButton("Speed Up");
        startGame.addActionListener(new StartGame());
        speedUp.addActionListener(new SpeedUp());
        speedDown.addActionListener(new SpeedDown());
        // Keep the buttons unfocusable so the key bindings keep working after clicks.
        startGame.setFocusPainted(false);
        startGame.setFocusable(false);
        speedDown.setFocusPainted(false);
        speedDown.setFocusable(false);
        speedUp.setFocusPainted(false);
        speedUp.setFocusable(false);
        buttonsPanel.setLayout(new GridLayout());
        buttonsPanel.add(startGame);
        buttonsPanel.add(speedDown);
        buttonsPanel.add(speedUp);
        // WHEN_IN_FOCUSED_WINDOW: bindings fire regardless of which component
        // inside the window currently has focus.
        InputMap im = buttonsPanel.getInputMap(JPanel.WHEN_IN_FOCUSED_WINDOW);
        ActionMap am = buttonsPanel.getActionMap();
        im.put(KeyStroke.getKeyStroke(KeyEvent.VK_RIGHT, 0), "RightArrow");
        im.put(KeyStroke.getKeyStroke(KeyEvent.VK_LEFT, 0), "LeftArrow");
        im.put(KeyStroke.getKeyStroke(KeyEvent.VK_UP, 0), "UpArrow");
        im.put(KeyStroke.getKeyStroke(KeyEvent.VK_DOWN, 0), "DownArrow");
        im.put(KeyStroke.getKeyStroke(KeyEvent.VK_SPACE, 0), "Space");
        am.put("RightArrow", new ArrowAction("RightArrow"));
        am.put("LeftArrow", new ArrowAction("LeftArrow"));
        am.put("UpArrow", new ArrowAction("UpArrow"));
        am.put("DownArrow", new ArrowAction("DownArrow"));
        am.put("Space", new ArrowAction("Space"));
        add(buttonsPanel);
    }

    /** Disables the speed controls while a game is running. */
    public void blockButtons() {
        startGame.setText("PAUSE");
        speedUp.setEnabled(false);
        speedDown.setEnabled(false);
    }

    /** Re-enables the speed controls after a game ends. */
    public void enableButtons() {
        startGame.setText("START!");
        speedUp.setEnabled(true);
        speedDown.setEnabled(true);
    }

    // Renamed from lowercase "startGame" to follow Java class-naming convention
    // and match SpeedUp/SpeedDown; the class is private, so no callers change.
    private class StartGame implements ActionListener {
        @Override
        public void actionPerformed(ActionEvent e) {
            snake.spacePressed();
        }
    }

    private class SpeedUp implements ActionListener {
        @Override
        public void actionPerformed(ActionEvent e) {
            snake.increaseSpeed();
            snake.refresh();
        }
    }

    private class SpeedDown implements ActionListener {
        @Override
        public void actionPerformed(ActionEvent e) {
            snake.decreaseSpeed();
            snake.refresh();
        }
    }
}
package com.RGuSnake;
import javax.swing.*;
import java.awt.event.ActionEvent;
/**
 * Key-binding action shared by all arrow keys and the space bar. Each
 * instance is constructed with the name of the binding it handles and routes
 * the key press to the {@code Snake} singleton.
 */
public class ArrowAction extends AbstractAction {
    Snake snake = Snake.getInstance();

    // The command string is fixed at construction time, so keep it immutable.
    private final String cmd;

    /**
     * @param cmd one of "LeftArrow", "RightArrow", "UpArrow", "DownArrow"
     *            or "Space" (the names registered in the InputMap)
     */
    public ArrowAction(String cmd) {
        this.cmd = cmd;
    }

    /**
     * Applies the key press. An arrow key is ignored when it would reverse
     * the snake onto itself (checked against both the current and the
     * pending/tmp direction) or when the game is paused; space always
     * triggers {@code spacePressed()}.
     */
    @Override
    public void actionPerformed(ActionEvent e) {
        if (cmd.equalsIgnoreCase("LeftArrow") && !snake.getDirection().equals("RIGHT") && !snake.getTmpDirection().equals("RIGHT") && !snake.getPause()) {
            snake.setDirection("LEFT");
        } else if (cmd.equalsIgnoreCase("RightArrow") && !snake.getDirection().equals("LEFT") && !snake.getTmpDirection().equals("LEFT") && !snake.getPause()) {
            snake.setDirection("RIGHT");
        } else if (cmd.equalsIgnoreCase("UpArrow") && !snake.getDirection().equals("DOWN") && !snake.getTmpDirection().equals("DOWN") && !snake.getPause()) {
            snake.setDirection("UP");
        } else if (cmd.equalsIgnoreCase("DownArrow") && !snake.getDirection().equals("UP") && !snake.getTmpDirection().equals("UP") && !snake.getPause()) {
            snake.setDirection("DOWN");
        } else if (cmd.equalsIgnoreCase("Space")) {
            snake.spacePressed();
        }
    }
}
Answer:
I put my code below to ask for a review: can this be considered good, clean code?
Your program seems to suffer from the god object anti-pattern.
In short: your Snake class does almost everything.
An easy starting point to mitigate this is saving the board instance as a field in the Snake class and move some board-related functionality to the Board class (the Composition pattern).
That way you can remove the following fields from your Snake class:
private int sizeWidth;
private int sizeHeight;
private int offsetWidth;
private int offsetHeight;
private int scale;
If we do need one of those variables you just use the getter on the board:
board.getWidth()
Next look at where those variables are used and see if that functionality could fit in the Board class instead.
For example the newFood() method. My first thought is to put this method inside the Board class. And from the Snake class (or whoever triggers spawning a new piece of food later on) just do board.spawnNewFood().
This also means we can move
private static Point food;
inside the board as well. The idea is to remove all food related methods from the Snake class. It shouldn't be too hard to finish this part yourself.
I suggest to also add a method to Board to return a random spawning location for a new snake. This way if you later add some new special squares to your board like walls, you can make sure to spawn the snake in a valid location.
A major change I suggest is to split up the Snake class into a class that represents the game (with all the buttons, and initiating game state and coupling the other classes) and another that actually represents a Snake (with a position and movement and other snake related stuff).
This change is a bit too big to post in my answer here though, so you'll have to try yourself first.
Now that I pointed out the first steps to improving the cleanliness of your code, I'll try to address the problem of where to move next.
Your tmpDirection was a good first attempt, but I suggest to take a slightly different approach.
The 10 in move() is a magic number; declare it as a field or (recommended) a parameter to move(), e.g. double moveDistance. If you're sure that there will never be a need to change that value while the program is running, you could make it a constant — but think about it: if the user resizes the window, you should handle that gracefully and scale moveDistance to the playing area for the configured speed.
Since the direction can have only one among a specific set of values, an enum is recommended:
public enum Direction {
UP, DOWN, LEFT, RIGHT
}
I suggest to also keep 2 variables for the movement in the snake:
private Direction currentDirection = Direction.RIGHT;
private Direction nextDirection = Direction.RIGHT;
The currentDirection is the direction the snake is currently moving. The nextDirection is the direction the snake will move on the next call to move().
Now in ArrowAction modify nextDirection instead of currentDirection. This means that if you quickly press ↑ and then ←, your ArrowAction for UP will change nextDirection to UP, and your ArrowAction for LEFT will not allow the reversal, since the currentDirection is still going RIGHT.
Then also modify the move implementation slightly to first update the currentDirection:
// Assuming that `Direction` has been imported into scope
public void move() {
currentDirection = nextDirection;
switch(currentDirection) {
case UP: snakeLocation.add(0, new Point(snakeLocation.get(0).x, snakeLocation.get(0).y - 10));
break;
case DOWN: snakeLocation.add(0, new Point(snakeLocation.get(0).x, snakeLocation.get(0).y + 10));
break;
case LEFT: snakeLocation.add(0, new Point(snakeLocation.get(0).x - 10, snakeLocation.get(0).y));
break;
case RIGHT: snakeLocation.add(0, new Point(snakeLocation.get(0).x + 10, snakeLocation.get(0).y));
break;
default: throw new IllegalArgumentException("Unknown move direction");
}
}
A minor gameplay suggestion:
Increase the speed proportionally with the length of the snake (the amount of food consumed) at a higher difficulty level.
I hope these tips are enough to get the idea on how to improve your code.
Good luck! | {
"domain": "codereview.stackexchange",
"id": 24876,
"tags": "java, swing, snake-game"
} |
Irreducible Spherical Tensor Operators | Question: I had a question about Irreducible Spherical Tensor Operators and their relationship with the Clebsch Gordon coefficients.
Consider, then, a dyadic product $T_{ik} = X_{i}Z_{k}$ of two even parity rank 1 spherical tensors $X_{i}$ and $Z_{k}$, where $i,k = -1, 0, 1$. The questions is, how can one express the following matrix elements of $T_{ik}$ with respect to the
$| j m \rangle$ states,
$\langle1 - 1| T_{-1 -1}| 11 \rangle$ and $\langle 1 0| T_{0 -1}| 11 \rangle$, in terms of the following quantities:
$\alpha_{1} $ = $\langle1 1| T_{0 0}| 11 \rangle$,
$\alpha_{2} $ = $\langle1 1| T_{1 -1}| 11 \rangle$,
$\alpha_{3} $ = $\langle1 1| T_{-1 1}| 11 \rangle$.
Here is my approach to this problem, but I seem to be getting lost in the mathematics, and I am starting to wonder if this approach is even right or not.
Construct the tensor using:
$T^{(k)}_{q}$ = $\sum_{q_{1}} \sum_{q_{2}} \langle 11; q_{1} q_{2}| 11; kq \rangle X_{q_{1}}Z_{q_{2}}. $
Let's explicitly work out at least one of the equations.
$T^{(2)}_{0}$ = $\sum_{q_{1}} \sum_{q_{2}} \langle 11; q_{1} q_{2}| 11; 20 \rangle X_{q_{1}}Z_{q_{2}}. $
Since $q_{1}, q_{2} = -1, 0, 1$. We can carry out the first sum and achieve the following:
$T^{(2)}_{0}$ = $\sum_{q_{1}} \langle 11; q_{1} -1| 11; 20 \rangle X_{q_{1}}Z_{-1} $ + $\sum_{q_{1}} \langle 11; q_{1} 0| 11; 20 \rangle X_{q_{1}}Z_{0}$ $ + \sum_{q_{1}} \langle 11; q_{1} 1| 11; 20 \rangle X_{q_{1}}Z_{1}$.
Now we can carry out the second sum. In theory, there should be nine terms in total, but that is simplified as most of the Clebsh-Gordon Coefficients (CGC) are zero because of the following know fact: $q = q_{1} + q_{2}$; otherwise, CGC are zero.
$\langle 11; 1 -1| 11; 20\rangle X_{1}Z_{-1}$ +
$\langle 11; 0 0| 11; 20\rangle X_{0}Z_{0}$ + $\langle 11; -1 1| 11; 20\rangle X_{-1}Z_{1}$.
These CG coefficients can be looked up in the table and this equation is simplified to the following:
$T^{(2)}_{0}$ = $\frac{1}{\sqrt{6}} T_{1-1}$ + $\sqrt{\frac{2}{3}} T_{00}$ + $\frac{1}{\sqrt{6}} T_{-11}$.
This is only one of the many equations one can develop. My "algorithm" to solve this problem is to develop all such equations, manipulate them, and get the desired matrix elements in terms of the desired quantities. However, I cannot seem to get to that point. May be I am on the wrong track here? May be I also need to use recursion relationships of tensor operators? Any help would be appreciated, especially if someone who would like to take a good shot at this question.
Answer: It seems what you want is the "reverse" expansion
\begin{align}
X_iZ_j=\sum_{Lk} T^L_{k} \langle 1i; 1j\vert Lk\rangle\, .
\end{align}
Of course if $X_i$ and $Z_j$ act in different spaces you will have to unwrap your $\vert JM\rangle$ into its $\vert \ell_1m_1\rangle \vert \ell_2m_2\rangle$ bits. The matrix element of your composite tensor then factors into a product of reduced matrix elements of the type
\begin{align}
\langle \ell'_1\ell'_2;J'\Vert XZ \Vert \ell_1\ell_2;J\rangle \propto
W \times \langle \ell'_1\Vert X\Vert \ell_1\rangle \langle \ell'_2\Vert Z\Vert \ell_2\rangle
\end{align}
where $W$ is a Racah W coefficient.
This type of gymnastics is common in the usual angular momentum textbooks such as
Rose, M.E., 1995. Elementary theory of angular momentum. Courier Corporation,
Brink, D.M. and Satchler, G.R., 1968. Angular momentum. | {
"domain": "physics.stackexchange",
"id": 63373,
"tags": "quantum-mechanics, hilbert-space, angular-momentum, operators, representation-theory"
} |
tf.LookupException: "base_link" passed to lookupTransform argument target_frame does not exist | Question:
Hello together,
I am new with ros tf's and already worked myself threw the tf tutorials. (I am using ros indigo)
As my first test with tf's after the tutorial I wrote 2 small nodes.
The first one is sending a tf:
#!/usr/bin/env python
import rospy
import tf
# Minimal tf broadcaster node: repeatedly publishes a fixed transform with
# parent frame 'base_link' and child frame 'one'.
# NOTE(review): indentation was lost when this snippet was pasted; the
# original code is assumed to be conventionally indented.
class tf_firsttry_pub_class():
def __init__(self):
rospy.init_node('tf_firsttry_pub')
# `now` is assigned but never used below.
now = rospy.Time.now()
br = tf.TransformBroadcaster()
while not rospy.is_shutdown():
# NOTE(review): rospy.Time() is a zero timestamp -- presumably the
# current time (rospy.Time.now()) was intended; confirm against tf docs.
br.sendTransform((1, 1, 1), (0, 0, 0, 1), rospy.Time(), 'one','base_link')
# NOTE(review): this constructs a new Rate object every iteration but
# never calls .sleep() on it, so the loop is not actually throttled.
rospy.Rate(50)
if __name__ == '__main__':
try:
tf_firsttry_pub_class()
except rospy.ROSInterruptException:
pass
The second is listening to the tf:
#!/usr/bin/env python
import rospy
import tf
# Minimal tf listener node: polls lookupTransform for base_link -> one and
# prints the translation part of the transform.
# NOTE(review): indentation was lost when this snippet was pasted; the
# original code is assumed to be conventionally indented.
class tf_firsttry_listen_class():
def __init__(self):
rospy.init_node('tf_firsttry_listen')
listener = tf.TransformListener()
# `now` is assigned but never used below.
now = rospy.Time.now()
while not rospy.is_shutdown():
# NOTE(review): called immediately after the listener is created, so the
# tf buffer may still be empty; a waitForTransform (or try/except with a
# retry) before the first lookup is typically needed -- which is exactly
# the failure this question is about.
(trans,rot)=listener.lookupTransform('/base_link','/one',rospy.Time(0))
print trans
if __name__ == '__main__':
try:
tf_firsttry_listen_class()
except rospy.ROSInterruptException:
pass
When starting the second node this error occurs:
line 11, in init
(trans,rot)=listener.lookupTransform('/base_link','/one',rospy.Time(0))
tf.LookupException: "base_link" passed to lookupTransform argument target_frame does not exist.
I already found in a different question that a try: except: shell around the listener could help as it may only fail at the beginning but that doesn't help.
As you see the code is easy but I am totally stuck. I think I misunderstood something but can't figure out what.
Any solutions?
Thanks in advance!
Originally posted by Bant on ROS Answers with karma: 103 on 2016-07-26
Post score: 4
Answer:
At first glance, your frames are named differently in the first node than in the second node: 'base_link' in the first node became '/base_link' in the second, and 'one' became '/one'. The strings identify the frames, and when you look up the transform you need to use exactly the same strings that you publish them with.
Originally posted by alexvs with karma: 91 on 2016-07-26
This answer was ACCEPTED on the original site
Post score: 4
Original comments
Comment by Bant on 2016-07-27:
Sorry about that. I got messed up with the names during testing. But even with the same names the error remains.
Comment by Bant on 2016-07-27:
Fixed. A wait for transform in the second code and a rospy.rate(150) in the first one made it working. ;) | {
"domain": "robotics.stackexchange",
"id": 25359,
"tags": "ros, transforms, tf2, rospy, transform-listener"
} |
Why doesn't this question violate the conservation of mass? | Question:
Combustion of $0.255 \space g$ of isopropyl alcohol produces $0.561
\space g$ of $\ce{CO_2}$ and $0.306 \space g$ of $\ce{H_2O}$. Determine the
empirical formula of isopropyl alcohol.
I'm not asking how to solve this problem, but why this doesn't violate the conservation of mass? Shouldn't the initial combusted mass equal the sum of the resultant masses?
Answer: Because the problem statement indirectly includes an additional reactant.
At an introductory chemistry level, the term combustion implies reaction with $\ce{O2}$. Thus, your missing mass should show up when you introduce a stoichiometric amount of oxygen to your reactants. | {
"domain": "chemistry.stackexchange",
"id": 10303,
"tags": "stoichiometry, combustion"
} |
Causal Signal - Fourier Transform or Laplace Transform | Question: I am dealing with a physics problem which is related to signal processing. The problem requires me to calculate the instantaneous force acting on a body which depends on some physical parameter $x$. Assume that $x(t)$ is periodic in time for the moment. Since $x(t)$ is periodic, then it can be expanded as a Fourier series with different frequency components (and it doesn't really matter if $x(t)$ is causal). The calculation for the instantaneous force involves adding a complex phase shift (which may depend on the frequency) to each of the frequency component. To do that, I can use the convolution theorem and take the convolution of $x(t)$ with some kernel $\kappa(t)$ whose Fourier transform gives me the required phase shifts, i.e. $\tilde{\kappa}(\omega) \propto e^{i\delta(\omega)}$ where $\delta(\omega)$ is the phase shift.
Now if in reality $x(t)$ is not periodic and is causal since I only know its values in the past, can I still apply the same kernel to get the instantaneous force? I have been told that I should use Laplace transform instead of Fourier transform. I see the point of it being bilateral by definition, but I am not sure how it is actually different to Fourier transform. Does applying the convolution theorem to a causal signal still give me the desired phase shifts?
Answer: What you want is an all-pass filter with frequency response
$$H(\omega)=e^{j\phi(\omega)}\tag{1}$$
where $\phi(\omega)$ is the desired phase shift (and $j$ is how we denote the imaginary unit over here). This system is called an all-pass filter because clearly $|H(\omega)|=1$ holds.
The type of input signal is irrelevant, it can be periodic, non-periodic, causal, or non-causal; if you filter it with a linear time-invariant (LTI) filter with a frequency response given by $(1)$ then the desired phase shift will be achieved.
Your problem is the (causal and stable) realization of such a filter. In general, for a given phase shift $\phi(\omega)$ the frequency response given by $(1)$ cannot be implemented exactly; it can only be approximated. | {
"domain": "dsp.stackexchange",
"id": 5057,
"tags": "fourier-transform, laplace-transform"
} |
Multiple Objects at Constant Speed | Question: Imagine 1 million objects travelling in space, at a constant speed, along an imaginary line. They don't deviate from that line for the sake of this argument.
Now, object 1 has a velocity of $v_{1}=1$ km/s relative to a point in space.
Object 2 has a velocity of $v_{2,1}=1$ km/s relative to object 1, which means $v_{2}=2$ km/s when calculating the velocity related to the point in space we considered with object 1.
Object 3 has a velocity of $v_{3,2}=1$ km/s relative to object 2, which means $v_{3}=3$ km/s when calculating the velocity related to the point in space we considered with object 1.
Then I would assume that object 1 million as a velocity of $v_{1M}=1$ million km/s when calculating the velocity related to the point in space we considered with object 1. However, this goes against the principle that nothing can exceed the speed of light.
So:
1.) Where is the error in this reasoning?
2.) How can we talk about the existence of a maximum velocity when $velocity$ is actually vector-based measurement which changes with the reference we consider?
Answer: As per the comments, I wasn't taking into account the relativistic addition of velocities, which is becomes relevant when designing scenarios with such high velocities.
So for a observer in the point specified in my argument, the fastest objects (object #1 million, object #999.999, ...) would appear to have velocities close to light speed, but they would never reach it.
For more information check out the Website http://hyperphysics.phy-astr.gsu.edu/hbase/relativ/einvel.html
Also, this question might be a duplicate of others. If you have questions and feel this answer isn't enough, consider checking the others out: Relativistic addition of velocities of spaceships | {
"domain": "physics.stackexchange",
"id": 29962,
"tags": "special-relativity, speed-of-light, velocity, faster-than-light, inertial-frames"
} |
Degree of freedom of a mechanism | Question: A mechanism popped up in my mind when I was studying kinematics, see picture below
$J1,\ J2,\ J3$ are rotational joints and there are two translational joints $J4$ and $J5$ between two sliders and the cross hatched rail. Sliders are free to move horizontally towards or away from one another. Now when I imagine a motion in which a force is applied vertically at $J1$, pushing or pulling on the said joint, thus sliders move away or come closer, I think it is a one degree of freedom mechanism. However the famous formula $3n-2j-3$ where $n$ is the number of links and $j$ is the number of lower pair joints, gives two degrees of freedom. Considering the rail as a ground link, with two sliders and two arms ($J1J2$ and $J1J3$) we have $n=5$ and we have $j=5$
$3*5-2*5-3=2$
So, what is with that?
Answer: This is a 2 degree of freedom mechanism.
For me its easier to think of it by translation of the horizontal sliders. When you know the position of the both the horizontal sliders then you know the position of the mechanism.
Another way to look at it: if you know the position of J1 (which has two degrees of freedom), then you automatically know the position of the horizontal sliders, and the angles.
"domain": "engineering.stackexchange",
"id": 4068,
"tags": "mechanical-engineering, applied-mechanics, kinematics"
} |
Modeling the motion of a bouncing ball | Question: I'm writing a program that displays a line of text, and animates a ball that bounces from syllable to syllable (like a sing-along). The program knows the location of each syllable, and it knows at what time the ball should be at each syllable.
I have a set of equations that work OK, but not great. I came up with them a few years ago after much googling and stumbling about. They take the location of the previous syllable ($x_0$, $y_0$), the location of the next syllable ($x_1$, $y_1$), the time t (from start 0 to finish 1), and compute where the ball should be ($x$, $y$):
$$d = x_1 - x_0$$
$$v = d/t$$
$$ h = 5 + 0.3 |d|$$
\begin{align}
x(t) &= x_0 + v t\\
y(t) &= y_0 - h + \left[4 \frac{h}{d^2} \left(\frac{|d|}{2}- |v| t \right)^2 \right]
\end{align}
What I would like is a better set of equations that more accurately model the motion of a bouncing ball. A ball with a mind of its own, I suppose, as it does need to change speed and direction with each new syllable.
Answer: You need the position of the ball $(x(t)$, $y(t))$ for $0<t<1$ if at $t=0$, the ball was thrown with initial velocity $(v_x,v_y)$ at the position $(x_0,y_0)$ in a gravitational field of acceleration $\overrightarrow{a}=(a_x,a_y)=(0,-g)$. The velocity has to be calibrated in order to make the ball arrive the point ($x_1,y_1$) at $t=1$.
The position of the ball is given by
$$x(t)=x_0 + v_x t$$
$$y(t)=y_0 + v_y t - g\frac{t^2}{2}$$
We want to obtain $(v_x,v_y)$ to get $x(t=1)=x_1$ and $y(t=1) = y_1$, so,
$$x(t=1)=x_0 + v_x = x_1$$
$$y(t=1)=y_0 + v_y - \frac{g}{2} = y_1$$
and therefore,
$$ v_x = x_1 - x_0$$
$$ v_y = y_1 - y_0 + \frac{g}{2}$$
and finally, your movement equations are:
$$x(t)=x_0 + (x_1 - x_0) t$$
$$y(t)=y_0 + (y_1 - y_0 + \frac{g}{2}) t - g\frac{t^2}{2}$$ | {
"domain": "physics.stackexchange",
"id": 19143,
"tags": "gravity, time, projectile"
} |
Why is hydrogen to oxygen ratio used to compare energy storage efficiency? | Question: I came across an article that says that lipids are more efficient energy storage molecules compared to starch because lipids have higher “hydrogen to oxygen ratio”.
I do not understand how “hydrogen to oxygen ratio” equates to more efficient energy storage.
I am guessing that since energy production have to do with reducing NAD+, so “hydrogen to oxygen ratio” is a measure of the ability to reduce NAD+. And since NADH is then later used in oxidative phosphorylation in mitochondria to produce energy currency ATP, “hydrogen to oxygen ratio” is a measure of the potential amount of ATP the molecule is able to produce. And thus it is a measure of energy storage efficiency.
Is this correct?
Answer: Answer
In my opinion:
The original poster’s overall interpretation of the statement in the article (as
reported) is most likely correct.
Problems with original statement
Although the statement may be clear in context, I would rephrase it because of two specific objections I have to it. These are:
“Energy storage efficiency” in isolation is ambiguous. It is not clear whether it refers to storage per unit mass or per unit volume, both of which may be considerations for living organisms.
“Hydrogen to oxygen ratio” is an imperfect heuristic for the reduction state of the carbon backbone.
I would also be specific in naming the lipids involved in ‘energy storage‘ — there are other lipids (e.g. steroids) that are not used as energy stores.
So my restatement (adapted from Berg et al. ‘Biochemistry’) would be:
Triacylglycerols are more concentrated stores of metabolic energy than
polysaccharides such as starch or glycogen because they are more
highly reduced. The energy yield per gram from the complete oxidation
of triacylglycerol is over twice that from polysaccharides (38 kJ/g
cf. 17 kJ/g).
Comments on restatement
I have avoided the use of the word ‘efficiency’ for reasons of lack of precision,
already mentioned. In general I deplore its use in biology, partly
because it is seldom defined, and partly because it generally
involves the implicit assumption that the most ‘efficient’ is the
‘best’. If this were so in the current case, potatoes would be bags of oil.
There are other factors that contribute to the concentrated nature
of triacylglycerols for storage of metabolic energy. These include
dehydration and lack of branching (important for reducing the
volume).
It is important to talk in chemical terms — i.e. about oxidation and
reduction. I reproduce below part of a diagram I used to use for teaching
which summarizes the relationships between metabolic intermediates
of different oxidation states.
The above diagram shows the limitations of the heuristic of
“hydrogen to oxygen ratio”. The pair on the left (which might be found in
saturated v. unsaturated fatty acids) both have the same H:O ratio
(infinity) but are in different states of reduction.
It may be better (at least in abstract) to consider carbon atoms as the basis on which to
compare the oxidation states of different metabolites, rather than
weight, which is influenced by the oxidation state itself. | {
"domain": "biology.stackexchange",
"id": 12144,
"tags": "biochemistry, bioenergetics, lipids"
} |
Would an (NH3)2+ molecule be trigonal planar like BH3 rather than trigonal pyramidal? | Question: I've been learning about using MO theory to explain why $\ce{BH3}$ and $\ce{NH3}$ have different geometries and by following the line of reasoning used to rationalise the differences in geometry I came to wonder whether ionisation or the addition of electrons to a molecule can change its geometry.
Eg: $\ce{NH3}$ is trigonal pyramidal but an $\ce{NH3^2+}$ ion would have the same electron configuration as $\ce{BH3}$, which is trigonal planar; does this mean $\ce{NH3^2+}$ would be trigonal planar?
Edit: My bad when I first posted this I wrote $\ce{NH3^2-}$ but meant $\ce{NH3^2+}$, this was just an example, my main objective was to find out whether changing the geometry through ionisation was possible.
Answer: Not only is changing shape due to ionization possible, it has been observed. Not with ammonia as suggested in the question, but with cyclooctatetraene, which is nonplanar in its neutral state but switches to a planar, aromatic ring structure when reduced to its di-anion. | {
"domain": "chemistry.stackexchange",
"id": 10571,
"tags": "molecular-orbital-theory, molecular-structure, group-theory"
} |
Custom String Formatter | Question: I've written my custom implementation of an f"" / .format(...). I started the project thinking it was going to be longer than three lines.
from typing import List, Union, AnyStr
def format_string(string: str, variables: List[Union[str, int, float, bool, complex]]) -> str:
"""
Formats the passed string with the passed list of variables
>>> format_string("Hello, [*]", ["Ben"])
Hello, Ben
:param string -> str: String to be formatted
:param variables -> List[Union[str, int, float, bool, complex]]: List of variables to format into string
:return str: Formatted string
"""
for index, value in enumerate(variables):
string = string.replace("[*]", str(value), 1)
return string
My main question is if it's possible to make this a one-liner. It absolutely infuriates me that I have to use three. I spent a long time trying a mixture of * and ''.join to no avail. The code works, I would just like to shorten it up to one line. Of course, any and all feedback is appreciated and considered.
A secondary question is the method header. To represent variables, I have a List that can contain any types of variables. How would I go about representing this instead of having to list each type separately?
Answer: For the second point, since it's easier, typing has an Any:
from typing import List, Any
. . ., variables: List[Any], . . .
For the first, you're just doing a reduction over variables:
from typing import List, Any
from functools import reduce
def format_string(string: str, variables: List[Any]) -> str:
return reduce(lambda s, val: s.replace("[*]", str(val), 1), variables, string)
Although really, in a real use case, I'd still split this over three lines for clarity:
def format_string(string: str, variables: List[Any]) -> str:
return reduce(lambda s, val: s.replace("[*]", str(val), 1),
variables,
string)
And honestly, I might just make that function var-arg instead of grouping things in a list to make it consistent with other format functions:
def format_string(string: str, *variables: Any) -> str:
return reduce(lambda s, val: s.replace("[*]", str(val), 1), variables, string)
>>> format_string("[*] Hello [*]", 1, 2)
'1 Hello 2'
Note that when annotating a var-arg parameter, you annotate the type of each element and ignore the type of the wrapping container (a tuple, iirc). That means it's *variables: Any, not *variables: Tuple[... Any].
Of course though, whether or not this is better is a matter of taste, but this is the ideal use-case for reduce. Whenever you want to constantly reassign one thing in a simple loop, reduce is likely a good tool to look at. | {
"domain": "codereview.stackexchange",
"id": 36597,
"tags": "python, python-3.x, reinventing-the-wheel"
} |
Does an accelerating mass radiate energy? | Question: This question is actually 2 parts
Just like a binary star system or the two black holes (which generated the recently discovered gravitational waves), shouldn't the Earth also radiate, giving off energy? Reading the Wikipedia article confirmed my belief that Earth should radiate and give off gravitational waves. Now, since the Earth is 'radiating', shouldn't it lose energy (however small), resulting in a shrinking of its orbit (however small the shrinkage may be)? Radiating means losing energy, after all.
All mass (except few like neutrinos) is made up of charges and accelerating charges radiate. So normal matter, though neutral, must radiate in the sense that each of its individual charge should radiate. Shouldn't it? And there should be a net loss of energy?
Answer: Not all masses radiate — only those with an asymmetric (time-varying) mass distribution generate gravitational waves. A perfect sphere will not.
Unlike charge, which exists in two polarities, masses always come with the same sign. This is why the lowest order asymmetry producing electromagnetic radiation is the dipole moment of the charge distribution, whereas for gravitational waves it is a change in the quadrupole moment of the mass distribution. Hence those gravitational effects that are spherically symmetric will not give rise to gravitational radiation. A perfectly symmetrical collapse of a supernova will produce no waves, while a non-spherical one will emit gravitational radiation. A binary system will always radiate. Gravitational waves distort spacetime: in other words, they change the distances between free macroscopic bodies.
Italics mine.
So for 1: the radiation from the Earth , which is almost spherical, will be very small in any case.
For 2: The elementary particles are point particles, so symmetric and they will not radiate gravitational waves in acceleration. Molecules and atoms accelerated may, if the outside orbitals are not S orbitals. Even in the last case as can be seen here it will be very small, as there is G and divisions by power of c involved.
The energy has to be provided by the source that is accelerating the objects.
The amplitude of the gravitational wave, formula 2.34 :
where εE_kin(with 0≤ε≤1), is the fraction of kinetic energy of the source that is able to produce gravitational waves. The factor ε is a measure of the asymmetry of the source and implies that only a time varying quadrupole moment will emit gravitational waves. For example, even if a huge amount of kinetic energy is involved in a given explosion and/or implosion, if the event takes place in a spherically symmetric manner, there will be no gravitational radiation | {
"domain": "physics.stackexchange",
"id": 37928,
"tags": "electromagnetic-radiation, radiation, gravitational-waves"
} |
Cardshifter login page using vanilla JavaScript | Question: I'm working on rewriting the Cardshifter HTML Client with vanilla JavaScript, the original HTML Client is written with Angular but I really wanted to stay away from anything npm and just go back to basics. Note that I have no desire to use libraries like jQuery, Underscore, etc. unless absolutely necessary.
This is the landing page where a user selects a server to connect to and login. I would like feedback on any and all facets of the code, especially if I'm using anti-patterns that I could avoid using throughout the rest of the client. All the sections are documented so I won't spend time explaining what everything does here.
If you want to skim over the trivial things, the primary code files I would like feedback on are:
sections/login/login.js
server_interface/server_interface.js
utils/loadHtml.js
Here is an animated GIF that shows some of the functionality, namely checking whether a given server can offer a valid WebSocket connection. (note that the alert showing the user name has since been removed, it was for debugging purposes.
Directory structure
Here is how my files are structured at the moment. I have a few other directories for images and such that I excluded because they are not used yet.
index.html
global.js
sections/
login/
login.html
login.js
top_navbar/
top_navbar.html
server_interface/
server_interface.js
styles/
cardshifter.css
utils/
formatDate.js
loadHtml.js
logDebugMessage.js
Code
index.html
<!DOCTYPE html>
<html>
<head>
<title>Cardshifter</title>
<!-- Bootstrap -->
<!-- NOTE(review): this stylesheet is fetched over plain http; browsers will
     block it as mixed content if the site is ever served over https. -->
<link href="http://maxcdn.bootstrapcdn.com/bootstrap/3.3.0/css/bootstrap.min.css" rel="stylesheet" />
<!-- Local styles -->
<link rel="stylesheet" href="styles/cardshifter.css" />
<!-- Local JavaScript (global.js must come first: it defines the shared
     constants, e.g. DEBUG, used by the inline scripts below) -->
<script src="global.js"></script>
<script src="server_interface/server_interface.js"></script>
<script src="utils/loadHtml.js"></script>
<script src="utils/formatDate.js"></script>
<script src="utils/logDebugMessage.js"></script>
<!-- Local Section JavaScript -->
<script src="sections/login/login.js"></script>
<!-- Favicon links -->
<link rel="apple-touch-icon" sizes="57x57" href="images/favicon/apple-icon-57x57.png" />
<link rel="apple-touch-icon" sizes="60x60" href="images/favicon/apple-icon-60x60.png" />
<link rel="apple-touch-icon" sizes="72x72" href="images/favicon/apple-icon-72x72.png" />
<link rel="apple-touch-icon" sizes="76x76" href="images/favicon/apple-icon-76x76.png" />
<link rel="apple-touch-icon" sizes="114x114" href="images/favicon/apple-icon-114x114.png" />
<link rel="apple-touch-icon" sizes="120x120" href="images/favicon/apple-icon-120x120.png" />
<link rel="apple-touch-icon" sizes="144x144" href="images/favicon/apple-icon-144x144.png" />
<link rel="apple-touch-icon" sizes="152x152" href="images/favicon/apple-icon-152x152.png" />
<link rel="apple-touch-icon" sizes="180x180" href="images/favicon/apple-icon-180x180.png" />
<link rel="icon" type="image/png" sizes="192x192" href="images/favicon/android-icon-192x192.png" />
<link rel="icon" type="image/png" sizes="32x32" href="images/favicon/favicon-32x32.png" />
<link rel="icon" type="image/png" sizes="96x96" href="images/favicon/favicon-96x96.png" />
<link rel="icon" type="image/png" sizes="16x16" href="images/favicon/favicon-16x16.png" />
<link rel="manifest" href="images/favicon/manifest.json" />
<meta name="msapplication-TileColor" content="#ffffff" />
<meta name="msapplication-TileImage" content="images/favicon/ms-icon-144x144.png" />
<meta name="theme-color" content="#ffffff" />
</head>
<body>
<!-- Top navigation bar: fetches the navbar fragment via loadHtml (a
     promise-returning helper, see utils/loadHtml.js) and injects it into
     this container. -->
<div id="top_navbar_container">
<script>
const navbarContainerId = "top_navbar_container";
const navbarFilePath = "sections/top_navbar/top_navbar.html";
loadHtml(navbarContainerId, navbarFilePath)
.then(function() {
if (DEBUG) {
logDebugMessage(`"${navbarFilePath}" loaded OK!`);
}
});
</script>
</div>
<div class="csh-body">
<!-- Login section: loads the login form fragment, then wires its event
     handlers via loginHandler() (defined in sections/login/login.js).
     NOTE(review): top-level `const` bindings are shared by every classic
     script on the page, so these names must not collide with the ones
     declared in the navbar script above. -->
<div id="login_container">
<script>
const loginContainerId = "login_container";
const loginFilePath = "sections/login/login.html";
loadHtml(loginContainerId, loginFilePath)
.then(function() {
loginHandler();
if (DEBUG) {
logDebugMessage(`"${loginFilePath}" loaded OK!`);
}
});
</script>
</div>
</div>
</body>
</html>
global.js
/*
* This file is for global values to be used throughout the site.
*/
'use strict';
/*
 * Setting to `true` will log messages in the browser console
 * to help in debugging and keeping track of what is happening on the page.
 * This should be set to `false` on the public client.
 */
const DEBUG = true;
/*
 * Port number used for WebSocket.
 */
const WS_PORT = 4243;
/*
 * List of game server names and WebSocket URIs.
 * An empty URI ("Other") makes the login page show a free-form address
 * input instead of the dropdown (see sections/login/login.js).
 */
const GAME_SERVERS = {
"localhost" : `ws://127.0.0.1:${WS_PORT}`,
"dwarftowers.com" : `ws://dwarftowers.com:${WS_PORT}`,
// NOTE(review): display name says zomis.net but the URI points at
// stats.zomis.net -- confirm this is intentional.
"zomis.net" : `ws://stats.zomis.net:${WS_PORT}`,
"Other" : ""
};
/**
 * Default date format for the application.
 * Consumed by utils/formatDate.js and utils/logDebugMessage.js;
 * tokens follow formatDate's yyyy/MM/dd hh:mm:ss scheme.
 * @type String
 */
const DEFAULT_DATE_FORMAT = "yyyy/MM/dd hh:mm:ss";
sections
sections/login/login.html
<div id="login">
<h4>Please log in to continue.</h4>
<form name="login_form" id="login_form" class="login-form">
<div id="login_server_select_container" class="form-group">
<label for="login_server_list" aria-label="Server">Server:</label>
<select name="login_server_list" id="login_server_list" class="form-control">
</select>
<div id="login_server_other_container" class="form-group" style="display : none">
<label for="login_server_other_input">Other Server:</label>
<input name="login_server_other_input" id="login_server_other_input" type="text" class="form-control" />
<input type="button" name="test_login_server_other" id="test_login_server_other" class="btn" value="Test connection" />
</div>
<input readonly name="server_loading_display" id="server_connecting" class="form-control" style="background-color: #DDD; display: none" />
<label for="login_secure">Is secure server:</label>
<input name="login_secure" id="login_secure" type="checkbox" value="secure" />
<span id="login_server_connection_status" class="label" style="display: block; text-align: left"></span>
</div>
<div id="login_username_container" class="form-group">
<label for="login_username">Username:</label>
<input name="login_username" id="login_username" type="text" class="form-control" placeholder="Enter name..." />
</div>
<div class="form-group">
<input type="button" name="login_submit" id="login_submit" type="button" class="btn btn-success" value="Log in" />
</div>
</form>
</div>
sections/login/login.js
/* global GAME_SERVERS, DEBUG, CardshifterServerAPI, DEFAULT_DATE_FORMAT */
const loginHandler = function() {
    // Cached DOM references for the login section (markup in login.html).
    const serverSelectContainer = document.getElementById("login_server_select_container");
    const serverSelect = serverSelectContainer.querySelector("#login_server_list");
    const serverOtherInputContainer = serverSelectContainer.querySelector("#login_server_other_container");
    const serverLoading = serverSelectContainer.querySelector("#server_connecting");
    const connStatusMsg = serverSelectContainer.querySelector("#login_server_connection_status");
    // `null` until a connection test has completed, then true/false.
    let currentServerHasValidConnection = null;
    /**
     * Adds options to the server selection based on GAME_SERVERS global.
     * @returns {undefined}
     */
    const populateServerSelect = function() {
        for (const key in GAME_SERVERS) {
            if (GAME_SERVERS.hasOwnProperty(key)) {
                const option = document.createElement("option");
                option.text = key;
                option.value = GAME_SERVERS[key];
                serverSelect.add(option);
            }
        }
    };
    /**
     * Builds the onReady/onError callback pair shared by both connection tests.
     * Each callback restores the server select, reports the status on the page,
     * and records whether the server could be reached.
     * @param {string} serverUri - The URI of the server being tested
     * @returns {Object} - `{ onReady, onError }` for CardshifterServerAPI.init
     */
    const createConnectionCallbacks = function(serverUri) {
        const onReady = function() {
            makeServerSelectReadWrite();
            // BUGFIX: `msgText` is declared locally; it was previously assigned
            // without any declaration in the "Other" server test.
            const msgText = displayConnStatus("success", serverUri);
            if (DEBUG) { logDebugMessage(msgText); }
            currentServerHasValidConnection = true;
        };
        const onError = function() {
            makeServerSelectReadWrite();
            const msgText = displayConnStatus("failure", serverUri);
            if (DEBUG) { logDebugMessage(msgText); }
            currentServerHasValidConnection = false;
        };
        return { onReady : onReady, onError : onError };
    };
    /**
     * Tests the WebSocket connection to the selected server and displays a message
     * on the page to give the user information about the connection status.
     * @returns {undefined}
     */
    const testWebsocketConnection = function() {
        const serverUri = serverSelect.value;
        const isSecure = false;
        if (serverUri) {
            displayConnStatus("connecting", serverUri);
            const callbacks = createConnectionCallbacks(serverUri);
            CardshifterServerAPI.init(serverUri, isSecure, callbacks.onReady, callbacks.onError);
            makeServerSelectReadOnly(serverUri);
        }
        else {
            displayConnStatus("unknown", serverUri);
        }
    };
    /**
     * Displays connection status in the page.
     * @param {string} status - Keyword representing the connection status
     * @param {string} serverUri - The URI of the server the client is connecting to
     * @returns {String} - The message text, largely for debug purposes
     */
    const displayConnStatus = function(status, serverUri) {
        let msgText = "";
        switch (status.toLowerCase()) {
            case "connecting":
                msgText =
                    `<h5>Connecting to server...</h5>` +
                    `<pre class='bg-warning'>` +
                    `Address: ${serverUri}` +
                    `\n${formatDate(new Date())}` +
                    `</pre>`;
                connStatusMsg.className = "label label-warning";
                connStatusMsg.innerHTML = msgText;
                break;
            case "success":
                msgText =
                    `<h5>WebSocket connection OK.</h5>\n` +
                    `<pre class='bg-success'>` +
                    `Address: ${serverUri}` +
                    `\n${formatDate(new Date())}` +
                    `</pre>`;
                connStatusMsg.innerHTML = msgText;
                connStatusMsg.className = "label label-success";
                break;
            case "failure":
                msgText =
                    `<h5>WebSocket connection FAILED.</h5>\n` +
                    `<pre class='bg-danger'>` +
                    `Address: ${serverUri}` +
                    `\n${formatDate(new Date())}` +
                    `</pre>`;
                connStatusMsg.innerHTML = msgText;
                connStatusMsg.className = "label label-danger";
                break;
            case "unknown":
            default:
                msgText = `<h5>Unknown connection status...</h5>`;
                connStatusMsg.innerHTML = msgText;
                connStatusMsg.className = "label label-default";
                break;
        }
        return msgText;
    };
    /**
     * Hides the `select` element and shows a read-only `input` instead,
     * used while a connection attempt is in progress.
     * @param {string} serverUri - The URI shown in the read-only input
     * @returns {undefined}
     */
    const makeServerSelectReadOnly = function(serverUri) {
        serverSelect.style.display = "none";
        serverLoading.style.display = "block";
        serverLoading.value = `Connecting to ${serverUri}...`;
    };
    /**
     * Makes the server `select` element visible and hides the read-only `input`.
     * @returns {undefined}
     */
    const makeServerSelectReadWrite = function() {
        serverSelect.style.display = "block";
        serverLoading.style.display = "none";
    };
    /**
     * Displays an input field for server address if "Other" server is selected.
     * @returns {undefined}
     */
    const handleServerSelectChanges = function() {
        if (serverSelect.value) {
            serverOtherInputContainer.style.display = "none";
        }
        else {
            serverOtherInputContainer.style.display = "block";
        }
    };
    /**
     * Attempts to login to game server.
     * @returns {undefined}
     */
    const tryLogin = function() {
        const username = document.getElementById("login_username").value;
        if (!username) {
            displayNoUsernameWarning();
            return;
        }
        const isSecure = false;
        // NOTE(review): `loggedIn` is written but never read -- presumably
        // intended for a later redirect; kept so the intent is not lost.
        let loggedIn = null;
        let serverUri = serverSelect.value;
        if (!serverUri) {
            serverUri = document.getElementById("login_server_other_input").value;
        }
        // Short-circuit the login attempt if the last connection test already
        // found the server unreachable.
        // BUGFIX: previously this warned but still fell through and tried to log in.
        if (currentServerHasValidConnection === false) {
            const msg = "Websocket error(error 1)";
            console.log(msg);
            displayLoginFailureWarning(msg);
            return;
        }
        /**
         * Attempt to log in once the WebSocket connection is ready.
         * @returns {undefined}
         */
        const onReady = function() {
            const login = new CardshifterServerAPI.messageTypes.LoginMessage(username);
            /**
             * Listens for a welcome message from the game server, and stores user values in the browser.
             * @param {Object} welcome
             * @returns {undefined}
             */
            const messageListener = function(welcome) {
                const SUCCESS = 200;
                const SUCCESS_MESSAGE = "OK";
                if (welcome.status === SUCCESS && welcome.message === SUCCESS_MESSAGE) {
                    // NOTE(review): localStorage stores strings, so the object and
                    // null below are persisted as "[object Object]" and "null" --
                    // confirm against whatever reads these keys back.
                    localStorage.setItem("username", username);
                    localStorage.setItem("id", welcome.userId);
                    localStorage.setItem("playerIndex", null);
                    localStorage.setItem("game", { "id" : null, "mod" : null });
                }
                else {
                    console.log(`${new Date()} server message: ${welcome.message}`);
                    loggedIn = false;
                }
            };
            try {
                CardshifterServerAPI.setMessageListener(messageListener, ["loginresponse"]);
                CardshifterServerAPI.sendMessage(login);
            }
            catch (error) {
                const msg = "LoginMessage error(error 2)";
                if (DEBUG) { logDebugMessage(`${msg} ${error}`); }
                displayLoginFailureWarning(msg, error);
                loggedIn = false;
            }
        };
        /**
         * Log the error and warn the user if the connection fails.
         * @returns {undefined}
         */
        const onError = function() {
            const msg = "Websocket error(error 1)";
            if (DEBUG) { logDebugMessage(msg); }
            displayLoginFailureWarning(msg);
            loggedIn = false;
        };
        CardshifterServerAPI.init(serverUri, isSecure, onReady, onError);
    };
    /**
     * Displays a warning if no username is entered.
     * @returns {undefined}
     */
    const displayNoUsernameWarning = function() {
        const container = document.getElementById("login_username_container");
        // Only add the warning once, no matter how often the button is clicked.
        if (!container.querySelector("#login_username_missing_msg")) {
            const msg = document.createElement("span");
            msg.id = "login_username_missing_msg";
            msg.className = "label label-danger";
            msg.innerHTML = "Please enter a username.";
            container.appendChild(msg);
        }
    };
    /**
     * Displays a warning when a login attempt fails.
     * @param {string} message - Short description of the failure
     * @param {Error} [error] - Optional underlying error, shown verbatim
     * @returns {undefined}
     */
    const displayLoginFailureWarning = function(message, error) {
        const container = document.getElementById("login_username_container");
        // BUGFIX: reuse an existing warning element so repeated failures do not
        // stack up duplicate messages.
        let warning = container.querySelector("#login_failure_msg");
        if (!warning) {
            warning = document.createElement("span");
            warning.id = "login_failure_msg";
            container.appendChild(warning);
        }
        warning.className = "label label-danger";
        warning.style = "display: block; text-align: left;";
        warning.innerHTML = `<h5>Login failed: ${message}</h5>`;
        if (error) {
            warning.innerHTML += `<pre>${error}</pre>`;
        }
    };
    /**
     * Tests the WebSocket connection to a manually entered "Other" server address.
     * @returns {undefined}
     */
    const testOtherServerConnection = function() {
        const otherServerInput = document.getElementById("login_server_other_input");
        const otherServerUri = otherServerInput.value;
        const isSecure = false;
        const callbacks = createConnectionCallbacks(otherServerUri);
        CardshifterServerAPI.init(otherServerUri, isSecure, callbacks.onReady, callbacks.onError);
        // BUGFIX: previously called without the URI, so the read-only input
        // displayed "Connecting to undefined...".
        makeServerSelectReadOnly(otherServerUri);
        displayConnStatus("connecting", otherServerUri);
    };
    /**
     * IIFE to set up the login handling for the page it is loaded in.
     * @type undefined
     */
    const runLoginHandler = function() {
        populateServerSelect();
        serverSelect.addEventListener("change", handleServerSelectChanges, false);
        serverSelect.addEventListener("change", testWebsocketConnection, false);
        document.getElementById("login_submit").addEventListener("click", tryLogin, false);
        document.getElementById("test_login_server_other").addEventListener("click", testOtherServerConnection, false);
        testWebsocketConnection();
    }();
};
sections/top_navbar/top_navbar.html
<nav id="top_navbar" class="navbar navbar-inverse">
<div class="container-fluid">
<div class="navbar-header">
<!-- TODO fix this logic -->
<div class="navbar-brand csh-top-link">Cardshifter</div>
</div>
<form class="navbar-form">
<ul class ="navbar-form navbar-left" style="margin-top: 8px;">
<li class="dropdown">
<a href="#" class="dropdown-toggle csh-dropdown-link" data-toggle="dropdown" role="button" aria-haspopup="true" aria-expanded="false">
Mods
<span class="caret"></span></a>
<ul class="dropdown-menu">
<li class="cyborg-font">Cyborg Chronicles</li>
<li class="cyborg-font"><a href=#>Game rules</a></li>
<li class="cyborg-font"><a href=#>Cards</a></li>
<li role="separator" class="divider"></li>
<li class="mythos-font">Mythos</li>
<li class="mythos-font"><a href=#>Game rules</a></li>
<li class="mythos-font"><a href=#>Cards</a></li>
</ul>
</li>
</ul>
<ul class ="navbar-form navbar-left" style="margin-top: 8px;">
<li class="dropdown">
<a href="#" class="dropdown-toggle csh-dropdown-link" data-toggle="dropdown" role="button" aria-haspopup="true" aria-expanded="false">
Help
<span class="caret"></span></a>
</li>
</ul>
<ul class ="navbar-form navbar-left" style="margin-top: 8px;">
<li class="dropdown">
<a href="#" class="dropdown-toggle csh-dropdown-link" data-toggle="dropdown" role="button" aria-haspopup="true" aria-expanded="false">
About
<span class="caret"></span></a>
</li>
</ul>
<div class="form-group navbar-form navbar-left">
<input name="disconnect_websocket" id="disconnect_websocket" type="button" value="Log Out" class="btn btn-navbar csh-button" />
</div>
<div class="form-group navbar-form navbar-left">
<input name="display_console" id="display_console" type="button" value="Console" class="btn btn-navbar csh-button" />
</div>
</form>
</div>
</nav>
server_interface
server_interface/server_interface.js
"use strict";
// Matches a WebSocket URI scheme ("ws://" or "wss://") at the start of a string.
// BUGFIX: the old /ws(s)*:\/\// matched anywhere in the string and also
// accepted invalid schemes such as "wssss://".
const wsProtocolFinder = /^wss?:\/\//;
/*
 * Enum for WebSocket ready state constants.
 * Mirrors the readyState values defined by the WebSocket API.
 * @enum {number}
 */
const readyStates = {
    CONNECTING : 0,
    OPEN : 1,
    CLOSING : 2,
    CLOSED : 3
};
// Chat id of the main server lobby (server-defined constant).
const MAIN_LOBBY = 1;
// Message types the current message listener reacts to (see setMessageListener).
let eventTypes = [];
/**
 * The base class Message for all the other message types
 * to inherit from.
 *
 * Each concrete message type is given an instance of Message as its
 * prototype in CardshifterServerAPI.init, which is how `.command`
 * ends up on every outgoing message.
 *
 * TODO: Would it just be easier to set the `.command` property
 * individually for each card type?
 *
 * @param {string} command - The command of the message.
 */
const Message = function(command) {
    this.command = command;
};
/**
 * Error raised when code interacts with the API before the API has been
 * initialized via `.init`.
 *
 * @param {string} message - Informational message about the exception.
 */
const NotInitializedException = function(message) {
    this.message = message || "";
    this.name = "NotInitializedException";
};
/**
 * Error raised when code tells the API to use the socket while the socket
 * is not ready to accept any information.
 *
 * @param {string} message - Informational message about the exception.
 * @param {number} readyState - Ready state constant from the WebSocket API,
 *     https://developer.mozilla.org/en-US/docs/Web/API/WebSocket
 */
const SocketNotReadyException = function(message, readyState) {
    this.readyState = readyState;
    this.message = message || "";
    this.name = "SocketNotReadyException";
};
/*
 * Copies all enumerable properties of an object -- own and inherited --
 * onto a new plain object.
 *
 * This is needed because `JSON.stringify` only serializes own properties,
 * while every message type keeps its `.command` on its prototype
 * (see CardshifterServerAPI.init).
 *
 * @param {Object} obj - The object to flatten
 * @return {Object} - a new plain Object containing obj's own and inherited keys
 * @source http://stackoverflow.com/questions/8779249/how-to-stringify-inherited-objects-to-json
 */
const flatten = function(obj) {
    const result = {};
    // for..in walks the prototype chain, so inherited keys are copied too.
    for (const key in obj) {
        result[key] = obj[key];
    }
    return result;
};
/*
 * Singleton object to handle communication via WebSocket between the client
 * and the game server.
 */
const CardshifterServerAPI = {
    // The active WebSocket connection; `null` until `init` has been called.
    socket: null,
    messageTypes: {
        /*
         * Incoming login message.
         * A login message from a client to add a user to the available users on the server.
         * This login message is required before any other action or message can be performed between a client and a server.
         * @constructor
         * @param {string} username - The incoming user name passed from client to server, not null
         * @example Message: <code>{ "command":"login","username":"JohnDoe" }</code>
         */
        LoginMessage : function(username) {
            this.username = username;
        },
        /*
         * Request available targets for a specific action to be performed by an entity.
         * These in-game messages request a list of all available targets for a given action and entity.
         * The client uses this request in order to point out targets (hopefully with a visual aid such as highlighting targets)
         * that an entity (such as a creature card, or a player) can perform an action on (for example attack or enchant a card).
         * @constructor
         * @param {number} gameId - The Id of this game currently being played
         * @param {number} id - The Id of this entity which requests to perform an action
         * @param {string} action - The name of this action requested to be performed
         */
        RequestTargetsMessage : function(gameId, id, action) {
            this.gameId = gameId;
            this.id = id;
            this.action = action;
        },
        /*
         * Make a specific type of request to the server.
         * This is used to request an action from the server which requires server-side information.
         * @constructor
         * @param {string} request - This request
         * @param {string} message - The message accompanying this request
         */
        ServerQueryMessage : function(request, message) {
            this.request = request;
            this.message = message;
            this.toString = function() {
                return `ServerQueryMessage: Request${this.request} message: ${this.message}`;
            };
        },
        /*
         * Request to start a new game.
         * This is sent from the Client to the Server when this player invites another player (including AI)
         * to start a new game of a chosen type.
         * @constructor
         * @param opponent - The Id of the player entity being invited by this player
         * @param gameType - The type / mod of the game chosen by this player
         */
        StartGameRequest : function(opponent, gameType) {
            this.opponent = opponent;
            this.gameType = gameType;
        },
        /*
         * Serialize message from JSON to byte.
         * Primarily used for libGDX client.
         * @constructor
         * @param type - This message type
         */
        TransformerMessage : function(type) {
            this.type = type;
        },
        /*
         * Message for a game entity to use a certain ability.
         * Game entities (e.g., cards, players) may have one or more ability actions that they can perform.
         * Certain abilities can have multiple targets, hence the use of an array.
         * @constructor
         * @param gameId - This current game
         * @param id - The Id of the game entity performing the action
         * @param action - This action
         * @param targets - The set of multiple targets affected by this action
         */
        UseAbilityMessage : function(gameId, id, action, targets) {
            this.gameId = gameId;
            this.id = id;
            this.action = action;
            this.targets = targets;
            this.toString = function() {
                return ``
                    + `UseAbilityMessage`
                    + `[id=${this.id},`
                    + `action=${this.action},`
                    + `gameId=${this.gameId}`
                    + `targets=${this.targets.toString()}]`
                ;
            };
        },
        /*
         * Chat message in game lobby.
         * These are messages printed to the game lobby which are visible to all users present at the time the message is posted.
         * @constructor
         * @param {string} message - The content of this chat message
         */
        ChatMessage : function(message) {
            this.chatId = MAIN_LOBBY;
            this.message = message;
            this.toString = function() {
                // BUGFIX: the old code referenced bare `chatId`/`from`, which threw a
                // ReferenceError. `from` is presumably only set on messages received
                // from the server -- TODO confirm against the server protocol.
                return `ChatMessage [chatId=${this.chatId}, message=${this.message}, from=${this.from}]`;
            };
        },
        /*
         * Request to invite a player to start a new game.
         * @constructor
         * @param id - The Id of this invite request
         * @param {string} name - The name of the player being invited
         * @param gameType - The game type of this invite request
         */
        InviteRequest : function(id, name, gameType) {
            this.id = id;
            this.name = name;
            this.gameType = gameType;
        },
        /*
         * Response to an InviteRequest message.
         * @constructor
         * @param inviteId - Id of this incoming InviteRequest message
         * @param {boolean} accepted - Whether or not the InviteRequest is accepted
         */
        InviteResponse : function(inviteId, accepted) {
            this.inviteId = inviteId;
            this.accepted = accepted;
        },
        /*
         * Player configuration for a given game.
         * @constructor
         * @param gameId - This game
         * @param {string} modName - The mod name for this game
         * @param {Map} configs - Map of player name and applicable player configuration
         */
        PlayerConfigMessage : function(gameId, modName, configs) {
            this.gameId = gameId;
            this.modName = modName;
            this.configs = configs;
            this.toString = function() {
                // Uses `this.*` so the output reflects later property changes,
                // not the constructor arguments captured by closure.
                return ``
                    + `PlayerConfigMessage{`
                    + `configs=${this.configs}, `
                    + `gameId=${this.gameId}, `
                    + `modName='${this.modName}'`
                    + `}`
                ;
            };
        }
    },
    /*
     * Initializes the API for use.
     *
     * This sets up all the message types to inherit the main `Message` class, and sets
     * up the websocket that will be used to communicate to the server, and to receive
     * information from the server.
     *
     * @param {string} server - The server address to connect to
     * @param {boolean} isSecure - Whether to use SSL for the connection (NOT IMPLEMENTED)
     * @param onReady - Function to assign to `socket.onopen`
     * @param onError - Function to assign to `socket.onerror`
     */
    init : function(server, isSecure, onReady, onError) {
        const types = this.messageTypes;
        // Capture the API object: inside the socket event handlers `this`
        // refers to the WebSocket, not to CardshifterServerAPI.
        const self = this;
        types.LoginMessage.prototype = new Message("login");
        types.RequestTargetsMessage.prototype = new Message("requestTargets");
        types.ServerQueryMessage.prototype = new Message("query");
        types.StartGameRequest.prototype = new Message("startgame");
        types.TransformerMessage.prototype = new Message("serial");
        types.UseAbilityMessage.prototype = new Message("use");
        types.ChatMessage.prototype = new Message("chat");
        types.InviteRequest.prototype = new Message("inviteRequest");
        types.InviteResponse.prototype = new Message("inviteResponse");
        types.PlayerConfigMessage.prototype = new Message("playerconfig");
        NotInitializedException.prototype = new Error();
        SocketNotReadyException.prototype = new Error();
        // secure websocket is wss://, rather than ws://
        const secureAddon = (isSecure ? "s" : "");
        // if the protocol is not found in the string, prepend the correct one
        const protocolAddon = (wsProtocolFinder.test(server) ? "" : `ws${secureAddon}://`);
        const socket = new WebSocket(protocolAddon + server);
        socket.onopen = onReady;
        socket.onerror = function() {
            onError();
            // BUGFIX: `this` here is the WebSocket, so the old code set a `socket`
            // property on the WebSocket instead of clearing the API's reference.
            self.socket = null;
        };
        this.socket = socket;
    },
    /**
     * Sends a message to the server.
     *
     * @param {Object} message - The message to send
     * @error SocketNotReadyException - The socket is not ready to be used
     * @error NotInitializedException - The API has not yet been initialized
     */
    sendMessage : function(message) {
        const socket = this.socket;
        if (socket) {
            if (socket.readyState === readyStates.OPEN) {
                // `flatten` pulls the prototype's `.command` into an own
                // property so JSON.stringify includes it.
                socket.send(JSON.stringify(flatten(message)));
            }
            else {
                throw new SocketNotReadyException("The Websocket is not ready to be used.", socket.readyState);
            }
        }
        else {
            throw new NotInitializedException("The API has not yet been initialized.");
        }
    },
    /**
     * Sets an event listener for when the server sends a message and
     * the message type is one of the types in `types`.
     *
     * @param listener - The function to fire when a message of `types` is received
     * @param {string[]} types - (OPTIONAL) Only fire the listener when the message type is in this array
     * @param {Object} timeout - (OPTIONAL) The function(.ontimeout) to call after MS(.ms) of no reply
     *
     * TODO: Maybe a timeout will be needed? Pass in a function and a MS count.
     */
    setMessageListener : function(listener, types, timeout) {
        eventTypes = types;
        this.socket.onmessage = function(message) {
            const data = JSON.parse(message.data);
            // Forward the message if no type filter is set, or if its
            // command is one of the filtered types.
            if (eventTypes) {
                if (eventTypes.includes(data.command)) {
                    listener(data);
                }
            }
            else {
                listener(data);
            }
        };
    },
    /**
     * Adds types to the types to listen for in the message event listener.
     *
     * @param {string[]} types - The types to add
     */
    addEventTypes : function(types) {
        eventTypes = eventTypes.concat(types);
    },
    /**
     * Removes the message event listener.
     */
    removeMessageListener : function() {
        this.socket.onmessage = null;
    }
};
utils
utils/formatDate.js
/* global DEFAULT_DATE_FORMAT */
/**
 * Formats a Date object based on a format string, e.g., "yyyy/MM/dd hh:mm:ss"
 * Original source:
 * https://dzone.com/articles/javascript-formatdate-function
 * Rewritten to use a single-pass replacement: the old sequential `replace`
 * calls corrupted text they had already inserted (e.g. the "M" of "May"
 * produced by "MMM" was later replaced by the month number), and only
 * replaced the first occurrence of each token.
 *
 * Supported tokens: yyyy yy MMM MM M dd d hh h mm m ss s
 *
 * @param {Date} date - the Date to format
 * @param {String} formatString - the format string to use
 * @returns {String} - the formatted date, or "" if `date` is not a Date
 */
const formatDate = function (date, formatString=DEFAULT_DATE_FORMAT) {
    if (!(date instanceof Date)) {
        return "";
    }
    const months = ["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"];
    // Left-pads a number to two digits.
    const pad = function(n) {
        return n < 10 ? `0${n}` : `${n}`;
    };
    // Every supported token mapped to its replacement text.
    const tokens = {
        yyyy: `${date.getFullYear()}`,
        yy: `${date.getFullYear()}`.slice(-2),
        MMM: months[date.getMonth()],
        MM: pad(date.getMonth() + 1),
        M: `${date.getMonth() + 1}`,
        dd: pad(date.getDate()),
        d: `${date.getDate()}`,
        hh: pad(date.getHours()),
        h: `${date.getHours()}`,
        mm: pad(date.getMinutes()),
        m: `${date.getMinutes()}`,
        ss: pad(date.getSeconds()),
        s: `${date.getSeconds()}`
    };
    // Single pass: longest alternatives first so "yyyy" wins over "yy", etc.
    // Replaced text is never rescanned, so month names stay intact.
    return formatString.replace(
        /yyyy|yy|MMM|MM|M|dd|d|hh|h|mm|m|ss|s/g,
        function(token) { return tokens[token]; }
    );
};
utils/loadHtml.js
/* global fetch, DEBUG */
"use strict";
/*
 * Replicates the functionality of jQuery's `load` function,
 * used to load some HTML from another file into the current one.
 *
 * Based on this Stack Overflow answer:
 * https://stackoverflow.com/a/38132775/3626537
 * And `fetch` documentation:
 * https://developer.mozilla.org/en-US/docs/Web/API/WindowOrWorkerGlobalScope/fetch
 *
 * @param {string} parentElementId - The ID of the DOM element to load into
 * @param {string} filePath - The path of the HTML file to load
 * @return {Promise} - resolves once the fragment has been injected
 */
const loadHtml = function (parentElementId, filePath) {
    const init = {
        method: "GET",
        headers: { "Content-Type": "text/html" },
        mode: "cors",
        cache: "default"
    };
    // Return Promise from `fetch` allows to use `.then` after call.
    return fetch(filePath, init)
        .then(function (response) {
            return response.text();
        })
        .then(function (body) {
            // Strip a leading `#` in case the function gets called `querySelector` or jQuery style.
            // BUGFIX: the old code discarded the result of `replace` (strings are immutable).
            if (parentElementId.startsWith("#")) {
                parentElementId = parentElementId.slice(1);
            }
            document.getElementById(parentElementId).innerHTML = body;
            if (DEBUG) {
                logDebugMessage(`File "${filePath}" loaded into element ID "${parentElementId}"`);
            }
        })
        .catch(function(err) {
            throw new FailureToLoadHTMLException(
                `Could not load "${filePath}" ` +
                `into element ID "${parentElementId}"` +
                `\n${err}`
            );
        });
};
/**
 * Error thrown when an HTML fragment cannot be fetched and injected.
 * Captures the current call stack at construction time.
 * @param {string} message - Description of the load failure
 */
const FailureToLoadHTMLException = function(message) {
    this.stack = new Error().stack;
    this.name = "FailureToLoadHTMLException";
    this.message = message;
};
FailureToLoadHTMLException.prototype = new Error();
utils/logDebugMessage.js
/* global DEFAULT_DATE_FORMAT */
/**
 * Log a debug message to the browser's JavaScript console.
 * The entry is prefixed with "DEBUG" and a formatted timestamp.
 * @param {String} msg - The message to log
 * @param {String} dateFormat - Optional date format for the timestamp
 * @returns {undefined}
 */
const logDebugMessage = function(msg, dateFormat=DEFAULT_DATE_FORMAT) {
    console.log(`DEBUG | ${formatDate(new Date(), dateFormat)} | ${msg}`);
};
Answer: Very neat project! I really like that you are avoiding using a library, it's a great way to get better at JS.
I see the if (DEBUG) { logDebugMessage() } pattern several times, it might be worth bringing the conditional inside the logDebugMessage function.
formatDate has a ton of repetitive code, I'd recommend taking advantage of .replace's second parameter. There's probably some way to reduce this further...
const formatDate = function (date, formatString = DEFAULT_DATE_FORMAT) {
if (!(date instanceof Date)) {
return ""
}
const months = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"];
const format = {
yyyy: date.getFullYear(),
M: date.getMonth() + 1,
d: date.getDate(),
h: date.getHours(),
m: date.getMinutes(),
s: date.getSeconds()
}
format.yy = format.yyyy.toString().slice(-2);
format.MM = format.M < 10 ? `0${format.M}` : format.M;
format.MMM = months[date.getMonth()];
format.dd = format.d < 10 ? `0${format.d}` : format.d;
format.hh = format.h < 10 ? `0${format.h}` : format.h;
format.mm = format.m < 10 ? `0${format.m}` : format.m;
format.ss = format.s < 10 ? `0${format.s}` : format.s;
const regex = /yyyy|yy|MMM|MM|M|dd|d|hh|h|mm|m|ss|s/g
return formatString.replace(regex, s => format[s] || s);
};
The original formatDate function will have a problem in May if MMM is included in the format string.
It might be worth allowing formatDate to accept no arguments, in which case it uses the current date.
loadHtml looks good to me with two exceptions. First, if the passed id starts with #, the # will not be removed as the comment implies it should. Second, if you define a logDebugMessage function, use it! Drop the console.log.
In server_interface.js, I'd recommend linking to the source of the flatten method, since that explains how it works.
Get rid of unused variables. There are plenty of tools which can check if a variable is used, you should get rid of them instead of just commenting about them.
Since you are using let and const, you can use Array.prototype.includes instead of Array.prototype.indexOf to check for an element in an array.
The ChatMessage toString method won't work. JS isn't C# - it's not possible to drop this and still access the instance variables. Same goes for PlayerConfigMessage.
Instead of using the LoginMessage : function(username) pattern, you can use LoginMessage(username).
wsProtocolFinder does not check for wss at the start of the string. It will also match wssssss://. The regex should be /^wss?:\/\//.
I know the goal of this project is to avoid any npm packages, however you should really consider at least using a linting program (if only installed locally). Pretty much all linting programs could have caught the problems with this mentioned above and can also alert you to the unused variables. | {
"domain": "codereview.stackexchange",
"id": 29942,
"tags": "javascript, html, websocket"
} |
How can I calculate how far through the day the prime meridian is of different planets | Question: I'm trying to create a widget that shows the 'time' on different planets. It will show how far through the day/night cycle (as a percentage) a point on the planet is. It has been easy to scale down the cycle from 24 hr to eg ~10hr day of Jupiter however I am struggling to work out how I can 'set' the time.
For Mars this has been straight forward, using the comparison of MTC and UTC of when the Curiosity rover landed but I can't find similar data for other planets to anchor time from earth to the other planets.
Is there a data base for this kind of information?
Answer: NASA's Navigation Ancillary Facility (NAIF) publishes planetary constants kernels (PCK) which are basically text files containing pole orientations for the largest known bodies.
PCKs include the parametric orientation of each body's prime meridian in agreement with IAU standards (meaning the datum is J2000.0).
The latest PCK dates from 2011 and is actively used by most operational interplanetary flight projects across the world.
Using the NAIF SPICE library (available in Fortran, C, Matlab and IDL) you can load this kernel and read the orientation of the prime meridian for your body and date of interest.
Time offset between bodies can be reconstructed as angular offset between meridians. But you may find some time-related functions within SPICE which can make your task easier. | {
"domain": "astronomy.stackexchange",
"id": 2692,
"tags": "rotation, time"
} |
Sending Goal to action server fails | Question:
I am trying to send goals to the move_base action server, as per
the ROS tutorial . My action server is the one created by roslaunch move_base_amcl_2.5cm.launch from the navigation_stage package.
I always get the server error "Failed to find a valid control". This is also the case no matter what launch file I use from the navigation_stage package. I desperately want one single test case (no matter how simple) for which the base_local_planner is used to find the goal.
In addition, setting the initialpose from the command line hangs; the message is latched, but there is no effect in Stage.
FYI, my ros version is 1.2.4 and I use cturtle. As I have other packages working fine in this environment, I don't want to upgrade to diamondback and the newest nav stack unless there is no option.
Originally posted by PKG on ROS Answers with karma: 365 on 2011-06-25
Post score: 0
Answer:
I resolved the problem by installing diamondback with Boost 1.40.0. The boost threads I had was causing multiple issues, including crashing stage-ros frequently. Thanks, Eitan.
Originally posted by PKG with karma: 365 on 2011-06-30
This answer was ACCEPTED on the original site
Post score: 0 | {
"domain": "robotics.stackexchange",
"id": 5960,
"tags": "ros, navigation, move-base, motion-planners"
} |
If a planet's core is heated by its gravitational pressure, where does that energy come from? | Question: I just saw a show about the theorized "Planet 9." One possibility is that it's an ice planet. But it could have a liquid water core from its gravitational energy crushing in the core and making it heat up. Where does the energy for that heat come from? If the gravitational energy comes from the planet's mass, shouldn't the energy be constant? Unless some other part of the process make the planet lose mass.
Answer: There are two sources of the heat of a planet's core:
There is the original potential energy of the asteroids that fell together to form the planet.
The material of the core is such a good insulator, that the small amount of radioactivity that occurs naturally in the core material is enough to be the input of energy that keeps the core molten after the planet has formed.
See
https://www.scientificamerican.com/article/why-is-the-earths-core-so/
http://phys.org/news/2006-03-probing-earth-core.html | {
"domain": "physics.stackexchange",
"id": 35756,
"tags": "thermodynamics, gravity, pressure, potential-energy, planets"
} |
Work done by gravity | Question: When an object is lifted up it gains potential energy equal to $mgh$. When it is dropped from this height, gravity does work on it (which is also $mgh$) and this work is converted into the kinetic energy.
But what happens to the potential energy that it initially possessed when it was at a height? Shouldn't the object now have $mgh+\frac{1}{2} mv^2$ as the total energy ( previous potential energy + work done by gravity) ?
Answer: We define the change in potential energy of an object in gravitational field as the negative of the work done by the gravity.
$$\Delta{U}=-W=-mgh\cos{\theta}$$
When the object is at height $h$:
$$U_1=mgh$$
$$\implies U_2=0$$
This is potential energy of object at just above the ground.
Then where the energy has gone?
It has been converted into kinetic energy of the object.
I.E.
$$K.E=mgh$$
But you would say that $K.E=\frac{1}{2}mv^2$. Ok see what happens to this.
When object is at height h and moves to just above the ground under the influence of gravity so that it gains a final velocity $v$:
$$2aS=v_f ^2 - v_i ^2$$
$$\implies 2gh=v^2 - 0=v^2$$
$$\implies K.E=\frac{1}{2}m(2gh)$$
$$\implies K.E=mgh$$
Now if your object hits the ground, some of its kinetic energy is converted into heat and some into sound and some into other forms therefore the object does not go back to the same height where it was in the very beginning. | {
"domain": "physics.stackexchange",
"id": 35197,
"tags": "newtonian-gravity, energy-conservation, work, potential-energy"
} |
How to determine the strength of a laser beam needed to cut through a specific material? | Question: How can it be determined how strong a laser beam needs to be in order to cut through specific material?
For example, if I have a 5mm thick piece of brown cardboard, is there a way to determine how strong the laser has to be (how many Watt) in order to cut a hole in it?
Answer: Firstly, I am a little worried by your inclusion of the "eye" tag for this question. If, in the small likelihood, that your question is probing the threshold level for eye safety by finding the minimum power that will cut the retina and then declare smaller powers safe, then I need to warn you that you cannot do this by calculation but must instead refer to the ISO 60825 laser safety standard to find out whether a beam is safe for viewing.
Assuming this is not so, then let's answer you question. You not only need its power but also:
An assessment of its beam quality, i.e. the wavefront aberration of its output wave. Laser beams can in particular suffer astigmatism (See Wiki page of this name);
The numerical aperture of the focussing optics (let this be $\eta$);
The heat transfer properties of the material to be cut (in particular the diffusion length (See the Example"solution in one dimension: diffusion length" heading on the "Fick's laws of diffusion" Wiki page) as well as the conductivity and density of the material;
The Kindling (or Autoignition) Temperature or equivalent temperature at which a material mechanically breaks down.
Then, the spotsize $s$ of a perfectly focussed, unaberrated beam is of the order of $\frac{\lambda}{\eta}$. For maximum efficiency, you need the beam to focus to a point of diameter smaller than the heat diffusion length. Note that this implies a minimum numerical aperture. Then you can calculate the steady state temperature distribution assuming a point source of the power in question from the heat diffusion equation. You then calculate the power you need to raise the local temperature about the kindling or equivalent breakdown temperature.
If the beam is aberrated, then you will need to calculate the point spread function for the wave and calculate whether the spotsize is still less than the heat diffusion length, then you will need to solve the heat diffusion equation in detail given the intensity distribution implied by the PSF and then calculate the power needed to raise the local temperature over the breakdown temperature.
If you want to use an unfocussed beam, then you will need to use the beam's profile and do the calculation of the last paragraph.
This is all very complicated, which is why questions such as yours are often settled emprirically.
Another means of cutting is to use a highly focussed beam to make little "explosions" in the material, causing it to spit little packets of material from the surface in a process called ablation. The power needed for this is again often found empirically, although one can roughly calculate the magnitude of the shock wave induced by swift heating to see whether it exceeds the material's strength. If not, then you have no hope of inducing ablation. | {
"domain": "physics.stackexchange",
"id": 29097,
"tags": "laser, laboratory-safety"
} |
Super-strongly connected components? | Question: I face a problem that is related to (strongly) connected components. Let $G=(V,E)$ be an undirected graph.
I want to find subgraphs $G_1,G_2, \dots,G_n$ of $G$ such that
they do not overlap (i.e. don't share any nodes)
each two nodes in a subgraph are connected by an edge, i.e. $\forall i \forall n,m\in V_i$ then $\{m,n\}\in E_i$ where $G_i=(V_i,E_i)$.
My question is: How to solve this problem? Is there any specific name of this problem?
Edit: The graph I am dealing with is very sparse. Coloring based approximations may not work as the complement graph would be huge (not able to store it in memory).
Answer: This is called the Minimal Clique Cover Problem, and is NP-hard: as a matter of fact, the decision version ("can I do it with only $k$ subgraphs?") is one of Karp's original 21 Problems, the ones that first defined NP-completeness.
Since it's linked to graph coloring, you can't even get a good approximation in polynomial time, unfortunately: everything about this problem is hard.
For further reading, your "super-strongly connected components" are generally called cliques, and a clique cover is a way to "cover" the entire graph with non-overlapping cliques. A minimal clique cover uses the smallest possible number of cliques to do it. | {
"domain": "cs.stackexchange",
"id": 11951,
"tags": "graphs"
} |
Solution for sum of all fizzbuzz numbers in a given range | Question: The number fizz buzz is composed of two numbers that are fizz and buzz.
fizz represents the numbers that are divisible by 3 and buzz are the numbers that are divisible by 5.
combining both fizz buzz are the numbers that are divisible by both 3 and 5 both. So how can we get all the fizz buzz numbers sum in a given range?
If we analyze and try to find out the numbers that are divisible by both 3 and 5 we get series like this:
3*5*1, 3*5*2, 3*5*3, .....,3*5*n
analyzing this here is my C# code to find all fizz buzz numbers sum in a given range.
I am looping through the entire range and add all numbers whose remainder with 15 is 0.
// Read the inclusive range bounds from standard input.
// NOTE(review): Convert.ToInt32 throws FormatException on bad input — int.TryParse would be safer.
int start_range = Convert.ToInt32(Console.ReadLine());
int end_range = Convert.ToInt32(Console.ReadLine());
int sum = 0;
// Accumulate every number divisible by both 3 and 5 (i.e. every multiple of 15) in the range.
for(int i= start_range; i<= end_range; i++)
{
if(i%15==0)
{
sum = sum + i;
}
}
Console.WriteLine(sum);
// Keep the console window open until a key is pressed.
Console.ReadKey();
Answer: I don't have much to say for the code itself, it is concise, readable and does what you want it to do. That's a good start. My only critique is that you handle input and output in the same place, if you plan on reusing the code maybe consider making a method.
So since the code is on point, we can look at the algorithm you are using. The only time the sum will change is when the sequence passes a multiple of 15. So lets see if we can find which multiples of 15 are between our starting point (x) and our ending point (y).
If we start at x, the first multiple of 15 we will encounter will be
$$15 \lceil \frac{x}{15} \rceil$$
To give you some intuition as to why this is the case, my argument is as follows. When you divide a number by 15, you get a whole part (how many 15s you can make from x) and a remainder (how many are left over when you make as many 15s from x as you can). The next multiple of 15 after (or including) x will be the first number you get when counting upwards whose remainder is 0. In other words, the remainder is a progress bar of how close to 15 we are. By taking the ceiling of x / 15, we are setting the progress bar to full, so when we multiply by 15 we get the next multiple.
What is the last multiple before (or including) y? We can use similar logic to get
$$15 \lfloor \frac{y}{15} \rfloor$$
And now we can directly loop over multiples of 15, no need to do any mods.
// First multiple of 15 at or after start_range: 15 * ceil(start_range / 15).
int start = 15 * (int)Math.Ceiling((double)start_range / 15);
// Last multiple of 15 at or before end_range: 15 * floor(end_range / 15).
int end = 15 * (int)Math.Floor((double)end_range / 15);
int sum = 0;
// Step directly from one multiple of 15 to the next — no modulo test needed.
for (int i = start; i <= end; i += 15) {
sum += i;
}
We don't have to stop there. This is a straightforward enough sum so we can somewhat simplify it.
$$start + (start + 15) + (start + 30) + ... + end$$
We can write start and end as multiples of 15 (where a and b are just integers).
$$15a + (15a + 15) + (15a + 30) + ... + 15b$$
We will now factor out 15 term.
$$15(a + (a + 1) + (a + 2) + ... + b)$$
// a and b are the range bounds expressed as multiples of 15 (start = 15a, end = 15b).
int a = (int)Math.Ceiling((double)start_range / 15);
int b = (int)Math.Floor((double)end_range / 15);
int sum = 0;
// Sum the integers a..b; the common factor of 15 is applied once at the end.
for (int i = a; i <= b; i++) {
sum += i;
}
sum *= 15;
I think the sum is simple enough now that we can do it without a loop. I'm going to "cheat" a little and just introduce the identity we will use without much explanation. There are some nice visualizations and proofs online if you want to look them up yourself.
$$ a, b \in \mathbb{N}; \sum_{i=a}^{b}{i} = \sum_{i=0}^{b}{i} - \sum_{i=0}^{a-1}{i} $$
The sum of numbers from 0 to n is well known
$$ \sum_{i=0}^{n}{i} = \frac{n^2+n}{2} $$
So we can use this equation to derive
$$ \sum_{i=a}^{b}{i} = \frac{b^2+b}{2} - \frac{(a-1)^2 + a - 1}{2} $$
$$ = \frac{b^2+b}{2} - \frac{a^2 - 2a + 1 + a - 1}{2} = \frac{b^2+b}{2} - \frac{a^2 - a}{2} $$
$$ = \frac{b^2+b - a^2 + a}{2} $$
This is only a couple of lines of code now, and should run in constant time.
int a = (int)Math.Ceiling((double)start_range / 15);
int b = (int)Math.Floor((double)end_range / 15);
// Closed form: 15 * (sum of integers a..b) = 15 * (b^2 + b - a^2 + a) / 2.
// (b^2 + b) and (a^2 - a) are always even, so the division by 2 is exact.
int sum = 15 * (b*b + b - a*a + a) / 2;
Final remarks, in many languages integer division will floor the result (once the numbers are both positive). This means we can avoid Math.Floor if we assume end_range >= 0 (which seems like a fair assumption).
We also have the constant 15 strewn around the code, let's make it a variable.
int num = 15; // A magic number specific to our problem
int a = (int)Math.Ceiling((double)start_range / num);
// Integer division floors for non-negative end_range, so Math.Floor is unnecessary here.
int b = end_range / num;
int sum = num * (b*b + b - a*a + a) / 2;
We could also avoid the Math.Ceiling call but the code may lose its readability. The code also doesn't account for bugs like overflow, and it could be reworked to delay those issues. Currently, I don't think either of these changes would be worth it.
"domain": "codereview.stackexchange",
"id": 31735,
"tags": "c#, performance, algorithm, fizzbuzz"
} |
Calculation of Moment of inertia by exploiting the fact that it only depends upon the distribution of mass with respect to the principal axis | Question: First of all what do I even mean when I say ‘the method of exploitation of the fact that only mass distribution is what matters with respect to the principal axis(I will be referring this method as exploitation method for the post)’. let me illustrate this by describing the method and then afterwards by an example.
The Reason why Exploitation method works
I believe that the exploitation method is a very powerful method. Through this method moment of inertia can be calculated for the objects in reference to objects that have same mass distribution as them. Let me prove this method by stating a few points:-
Moment of inertia calculation of a continuous object is simply the addition of the particles making it up at various distances from the principal axis.
The only way to change $\mathrm{I}$ of these particles is by changing either the mass or the distance from the principal axis. It must mean that in the given below figure all have same moment of inertia. (source)
Now consider a system of particles in 2 cases and note that in both the cases, moment of inertia will also be same.(source:MSPaint)
The case of a hollow cone
Here Consider a cone of height $\mathrm{H}$ and radius $\mathrm{R}$ now to find the moment of inertia of this hollow cone. Now to calculate the moment of inertia of this one may use integration by taking a ring of mass $\mathrm{dm}$ but I know of the above technique that if this cone is put under a hydraulic press then finally we would be getting a disc of same mass and radius. This may be more imaginable if we consider many elementary rings making up the cone. But the mass distribution would be same thus the moment of inertia of both objects will be equal as shown below:-
The Question Is about a sphere
It can be seen that this method won’t work here as imagining the similar approach. We can divide the sphere into two hemispheres and consider similar approach as above as in putting under a hydraulic press. Through that, we would be getting a disc but we know that, $$\mathrm{I_{disc} \neq I_{Hemisphere}}$$
Therefore, my question is as follows;
Why does this method not work for hemisphere? And for what general cases is this method not valid?
Answer: In the comments, @Buraian almost has it right. The key thing is not that $\frac{dr}{dh}$ is constant, but rather that the ratio of area elements is constant:
\begin{align}
dA_{\text{cone}}&=\frac{L}{R}\,dA_{\text{disc}}
\end{align}
where $L=\sqrt{H^2+R^2}$ is the slant height of the cone. In this case, the constancy of $\frac{dr}{dh}$, and the rotational symmetry of the cone/circle imply the above constancy of the ratio of the area elements, which is why everything works out nicely for the cone example.
Let us actually be more explicit about where exactly this constancy is invoked. Let $\sigma_0$ be the constant surface mass density on the cone, and let $r$ be the coordinate describing the distance to the $z$-axis. The way to calculate the moment of inertia is
\begin{align}
I_{\text{cone}}&:=\int_{\text{cone}}r^2\,dm_{\text{cone}}\\
&=\int_{\text{cone}}r^2\sigma_0\,dA_{\text{cone}}\\
&=\int_{\text{disc}}r^2\sigma_0\,\frac{L}{R}\,dA_{\text{disc}}\tag{$1$}\\
&\equiv\int_{\text{disc}}r^2\,\rho_0\,dA_{\text{disc}}\tag{$*$}\\
&=I_{\text{disc}}
\end{align}
where $(1)$ uses the change of variables formula to convert the integration form the cone to the disc (I'm using the "vertical map" corresponding to your "hydraulic press" analogy, i.e take a point $(x,y,z)$ on the surface of the cone and project it down to $(x,y)$ in the disc). In formula $(*)$, I have defined a new quantity $\rho_0:=\sigma_0\frac{L}{R}$. This is to be interpreted as the mass density of the disc after you "vertically compress" the cone. Because $\sigma_0$ was constant and because the area elements are proportional by a constant, it follows that $\rho_0$ is also a constant. This is why you're able to unambiguously refer to $M$ as the total mass of either the cone/disc and in the very last equality, you can use the formula for the moment of inertia of a disc (with uniform density) to conclude that $I_{\text{cone}}=I_{\text{disc}}=\frac{MR^2}{2}$.
One thing to note above is that while the total mass remains constant, the density changes (it changes from the constant value $\sigma_0$ on the cone to the constant value $\rho_0$ on the disc). In the case of a sphere, things are worse. It is no longer true that the areas scale in a constant fashion. In fact (for a unit hemisphere and disc),
\begin{align}
dA_{\text{hemisphere}}&=\frac{1}{\sqrt{1-x^2-y^2}}\,dA_{\text{disc}}
\end{align}
This is why your approach fails for the disc (even if $\sigma_0$ started out constant, you'd end up with $\rho(x,y)=\sigma_0\cdot \frac{1}{\sqrt{1-x^2-y^2}}$ which is non-constant hence the integral must be evaluated directly). You must actually start with a mass density like $\sigma(x,y,z)=cz$ for some constant $c$ in order to "cancel out" the geometric effect of how the areas transform so that in the end you get a constant $\rho_0$. | {
"domain": "physics.stackexchange",
"id": 80379,
"tags": "moment-of-inertia"
} |
There is a way that I could run $ roscore and don't record log datas? | Question:
There is a way that I could run $ roscore and don't record log datas?
Or, record a specific size of data and delete de older ones?
Originally posted by JoaoPedro on ROS Answers with karma: 33 on 2014-07-22
Post score: 0
Answer:
You can't turn off logging completely in ROS, but you can adjust the logger levels with the appropriate log4cxx and python logging configuration files as described in rosconsole configuration and rospy logging configuration
Originally posted by ahendrix with karma: 47576 on 2014-07-22
This answer was ACCEPTED on the original site
Post score: 1
Original comments
Comment by tfoote on 2014-10-25:
There are also many generic log rotation tools which could be applied to the output to limit size. | {
"domain": "robotics.stackexchange",
"id": 18719,
"tags": "roscore"
} |
How to find speed when accelerating down a slanted wire | Question: I saw this picture on one of my social media sites with the caption, "I'd do this in a heart beat! Who's with me!"
I was about to go balls to the walls and say, "I'm in! When and where??" But then I got to thinking, how fast would I be going when I hit the water? If I were going too fast, would it hurt me?
SO I was trying to figure this out, and I'm not very good at physics so I was wondering if you guys could help me out.
I estimate the guy is 90 kg in mass, the wire is angled pi/6 from the horizontal and he's about 50 meters above the water when he starts (all estimates...).
What is the formulas I need to figure out the speed the guy will be going once he hits the water? I know there's some calculus in there, and I'm pretty good at calculus.
Answer: The only force which works is gravity$^1$. So, change in gravitational potential energy equals final Kinetic energy(assume initial is zero).
$$mgh=mv^2/2$$
$$v=\sqrt{2gh}$$
here $h$ is vertical height traversed.See the velocity does not depend on angle of string, mass of body too..
Let's see the kinematics of body.
The length of string is $h cosec\theta$ ($\theta $ being angle with horizontal assumed $\pi/6$)
acceleration of body along the string=$g\sin\theta$
Now $\text{using} : v^2=u^2+2as$
$$v^2=0+2\times h cosec\theta\times g \sin\theta$$
$$v=\sqrt{2gh}$$
Working in differentials
for $v$ along the rope.
$$dv/dt=v\dfrac{dv}{dx}=a$$
$$\int_0^{v_f} v\,dv=\int_0^{h\csc\theta} a\,dx=ax\Bigg|_0^{h\csc\theta}$$
$$\dfrac{v_f^2}{2}=g\sin\theta \cdot h\csc\theta \ \ ; \ \ a=g\sin\theta$$
$1)$Assuming the pulley being used to slide to be friction less.Though not possible.Also the rope is assumed to be in-extensible and straight. | {
"domain": "physics.stackexchange",
"id": 7646,
"tags": "acceleration, speed"
} |
Geometry with differential angles | Question: In the solution to a problem, the author considers the normal force provided by an arc length of string with a differential subtended angle size, $\textrm{d}\theta$. The author reasons that this normal force is provided by the nonzero sum of the tension vectors on both ends of this small arc length. These tension vectors nearly cancel, but they do not entirely cancel since their directions are almost but entirely antiparallel to each other. The resultant sum is the normal force.
What I cannot follow are the author's geometric arguments. Here is the figure provided:
The author reasons that if the subtended angle is $\textrm{d}\theta$, then the radial component of both tension vectors is $T\sin{\textrm{d}\theta/2}$. I do not follow how the subtended angle of $\textrm{d}\theta$ leads to an angle of $\textrm{d}\theta/2$ in that small right triangle.
Answer: Imagine a vector $\mathbf{T}$ tangent to a circle. If you move the vector on an arc subtending angle $\theta$, then the vector at its final position rotates by the same angle, and it's projection on the axis to its initial position is $|\mathbf{T}|\sin\theta$. Here the tensions at each end have equal magnitude (necessary condition for lack of stretching), but as the arc opens by an infinitesimal amount $d\theta$ you can imagine the two vectors rotating in opposite directions, but they will go only half the way. Thus both project on the vertical axis with component $|\mathbf{T}|\sin(d\theta/2)$ as pointed out by the author. | {
"domain": "physics.stackexchange",
"id": 58938,
"tags": "vectors, geometry"
} |
Fission inside nuclear reactor | Question: I have studied about nuclear reactors and also know their functionality.But some how i couldn't get answers for some my questions.
1) During fission reaction enormous amount of heat is produced and this heat is in the form of flames?
2)If so does the whole reactor core is burnt?and how come water can be in contact with the burning core?
3)Once a fission process is finished will reactor be ready for next usage or it will left for years for less radioactive?
4) Whether the chain reaction is started by introducing neutrons or done automatically by spontaneous emission of fuel rods?
5)If it is due to neutron introduction then how these neutrons attack the pellets containing uranium correctly and how many tries are required?
I have searched for many sources online but couldn't get a clear image.Hope get now.
Answer: 1) There are no flames inside a reactor, because the reactor contains a cooling system which removes the heat (and uses it to boil water and spin a turbine), thereby maintaining the reactor core safely below the temperature at which it would burn. Failure of the cooling system will allow the core to catch fire and burn (as it did in the Chernobyl accident).
2) Water is not in contact with anything that is burning. It is inside pipes that run through the core, and the core is not "on fire".
3) As the reactor operates, it uses up the fissionable fuel loaded into its core and its power output falls gradually. At some point, the spent fuel is extracted from the core and fresh fuel is put into it so the reactor can keep operating. The reactor itself is made radioactive by the fission occurring inside it and after its other components (pumps, valves, pipes, etc.) are worn out, the entire device has to be carefully disassembled (so the radioactive parts can be safely stored) or covered up.
4) The chain reaction starts by itself by spontaneous fission once there is enough fuel placed in its core to make it go critical. No external neutron source is needed.
5) This process is complex, and depends on the details of the design of the core and its cooling system. It is dealt with in detail in the field of nuclear engineering. | {
"domain": "physics.stackexchange",
"id": 57436,
"tags": "nuclear-physics, nuclear-engineering"
} |
Deprotonation of 2- and 3-methylquinoline | Question: Why are there many examples of 2-methylquinolines being treated with a base then an electrophile but none of the corresponding reaction with 3-methylquinolines? Does this have to do with the charge not being stabilised for the 3-methylquinoline?
Answer: Yes, in contrast to 3-methylquinoline, the methyl group of 2-methylquinoline (1) is easily deprotonated by potassium ethoxide.
The anion has been examined by NMR spectroscopy (DOI), but its use in synthesis was reported long before. There's an article (in German) in Chem. Ber., 1909, 42, 1140 (DOI) where the reaction of 2-methylquinoline with diethyloxalate is described. | {
"domain": "chemistry.stackexchange",
"id": 6678,
"tags": "organic-chemistry, acid-base, aromatic-compounds"
} |
Is saturation of olfactory cells able to create a feeling of another odour when it stops? | Question: I'm looking for at least one scientific study about how odours could impact human olfactory system and create some phenomenon that could be loosely compared to retina remanence.
As an example, with standard eyes, when you look at a cyan area for a long time and then look at a white paper, the paper looks yellow for a while (see some fun experiments here). This seems to be called an afterimage.
In the type of study I'm looking for, if it exists, the olfactory cells would be involved instead of the retina.
Being exposed to a strong odour for some time would make the subject smell an other odour (a complementary odour?) when the strong odour disappear even if nothing casts this new "complementary" odour to the nose. We could call this an "afterodour" or a "complementary ordour".
Answer: The problem with odors is that no known odorant hits only one olfactory receptor. A saturating concentration of compound X for receptor A might be saturating another receptor B, but not receptor C. If you now decrease the concentration, receptor A is still saturated, receptor B is only partially activated and receptor C is not activated at all. Since smell is perceived as the sum of inputs from all olfactory receptors, the smell at the different concentrations will be perceived totally different because of the different receptor activation. Exposing a nose to a strong stimulus will always stimulate more than one receptor, so saturation will occur at more than one receptor.
Perception of light on the other hand depends on finely tuned receptors which are specific for a certain wavelength range. One tuning produced a few very specific receptors, while another produced a lot of unspecific receptors. I like the idea but I doubt it can be confirmed in the lab; also I couldn't find any reports supporting it, not that this is any proof.
"domain": "biology.stackexchange",
"id": 5646,
"tags": "human-biology, olfaction, nose, odour"
} |
Variant of the knapsack problem | Question: How would you approach the knapsack problem in a dynamic programming situation if you now have to limit the number of item in the knapsack by a constant $p$ ? This is the same problem (max weight of $W$, every item have a value $v$ and weight $w$) but you can only add $p$ item(s) to the knapsack and obviously need to optimize the value of the knapsack.
Do we need a third dimension, or could we find another approach without it? I tried simply storing the number of items in the knapsack in each cell and taking the max value at the end among entries with item count <= $p$, but it is not the BEST solution.
Answer: Very nice question!
You are twice right:
Propagating the number of items in the knapsack does not lead to optimal solutions.
One solution consists of adding a third dimension. This is rather simple but it is necessary to take some facts into account when doing so. Note however that it is not the only alternative
In the following, I am assuming that you are familiar with the solution based in dynamic programming. In particular, I will not discuss how to traverse the table backwards to determine the solution.
Let us first focus on the typical case: the number of items is unrestricted. In this case, you just build a table $T$ where $T_{i,j}$ contains the optimal value when the overall capacity of the knapsack equals $i$ and only the first $j$ items are considered. From here:
$T_{i,j} =\max\{T_{i,j-1}, T_{i-w_j,j-1}+v_j\}$
where $w_j$ and $v_j$ stand for the weight and value of the $j$-th item respectively. If $C$ is the overall capacity of your knapsack and there are in total $N$ items the optimal solution is given by $T_{C, N}$. This algorithm is known to run in pseudo-polynomial time and one of its beauties is that it only considers those combinations that fit the maximum capacity.
However, this is not enough when adding your constraint: a maximum number of items $p$. The reason is that the previous recurrence formula does not take into account different combinations of items:
First, if $T_{i,j-1}<(T_{i-w_j,j-1}+v_j)$ then $T_{i,j}=(T_{i-w_j,j-1}+v_j)$ so that the $j$-th item is added to the knapsack in spite of the maximum number of items considered, $p$ ---so that you might be violating your constraint. Well, you might be tempted here to apply the preceding formula keeping track of the number of items inserted at each step and do not add others if the number of items currently in the knapsack exceeds $p$ but,
Second, if $T_{i,j-1}>(T_{i-w_j,j-1}+v_j)$ then $T_{i,j}=T_{i,j-1}$ so that this item is not added but that might be a big mistake in case the optimal solution $T_{i,j-1}$ already consists of the maximum number of items to insert into the knapsack. The reason is that we are not properly comparing: on one hand, to preserve the optimal solution consisting of $p$ items selected among the previous $(j-1)$; on the other hand, to insert the $j$-th item and, additionally consider the best subset with $(p-1)$ items among the previous $(j-1)$.
So that a first solution consists of adding a third dimension. For your case, let $T_{i, j, k}$ be the optimal solution when the capacity of the knapsack is $i$, only the first $j$ items are considered and it is not allowed to put more than $k$ items in the knapsack. Now,
If you are computing $T_{i, j, k}$ for a number of items strictly less or equal than the number of items that can be inserted ($j\leq k$) then proceed as usual but using the same value of $k$: $T_{i,j,k} =\max\{T_{i,j-1,k}, T_{i-w_j,j-1,k}+v_j\}$
Now, if you have to compute $T_{i, j, k}$ for a number of items strictly larger than the number of items that can be inserted ($j> k$) then: $T_{i,j,k} =\max\{T_{i,j-1,k}, T_{i-w_j,j-1,k-1}+v_j\}$
The first expression should be clear. The second works since the $(k-1)$-th layer of the table $T$ keeps track of the best combination of $(k-1)$ items among the first $(j-1)$ as required above.
An efficient implementation of this algorithm does not need to compute $T_{i, j, k}$ for all $k$. Note that the preceding recurrence relationships relate layer $k$ with $(k-1)$ and thus, it is possible to alternate between two successive layers (e.g., if you are interested in the optimal solution with $k=4$ you just use two consecutive layers: 0 and 1, 1 and 2, 2 and 3, 3 and 4 and you're done). In other words, this algorithm takes twice the memory required by the traditional approach based on dynamic programming and thus, it can be still run in pseudo-polynomial time.
Be aware, however, that this is not the only solution! And there is another you might find more elegant. In the preceding formulae, we retrieved the optimal solution which consisted of no more than $(k-1)$ items among the first $(j-1)$ as $T_{i, j-1, k-1}$. However, it should be clear that this is precisely equal to $\max\limits_{p=0,j-1}\{T_{i, p}\}$ just by using the original table!! ie., the optimal solution with no more than $k$ items can be also retrieved by considering the optimal solutions with 1 item, 2 items, 3 items, ... $(j-1)$ items ... To make this formulation work you should also keep track of the number of items considered in every partial solution so that you will need two integers per cell. This memory occupation results in precisely the same memory requirements of the algorithm shown above (using a third dimension in the form of layers $k$).
Hope this helps, | {
"domain": "cs.stackexchange",
"id": 2069,
"tags": "algorithms, optimization, dynamic-programming, knapsack-problems"
} |
MATLAB's designfilt vs butter function | Question: I am relatively new to signal processing, and have always used MATLAB's designfilt option for my signal processing needs. Recently, however, I have seen people just using MATLAB's butter function. We all primarily just analyze EEG signals. I am just curious as to what makes them do this, as I currently don't see a major difference between the two aforementioned functions.
Again, I am pretty new to signal processing.
Answer: The function butter, as the name indicates, is used to construct the Butterworth IIR filter.
The function designfilt can be used to design the Butterworth filter as well, but you can design the other type filters with this function.
What type filter you need depends on your application. For example, the Butterworth filter rolls off more slowly around the cutoff frequency than the Chebyshev filter or the Elliptic filter, and it may decide your selection.
From the point of view of a coder, the use of designfilt can be preferable even when constructing the Butterworth filter. The function uses a name-value syntax and enables one to create a digitalFilter object:
d = designfilt(resp,Name,Value)
, d is the digitalFilter object. The digitalFilter object bundles all the parameters of a linear filter into a single container. The object digitalFilter is immutable, and after it is created, you can edit it only with the Filter Design Assistant that you open with the command designfilt(d).
On the other hand, the syntax of butter is simpler, the filter type stands out, and the function explicitly returns the matrix of coefficients.
But the most prominent feature of butter is that, using the syntax
[___] = butter(___,'s')
with the option 's' indicated (contrast it with default 'z' used for the digital filter design), butter returns the matrix of coefficients for the analog filter design -- which, by definition, the digitalFilter object cannot readily provide. | {
"domain": "dsp.stackexchange",
"id": 10107,
"tags": "matlab, eeg"
} |
Is it possible to re-identify a particular electron? are there identity conditions for electrons? | Question: I think that being able to formulate a clear identity criterion for the objects it deals with is important for any theory.
For example, set theory starts by the extensionality axiom telling that
set A is identical to set B iff A and B have exactly the same elements.
[ In the same way, arithmetics says that number a and number b are equal (i.e. are exactly the same number) iff
a-b = 0 ]
Consequently, I'm led to ask the question: what are the identity conditions for electrons?
In which case and under which conditions (sufficient and necessary) can I say that this electron e2 that I observe now is the same electron as electron e1 I observed previously?
Answer: All electrons, like all elementary particles, are completely identical and thus indistinguishable. They can be in different quantum states, but you cannot be sure which electron is in which state.
The reason that all electrons are identical is that they are all quanta of one quantum field describing electrons and positrons that extends throughout the universe. There is just one quantum field for each kind of elementary particle/antiparticle. Physics is not about the $10^{80}$ particles in (the observable part of) our universe; it is about the 17 (by one way of counting, in today’s Standard Model) quantum fields in our universe.
The identicality of fermions such as electrons is the basis for the Pauli exclusion principle. Without it, chemistry and biology as we know them would not be possible because atoms would have all of their electrons in the ground state. The identicality of bosons such as photons is the basis for technology such as lasers. | {
"domain": "physics.stackexchange",
"id": 63482,
"tags": "electrons, soft-question"
} |
How would one go about making a cheap freeze spray? | Question: I'm interested in making a liquid freeze spray, for a fun science project. My only experience with this comes in spraying inverting canned air dusters, which spray a -60F mist/liquid (liquid if you spray while the can is inverted). I'd like to make a spray that I can store and use. And if it can't be stored more than a few hours, then hopefully something easy to make on the spot.
My chemistry understanding is high-school level basic, so I do not have a working knowledge of chemistry. On one end of the spectrum, I know there is liquid nitrogen, but it's expensive and complex to store correctly (high-pressure canisters).
I found this list of cooling bath configurations. Could someone point out which configuration can be stored long term (months), stored cheaply (e.g. a plastic or thin metal thermos; not a heavy high-pressure fire-extinguisher container), and be a liquid when "sprayed" (assuming keeping conditions in our pressure and room temperature)? I'd like a spray temperature of less than -60F, so ice and water won't cut it.
I'm thinking of going with dry ice and > 90% concentration ethanol. I have two questions regarding this approach and a random question:
If you have a solution of half dry ice and half 99% ethanol, how can you keep it at a low temperature? Won't the solution slowly return to room temperature? My canned air dusters always spray out at the -60F; I'd like something like that if possible.
Can this solution be stored in a cheap plastic or thin metal container? Will there be some pressure buildup from the dry ice that will eventually crack or explode the container? Assuming a 10 oz container with a solution of half dry ice and half > 90% ethanol.
How are canned air dusters always able to spray their liquid solution (if inverted while sprayed) at -60F, even if they are stored for months at room temperature? Is it because the contents of the can are under pressure and that this pressure makes the liquid cold?
Answer: The reason the 'canned air' dusters and other aerosol-based things like anti-perspirant cans produce a cryogenic effect is from the decompression involved. They are stored at room temperature, but the can is pressurised.
There are two effects that happen when you spray out some aerosol.
Firstly, by the ideal gas law, $PV=nRT$, when you release the pressure (i.e. lower $P$), the temperature ($T$) also drops. (The increase in volume $V$ mostly compensates for this, though.)
Secondly, the contents of the can are stored under enough pressure to keep at least one of the parts of the mixture in a liquid state. When you release the pressure by spraying it out, the liquid boils and evaporates, and takes energy in the form of heat to do so. You'll notice that the contents of a canned air duster, if you hold them upside-down when you spray, include more than just air -- there's also a liquid in there which is designed to be mostly inert to electronics and to quickly evaporate just as described above when the pressure inside the can drops, maintaining the pressure needed to produce a gas jet. If you spray for an extended period of time, you'll normally notice a boiling sound coming from inside the can, and the can getting cold, as that liquid evaporates. (As a safety note, these substances are often flammable, so don't use these air dusters near naked flames or ignition sources unless you want fire.)
Most people don't have the equipment needed to produce this sort of thing at home.
None of the cooling baths in that list are designed to be left as-is for months at a time -- they're designed to be made up as and when needed, with occasional addition of liquid nitrogen or dry ice to keep them cool. While you could produce a cold pressurised spray on the spot using dry ice and ethanol, there's a high probability that any sort of robust switched-release mechanism could make the vessel you're using explode under the pressure of sublimating carbon dioxide. Also, unless you relied on a pressure source other than the carbon dioxide itself, your spray probably wouldn't have time to get very cold.
So, I don't recommend it. | {
"domain": "chemistry.stackexchange",
"id": 621,
"tags": "home-experiment, experimental-chemistry"
} |
Post-measurement state after homodyne measurements of part of the system | Question: Let us have an $N$ mode Gaussian state and B denote the last mode (for simplicity I just consider a two-modes state)
It is said in articles (and for example How to find the covariance matrix after a partial homodyne measurement?) that partial measurement of a quadrature (say the $q$ one) on the mode $B$ will affect the covariance matrix of the mode(s) A.
However I find this contradicting, here is my argument :
$q_A$ and $q_B$ are commutating operators because they operate on different spaces of the tensor product of Hilbert spaces, so according to (https://quantumcomputing.stackexchange.com/q/27912/), a measurement of $q_A$ gives the same probabilities as the process of measuring $q_B$, discarding the result, then measuring $q_A$ therefore the variance of $q_A$ should be the same in both cases, which is not the case here, for example in this particular matrix of a two-mode state just for example, the variance switched from a to a-c^2/b:
What am I missing?
N.B the example comes from section 7.2.1 of https://arxiv.org/abs/1703.09278
Answer: The formula (84) describes the covariance matrix (CM) of the state after a homodyne measurement of part of the system, given you know (and use) the measurement outcome.
More precisely, after measurement, the state of the unmeasured part of the system has always the same CM (the one of (84)), but displaced by a displacement which is determined by the measurement outcome.
Thus, if you ignore the measurement outcome (that is, average over it), you obtain another CM, which is the same as you would have obtained by tracing, i.e., $\Sigma_A$ (which is indeed an upper bound on the CM in (84)).
On the other hand, if you use the measurement outcome, you can correct for the displacement, and obtain a smaller uncertainty.
Note that this is not surprising: If you measure a maximally (or, for bosons, highly) entangled state, you will know the state of A to arbitrary precision: It will be equal/opposite to the state of the measured system. | {
"domain": "physics.stackexchange",
"id": 90490,
"tags": "quantum-mechanics, quantum-information, quantum-optics, quantum-states, quantum-measurements"
} |
Any successful examples of rosdep python-qt-bindings on Electric? | Question:
I can't figure out how to use a rosdep for python-qt-bindings in Electric on Ubuntu Oneiric.
It works in Fuerte. I define a compatible rosdep.yaml stanza as follows:
python-qt-bindings:
ubuntu:
lucid:
apt:
packages: [python-qt4, python-qt4-dev, python-sip-dev, python-qt4-gl]
oneiric:
apt:
packages: [python-pyside.qtcore, python-pyside.qtgui, libpyside-dev, libshiboken-dev, shiboken, libgenrunner-dev, python-qt4, python-qt4-dev, python-sip-dev, python-qt4-gl]
The install appears to work, then fails at the end:
$ rosdep install art_teleop
Executing script below with cwd=/tmp
{{{
#!/bin/bash
#Packages ['python-pyside.qtcore python-pyside.qtgui libpyside-dev libshiboken-dev shiboken libgenrunner-dev python-qt4 python-qt4-dev python-sip-dev python-qt4-gl']
sudo apt-get install python-pyside.qtcore python-pyside.qtgui libpyside-dev libshiboken-dev shiboken libgenrunner-dev python-qt4 python-qt4-dev python-sip-dev python-qt4-gl
}}}
[sudo] password for joq:
Reading package lists... Done
Building dependency tree
Reading state information... Done
python-qt4 is already the newest version.
python-qt4-dev is already the newest version.
python-sip-dev is already the newest version.
libgenrunner-dev is already the newest version.
libpyside-dev is already the newest version.
libshiboken-dev is already the newest version.
python-pyside.qtcore is already the newest version.
python-pyside.qtgui is already the newest version.
python-qt4-gl is already the newest version.
shiboken is already the newest version.
0 upgraded, 0 newly installed, 0 to remove and 0 not upgraded.
successfully installed python-qt-bindings
rosdep python-qt-bindings failed check-presence-script after installation
rosdep install ERROR:
failed to install python-qt-bindings
Originally posted by joq on ROS Answers with karma: 25443 on 2012-04-03
Post score: 1
Answer:
Probably hard to tell w/o throwing more instrumentation into the code, but probably a bug. A little surprising as the detection code in rosdep 2 is a pretty direct port of the Electric code. The detection code runs " dpkg-query -W -f='${Package} ${Status}\n' " and simply parses the output.
Originally posted by kwc with karma: 12244 on 2012-04-03
This answer was ACCEPTED on the original site
Post score: 1
Original comments
Comment by joq on 2012-04-04:
OK. I'll open a defect ticket and we can pursue it there: https://code.ros.org/trac/ros/ticket/3914 | {
"domain": "robotics.stackexchange",
"id": 8854,
"tags": "rosdep, ros-electric"
} |
Integer array to string | Question: This function creates takes an int * buffer and creates a neatly formatted string (useful for printing the contents of an array).
Is the code easy to follow?
Is it efficient?
Am I allocating and freeing memory properly?
Bonus question: is there an idiomatic way to do this in C, such as through a set of library routines?
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
/*
 * Render `length` ints from `arr` as the string "{a, b, c}".
 *
 * Returns a newly heap-allocated, NUL-terminated string that the caller
 * must free(), or NULL if allocation fails.  An empty array
 * (length == 0) yields "{}"; `arr` may be NULL in that case.
 *
 * Fixes over the original version:
 *  - the result is now NUL-terminated (the old code reserved no room for
 *    the terminator and wrote the final ", " up to two bytes past the
 *    end of the buffer),
 *  - length == 0 no longer underflows the size_t expression
 *    2 * (length - 1),
 *  - no trailing ", " is emitted after the last element,
 *  - a single allocation replaces the length + 2 allocations.
 */
char *int_array_to_string(int *arr, size_t length)
{
    /* Worst case per element: sign + digits of INT_MIN (11 chars for a
     * 32-bit int; 32 is generous even for wider ints) plus ", ". */
    size_t capacity = 2 /* braces */ + 1 /* NUL */ + length * 34;
    char *result = malloc(capacity);
    if (result == NULL)
        return NULL;

    size_t pos = 0;
    result[pos++] = '{';
    for (size_t i = 0; i < length; i++)
    {
        /* Separator goes before every element except the first, so
         * there is never a dangling ", " at the end. */
        if (i > 0)
        {
            result[pos++] = ',';
            result[pos++] = ' ';
        }
        /* sprintf returns the number of characters it wrote. */
        pos += (size_t)sprintf(result + pos, "%d", arr[i]);
    }
    result[pos++] = '}';
    result[pos] = '\0';
    return result;
}
Answer:
result is not zero-terminated. You should allocate one byte more.
To my taste, there are too many allocations. Compute the required size, and allocating once. You already allocate 32 bytes per array element, so
char * result = malloc(length * 32 + whatever_extra_space_necessary);
Now recall that sprintf returns an amount of bytes it actually printed, and remove a superfluous call to strlen:
char * where = result;
for (size_t i = 0; i < length; i++) {
size_t printed = sprintf(where, "%d, ", arr[i]);
where += printed;
}
Your code prints an unpleasant ", " after the last element of an array. If it is a conscious decision, it is all right; if it is not, consider printing the first element separately:
print("%d", arr[0]);
for (i = 1; ....)
print(", %d", arr[i]); | {
"domain": "codereview.stackexchange",
"id": 43384,
"tags": "c, strings, array, formatting, integer"
} |
JavaScript Todo App | Question: I wrote a todo app with JavaScript and want to have it reviewed to see if I can make it better.
I think my JavaScript is messy and can be improved a lot. Please share your thoughts and way to improve this (I think my JavaScript can be half of the code to do the exact same thing).
Here is my app.
var mainInput = document.getElementById("mainInput");
var totalTodo = document.getElementById('totalTodos');
// NOTE(review): the handlers below use `totalTodos` (the implicit global the
// browser creates from the element id), not this `totalTodo` variable —
// confirm which one is intended.
// Set this to give unique id on each todo list
var todoid = '0';
// NOTE(review): both counters start as the STRING '0'; the first ++/-- coerces
// them to numbers, which is why `todoid === 1` works inside totalMsg.
var todoDone = '0';
// Add todos - Trigger on Enter
mainInput.addEventListener("keyup", function(e){
var key = e.which || e.keyCode;
// 13 is the Enter key code
if (key === 13) {
if (mainInput.value === ''){
// If empty value entered, ignore the keystroke
//console.log('Empty value');
return false;
}else{
// If value passed, build a new <li> for the todo
todoid++;
//console.log(todoid);
// Create element to hold new value
var node = document.createElement("LI");
// Give the li a unique id (e.g. "todo_3")
node.setAttribute('id', 'todo_' + todoid);
// Create i element - Check icon
var nodeTwo = document.createElement("I");
// Add i into li element
node.appendChild(nodeTwo);
// Add Classes into i
nodeTwo.className = 'ion-ios-circle-outline';
nodeTwo.setAttribute('id', 'child_todo_' + todoid);
// Save textnode from the value
var textnode = document.createTextNode(mainInput.value);
// Add that into li
node.appendChild(textnode);
// Create i element - Remove icon
var nodeThree = document.createElement("I");
// Add i into li element
node.appendChild(nodeThree);
// Add Classes into i
nodeThree.className = 'ion-ios-close-empty';
nodeThree.setAttribute('id', 'close_child_todo_' + todoid);
// Add new list on the main div
document.getElementById('todoList').appendChild(node);
// Clear input
mainInput.value = '';
//console.log(mainInput.value);
// Update total todos
totalMsg(todoid, todoDone);
//console.log('Total ' + todoid + ' todos to do');
}
}
});
// Build the "<b>N</b> thing(s) to do / <b>M</b> completed" summary and write
// it into the #totalTodos element. Returns the HTML string that was assigned.
function totalMsg(todoid, todoDone){
var summary;
if (todoid === 1) {
summary = '<b>' + todoid + '</b> thing to do / <b>' + todoDone + '</b> completed';
} else {
summary = '<b>' + todoid + '</b> things to do / <b>' + todoDone + '</b> completed';
}
return totalTodos.innerHTML = summary;
}
// Check whether `element` carries the CSS class `cls`.
// Pads both the element's class list and the target with spaces so that a
// match is always a whole class name, never a substring of one.
function hasClass(element, cls) {
var haystack = ' ' + element.className + ' ';
var needle = ' ' + cls + ' ';
return haystack.indexOf(needle) !== -1;
}
/* Completed todo
** Delegated click handler on <body>: when a todo's check icon
** (id "child_todo_N") is clicked, toggle its done state and refresh the
** summary line.
** http://jsfiddle.net/founddrama/ggMUn/
** http://stackoverflow.com/questions/646628/how-to-check-if-a-string-startswith-another-string
*/
document.querySelector('body').addEventListener('click', function(event) {
// NOTE(review): String.prototype.startsWith is ES6 — older browsers need a
// polyfill (or use indexOf(...) === 0 instead).
if (event.target.id.startsWith("child_todo_")) {
// Get clicked item's ID
var todoClicked = event.target.id;
//console.log(todoClicked);
var todoClickedElement = document.getElementById(todoClicked);
// Toggle the checkmark style on the icon itself...
todoClickedElement.classList.toggle('completed');
var parent_tce = todoClickedElement.parentElement.id;
//console.log(parent_tce);
var completedList = document.getElementById(parent_tce);
// ...and the strike-through style on the parent <li>
completedList.classList.toggle('completedlist');
// If the li now has the completedlist class, add to the done count;
// otherwise subtract from it.
//console.log(hasClass(todoClickedElement, 'completed'));
if(hasClass(completedList, 'completedlist')){
todoDone++;
totalMsg(todoid, todoDone);
}else{
todoDone--;
totalMsg(todoid, todoDone);
}
}
});
// Remove todo
// Delegated click handler on <body>: when a todo's close icon
// (id "close_child_todo_N") is clicked, delete the <li> and update counters.
document.querySelector('body').addEventListener('click', function(event) {
// Only react to elements whose ID starts with "close_child_todo_"
if (event.target.id.startsWith("close_child_todo_")) {
var closeClicked = event.target.id;
//console.log(closeClicked);
var closeClickedElement = document.getElementById(closeClicked);
var parent_cce = closeClickedElement.parentElement.id;
// Update total todo number
var willBeRemoved = document.getElementById(parent_cce);
todoid--;
// NOTE(review): `todoid` doubles as the id generator (incremented on add)
// and as the visible total (decremented here) — deleting a middle todo and
// then adding a new one can yield duplicate element ids. Confirm intended.
if(todoDone != 0 && hasClass(willBeRemoved, 'completedlist')){
// The removed todo was completed, so the done count shrinks too
todoDone--;
}
if(todoid === 0){
// No todos left: clear the summary line entirely
totalTodos.innerHTML = '';
}else{
totalMsg(todoid, todoDone);
}
// Remove clicked todo list
document.getElementById(parent_cce).remove();
}
});
*{
box-sizing: border-box;
-moz-box-sizing: border-box;
}
html, body{
font-family: 'Open Sans', sans-serif;
font-size: 16px;
color: #222;
}
.todoApp{
max-width: 400px;
margin: 0 auto;
}
.totalTodos{
font-size: 80%;
}
.todoList{
margin: 0;
padding: 0;
}
.todoApp li{
padding: 0;
list-style: none;
}
.todoApp i{
width: 30px;
height: 30px;
font-size: 24px;
text-align: center;
display: inline-block;
float: left;
color: #999;
padding: 0 4px;
cursor: pointer;
}
.completed:before{
content: "\f3fe" !important;
}
.completedlist{
text-decoration: line-through;
color: #12A74D;
}
.completedlist i:first-child{
color: #12A74D;
}
.todoInput li:first-child i{
display: none;
}
.todoApp i:last-child{
float: right;
}
.todoList{
margin-top: 10px;
}
.todoList > li{
background: #f8f8f8;
padding: 20px 5px;
border-bottom: 1px solid #eee;
}
/* Form elements */
.todoApp input{
width: 100%;
padding: 10px;
border: none;
border-bottom: 1px solid #ccc;
font-size: 16px;
font-weight: 300;
outline: none;
}
<html>
<!--
Todo app with Javascript
Created by O (Ohsik Park) Feb/2016
** http://www.OhsikPark.com
** Feel free to talk me! o@ohsikpark.com
-------------------------------------------------------------
License: GNU General Public License
License URI: http://www.gnu.org/licenses/gpl-2.0.html
Library used:
- Google Fonts (https://www.google.com/fonts)
- ionicons (http://ionicons.com/)
-->
<head>
<title>Todo app with Javascript</title>
<link type="text/css" rel="stylesheet" href="css/style.css">
</head>
<body>
<div class="todoApp" id="todoApp">
<h1>Todo App with Javascript</h1>
<li id="todoInput">
<input type="text" name="mainInput" id="mainInput" placeholder="What needs to be done?" />
</li>
<p id="totalTodos" class="totalTodos"></p>
<ul id="todoList" class="todoList"></ul>
</div>
<link href='https://fonts.googleapis.com/css?family=Open+Sans:400,700' rel='stylesheet' type='text/css'>
<link href='http://code.ionicframework.com/ionicons/2.0.1/css/ionicons.min.css' rel='stylesheet' type='text/css'>
<script src="js/todo.js"></script>
</body>
</html>
Answer: This code is really not so bad, however I see some things that could be changed.
Firstly I point out some code-style improvents.
13 as key code seems to be like a magic number (see Unnamed numerical constants). It would be better to create new var called ENTER_KEY_CODE and use it instead of 13. If you used more key codes, then you would create object keyCodes and save code for each needed key: var keyCodes = {ENTER: 12};.
It is not obligatory to use strict equality here: if (key === 13) {. So you can use simple ==.
If you want to exit from a function, there is no need to return false;. Just use return;.
You do it right when cache elements that will be used later:
var mainInput = document.getElementById("mainInput");
var totalTodo = document.getElementById('totalTodos');
But why did not you cache todoList element document.getElementById('todoList').appendChild(node);?
document.querySelector('body') is document.body. You can use the last one.
startsWith method belongs to ECMAScript 6. If you do not use any staff that compile your code, you should use methods that are more browser compatible. if (id.startsWith("close_child_todo_")) can be replaced with if (id.indexOf("close_child_todo_") == 0).
You add event listeners on body two times ("Completed todo" and "Remove todo"). It would be nice to add one listener, in which call needed functions:
document.querySelector('body').addEventListener('click', function(event) {
todoCompleted(event);
todoRemoved(event);
});
You have got a helper function hasClass. It is nice. Also you can create a getById function which will be a shortcut for document.getElementById.
You should have some conventions in naming variables, element classes / id's. Secondly, name variables by its context, nature. Let's see some examples.
var node = document.createElement("LI");
In this case node is a general name. body is node, todoApp is node. Though it is clear from code what this mean, it is better to use a semantic name. For instance, todoItem, todoTask. Also it makes sense to call names of element other than simple util variables. Add some prefix or suffix: $todoItem, todoItemEl.
This also fits for html classes. You have todoInput and mainInput. If I do not look at html code, I will not understand what is the difference between these elements. todoInput could be changed to todoInput-wrap and mainInput to todoInput. When naming html classes, you can follow some guidlines, for example BEM.
Follow the DRY rule. This makes your code more readable.
Do not make a lot of nesting blocks with if / else.
if (key === 13) {
if (mainInput.value === ''){
// If empty value entered
//console.log('Empty value');
return false;
}else{
This code could be formatted to the following:
if (keyCode != keyCodes.ENTER) return;
if (this.value === '') return;
Or even shorter:
if (keyCode != keyCodes.ENTER || this.value === '') return;
There are more tips here, but I do not want to write them all. Moreover, a more experienced JS developer would offer many more tips. What I advise you is to search for the typical solution on the internet (or here, on codereview / stackoverflow). For example, if you want to check whether an element has a class, search "javascript if element has class" or something like this. Observe several solutions, understand them and choose the one you understand best.
In the end I suggest some end example of remaked code.
Before start doing any application it will be nice to think about it: what it should do, what element should be there.
I will do todo app. It shoud have a name. Let's call it todo (this name will be used later). The name could be todoApp but app is not obligatory. It does not give us any additional notes. Also the app shoud have real title. In your case - "Todo App with Javascript". Let's call it title. You can choose name.
The app should take an input name from user. For this a simple input element will be fine. The name will be simple input. As no other input presents here the purpose of this element is clear.
When user submit input, the task should appear in another place. This will be collection of tasks. As you already called it list and it is a quite semantic name, this will be. So, list is a wrap where a task will appear. Even when no tasks exists, list exists always.
Now it is time to think about the main element of the app - task. There are several cases how we could call it. The first is task. The second - todo. However the main name todos represent the context and also contains todo part. So the next variant could be item.
In the end we can talk about small elements. This part
1 thing to do / 0 completed
could be called as a summary.
So, we have the names of the main elements. I will not comment all proccess. Just show the code I have got in the end.
Html markup:
<div class="todos">
<h1 class="todos__title">Todo App with Javascript</h1>
<div class="todos__input-wrap">
<input type="text" class="todos__input" placeholder="What needs to be done?" />
</div>
<p class="todos__summary">
<strong class="todos__count"></strong> thing to do /
<strong class="todos__done"></strong> completed
</p>
<ul class="todos__list"></ul>
</div>
<!-- Each id of a template has '_' at the end to mark it as template.
You can choose another marker or just use 'todos__item-template'. However it is seems like not a proper way as part 'template' does not change from time to time -->
<script type="template" id="todos__item_">
<li class="todos__item" id="todos__item-%%id">
<i class="todos__item__check ion-ios-circle-outline" data-id="%%id"></i>
%%name
<i class="todos__item__remove ion-ios-close-empty" data-id="%%id"></i>
</li>
</script>
Js code:
/**
* Each propery which is 'element' starts with '$'
*/
var keyCodes = {
ENTER: 13
};
var Todos = (function() {
// t - shortcut for 'this'
var t = {
count: 0,
done: 0,
templates: {},
init: function() {
this.$el = document.getElementsByClassName('todos')[0];
this.initTemplates();
this.defineEls();
this.initEvents();
this.updateDone(this.done);
},
initTemplates: function() {
this.templates.item = document.getElementById('todos__item_').innerHTML;
},
defineEls: function() {
['input', 'list', 'summary', 'count', 'done'].forEach(function(name) {
t.addEl(name);
});
},
addEl: function(elName) {
this['$' + elName] = this.$el.getElementsByClassName('todos__' + elName)[0];
},
initEvents: function() {
this.$input.addEventListener('keyup', function(ev) {
var keyCode = ev.which || ev.keyCode;
if (keyCode == keyCodes.ENTER && this.value !== '')
t.addItem(this.value);
});
this.$el.addEventListener('click', function(ev) {
if (ev.target.classList.contains('todos__item__check'))
t.check(ev.target);
if (ev.target.classList.contains('todos__item__remove'))
t.remove(ev.target);
});
},
remove: function(btn) {
var item = this.get(btn.getAttribute('data-id'));
item.parentNode.removeChild(item);
this.updateDone(--this.done);
if (!--this.count) this.$summary.style.display = 'none';
this.updateCount();
},
check: function(btn) {
var item = this.get(btn.getAttribute('data-id'));
if (item.classList.contains('todos__item--completed')) {
btn.classList.remove('ion-ios-checkmark-outline');
btn.classList.add('ion-ios-circle-outline');
item.classList.remove('todos__item--completed');
this.updateDone(--this.done);
} else {
btn.classList.add('ion-ios-checkmark-outline');
btn.classList.remove('ion-ios-circle-outline');
item.classList.add('todos__item--completed');
this.updateDone(++this.done);
}
},
get: function(id) {
return document.getElementById('todos__item-' + id);
},
addItem: function(name) {
this.renderEl(name);
this.updateCount(this.count);
this.$summary.style.display = 'block';
},
updateCount: function(count) {
if (typeof count == 'undefined') count = this.count;
this.$count.innerText = count;
},
updateDone: function(doneCount) {
if (doneCount < 0) this.done = doneCount = 0;
this.$done.innerText = doneCount;
},
renderEl: function(name) {
var id = ++this.count;
var html = this.templates.item.replace(/%%id/g, id).replace('%%name', name);
this.$list.insertAdjacentHTML('beforeend', html);
},
};
return t;
})();
Todos.init();
And styles:
* {
box-sizing: border-box;
-moz-box-sizing: border-box;
}
html, body{
font-family: 'Open Sans', sans-serif;
font-size: 16px;
color: #222;
}
.todos {
max-width: 400px;
margin: 0 auto;
}
.todos__input-wrap {
padding: 0;
list-style: none;
}
.todos__input {
width: 100%;
padding: 10px;
border: none;
border-bottom: 1px solid #ccc;
font-size: 16px;
font-weight: 300;
outline: none;
}
.todos__summary {
font-size: 80%;
display: none;
}
.todos__list {
margin-top: 10px;
margin: 0;
padding: 0;
}
.todos__item {
background: #f8f8f8;
padding: 20px 5px;
border-bottom: 1px solid #eee;
list-style: none;
}
.todos__item--completed {
text-decoration: line-through;
color: #12A74D;
}
.todos__item--completed .todos__item__remove {
color: #12A74D;
}
.todos__item__remove,
.todos__item__check {
width: 30px;
height: 30px;
font-size: 24px;
text-align: center;
display: inline-block;
float: left;
color: #999;
padding: 0 4px;
cursor: pointer;
}
.todos__item__remove {
float: right;
}
While this code is far from perfection, it has visible improvements.
Firstly, I use templates. Creating nodes directly is fine, but it is difficult to read such a code. When you have templates, you can made much more manipulation easely. Also it allows to use data. I made own function which parse template, but in real app it is preferred to use template engine (handlebars, mustache, jade, nunjucks).
Secondly, I separated blocks to methods. Now it has takes quite a little time to discover what happens in the code.
Now you can write your own example of the new app. Hope this will be helpfull. | {
"domain": "codereview.stackexchange",
"id": 18521,
"tags": "javascript, to-do-list"
} |
Tic-Tac-Toe Code | Question: I'm a beginner in Python programming, been doing it for only a few weeks and I decided to make a tic-tac-toe game in the Python interpreter. I'm looking for feedback on the program and any way I can improve it and shorten it and point out inefficiencies/errors since I've done everything I can to get rid of all errors I could think of.
I would appreciate if it was advice useful to my level and not all about high-level Python code, though I wouldn't mind some of it if its a very useful concept to use in this program and others.
Here's the code:
print("Welcome to the game of tic-tac-toe, you know how to play, so go on and have fun!")
print("Just know that the positions to input are of the form: ")
print("| 1 | 2 | 3 |")
print("| 4 | 5 | 6 |")
print("| 7 | 8 | 9 |")
p1, p2, p3, p4, p5, p6, p7, p8, p9 = " ", " ", " ", " ", " ", " ", " ", " ", " " # p stands
#for "position", so p1 means position 1, p2 means #position 2 etc.
game_playout = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i'] # the letters are placeholders
#until the real game starts, after which each letter #will be chronologically replaced with a
#0 or a 1, 0 representing 'o' and 1 representing 'x'
won = 0
count = 0
x_pos = None
o_pos = None
used_positions = [ ]
def win_check():
if game_playout[0] == game_playout[1] == game_playout[2]:
return 1
elif game_playout[3] == game_playout[4] == game_playout[5]:
return 1
elif game_playout[6] == game_playout[7] == game_playout[8]:
return 1
elif game_playout[0] == game_playout[3] == game_playout[6]:
return 1
elif game_playout[0] == game_playout[4] == game_playout[8]:
return 1
elif game_playout[1] == game_playout[4] == game_playout[7]:
return 1
elif game_playout[2] == game_playout[5] == game_playout[8]:
return 1
elif game_playout[2] == game_playout[4] == game_playout[6]:
return 1
def layout_changer():
global p1, p2, p3, p4, p5, p6, p7, p8, p9
if x_pos == 1:
p1 = "x"
elif x_pos == 2:
p2 = "x"
elif x_pos == 3:
p3 = "x"
elif x_pos == 4:
p4 = "x"
elif x_pos == 5:
p5 = "x"
elif x_pos == 6:
p6 = "x"
elif x_pos == 7:
p7 = "x"
elif x_pos == 8:
p8 = "x"
elif x_pos == 9:
p9 = "x"
if o_pos == 1:
p1 = "o"
elif o_pos == 2:
p2 = "o"
elif o_pos == 3:
p3 = "o"
elif o_pos == 4:
p4 = "o"
elif o_pos == 5:
p5 = "o"
elif o_pos == 6:
p6 = "o"
elif o_pos == 7:
p7 = "o"
elif o_pos == 8:
p8 = "o"
elif o_pos == 9:
p9 = "o"
while won != 1:
x_pos = input("Choose your position to place x (1 to 9) ")
while not x_pos.isnumeric():
print("Error! Looks like you've haven't entered a number, please try again!")
x_pos = input("Choose your position to place x (1 to 9) ")
x_pos = int(x_pos)
while x_pos not in [1, 2, 3, 4, 5, 6, 7, 8, 9] or x_pos in used_positions:
print(f"| {p1} | {p2} | {p3} |")
print(f"| {p4} | {p5} | {p6} |")
print(f"| {p7} | {p8} | {p9} |")
print("Invalid position! Please try another value:")
x_pos = int(input("Choose your position to place x (1 to 9) "))
used_positions.append(x_pos)
count += 1
game_playout.pop(x_pos - 1)
game_playout.insert(x_pos - 1,
1) # We replace the (x_pos-1)th index element in game_playout with 1,where 1 indicates an 'x'
layout_changer()
print(f"| {p1} | {p2} | {p3} |")
print(f"| {p4} | {p5} | {p6} |")
print(f"| {p7} | {p8} | {p9} |")
won = win_check()
if won == 1:
print("x has won! Congratulations!")
print()
print("Thank you for playing the game! Hope you enjoyed!")
quit()
elif count >= 9:
print()
print("Draw!")
print("Thank you for playing the game! Hope you enjoyed!")
quit()
o_pos = input("Choose your position to place o (1 to 9) ")
while not o_pos.isnumeric():
print("Error! Looks like you've haven't entered a number, please try again!")
o_pos = input("Choose your position to place o (1 to 9) ")
o_pos = int(o_pos)
while o_pos not in [1, 2, 3, 4, 5, 6, 7, 8, 9] or o_pos in used_positions:
print(f"| {p1} | {p2} | {p3} |")
print(f"| {p4} | {p5} | {p6} |")
print(f"| {p7} | {p8} | {p9} |")
print("Invalid position! Please try another value:")
o_pos = int(input("Choose your position to place o (1 to 9) "))
used_positions.append(o_pos)
count += 1
game_playout.pop(o_pos - 1)
game_playout.insert(o_pos - 1,
0) # We replace the (o_pos-1)th index element in game_playout with 0,where 0 indicates an 'o'
layout_changer()
print(f"| {p1} | {p2} | {p3} |")
print(f"| {p4} | {p5} | {p6} |")
print(f"| {p7} | {p8} | {p9} |")
won = win_check()
if won == 1:
print("o has won! Congratulations!")
print()
print("Thank you for playing the game! Hope you enjoyed!")
Answer: Where to start…
tuple assignment
p1, p2, p3, p4, p5, p6, p7, p8, p9 = " ", " ", " ", " ", " ", " ", " ", " ", " "
Is that the correct number of " "? Did you count? Of course you did, and yes it is the correct number since tuple assignment will generate an exception if a mismatch occurs. But, you don’t need to type, or count; just use tuple multiplication/replication:
p1, p2, p3, p4, p5, p6, p7, p8, p9 = (" ",) * 9
pop/insert
This code is very obtuse and obfuscated:
game_playout.pop(x_pos - 1)
game_playout.insert(x_pos - 1,
1) # We replace the (x_pos-1)th index element in game_playout with 1,where 1 indicates an 'x'
You are popping (removing) an element, and then inserting a new element at the same position. A lot of busy work moving elements backwards in the list for the pop, and then moving them forward again during the insert. Instead, you could simply write:
game_playout[x_pos - 1] = 1 # Replace the (x_pos-1)th element with 1, indicating an 'x'
WET -vs- DRY
Why do you Write Everything Twice (WET)? Don’t Repeat Yourself (DRY)!
You have this code several times:
print(f"| {p1} | {p2} | {p3} |")
print(f"| {p4} | {p5} | {p6} |")
print(f"| {p7} | {p8} | {p9} |")
You should move it into a function. You’ve demonstrated you know how to use them already.
Return Type Consistency
win_check() will return either an integer 1, or it doesn’t return anything at all, so it implicitly returns None. That makes it hard to reason about. You can’t say it returns an int, nor can you say it returns a bool.
In the code which calls win_check(), you've ended up with if won == 1, and while won != 1 which are opaque conditions. What does won == 1 mean? Would won == 2 mean player 2 has won? Is won == 0 a draw game, or game not over yet? It isn't obvious. In actuality, won == 0 before the first move is made, and suddenly becomes won == None afterwards, until someone wins. Very confusing.
The function should return a boolean True or False. Instead of return 1, use return True. At the end, add an explicit return False.
Lists -vs- Sets
You have the list used_positions recording which moves have been made, and append moves to it, and test pos in used_positions to ensure moves aren’t repeated. The correct data structure to use for this would be a set.
used_positions = set()
...
while ... or x_pos in used_positions:
...
used_position.add(x_pos)
...
Using sets reduces the search time of x_pos in used_positions to \$O(1)\$ … effectively constant time, instead of the \$O(N)\$ time of searching a list.
But …
Three Different versions of the Same Thing
You have game_playout representing the played positions for determining a win condition. You have used_positions for recording which moves have been played, to prevent repeats, and p1, p2, … p9 for displaying which positions have been played by which player.
These are all the same thing! Or they should be!
Consider:
grid = [" "] * 9
That creates a list of length 9, with each element containing a space. Perfect for displaying the tic-tac-toe grid:
def print_grid():
print(f"| {grid[0]} | {grid[1]} | {grid[2]} |")
print(f"| {grid[3]} | {grid[4]} | {grid[5]} |")
print(f"| {grid[6]} | {grid[7]} | {grid[8]} |")
Or slightly more compactly, as:
def print_grid():
print(("| {} | {} | {} |\n"
"| {} | {} | {} |\n"
"| {} | {} | {} |").format(*grid))
Note: There are no commas between the above strings, so Python automatically concatenates them into one long string containing 9 {} format codes. The *grid explodes the elements of the grid list into individual arguments for the .format() function.
As moves are made, the grid can be filled in with x and o values:
grid[x_pos - 1] = 'x'
...
grid[o_pos - 1] = 'o'
… which eliminates the ugly mess called layout_changer().
The used_positions check becomes simply testing if the spot is not “empty”:
while ... or grid[x_pos - 1] != " ":
...
Finally, win_check() needs to be fixed so it doesn’t declare 3 spaces in a row as a winning combination:
def win_check():
if grid[0] == grid[1] == grid[2] != ' ':
return True
elif grid[3] == grid[4] == grid[5] != ' ':
return True
…
return False
Draw
You determine a draw by maintaining a count and checking if it reaches 9. This can again be another variant of the grid. The game is a draw if nobody won and no valid moves remain. No valid moves remain means grid contains no empty cells.
elif " " not in grid:
# draw game
No more counting required.
Don’t quit!
Avoid quit(). Never ever call it, as it exits the Python interpreter. This makes testing nigh impossible, since tests are usually written in Python, and don’t get a chance to declare pass or failure if the Python interpreter vanishes before they can determine the pass/fail state!
Instead, move the mainline code into a function, and simple return from the function instead of calling quit().
X/O
The X player and O player code is virtually identical. Different variables are used (x_pos -vs- o_pos) but these could be replaced with just pos eliminating that difference. Then the only difference is the message printed, and the code that is assigned to the given position.
If the code is the same, it too can be moved into a function.
def player_move(symbol):
pos = input(f"Choose your position to place {symbol} (1 to 9) ")
while not pos.isnumeric():
print("Error! Looks like you've haven't entered a number, please try again!")
pos = input(f"Choose your position to place {symbol} (1 to 9) ")
pos = int(pos)
while pos not in [1, 2, 3, 4, 5, 6, 7, 8, 9] or grid[pos - 1] != " ":
print_grid()
print("Invalid position! Please try another value:")
pos = int(input(f"Choose your position to place {symbol} (1 to 9) "))
grid[pos - 1] = symbol
Avoid Global variables
At this point, it should be obvious the only global variable left is grid. This could simply be passed to the various functions as an argument, eliminating the last global variable.
Why are global variables bad? In 150 lines, you have 15 global variables. Which functions can change a global variable? Which functions use global variables? These are hard questions. If you expand your code to 1500 lines, say to add a TkInter GUI interface for your game, are you going to end up with 150 global variables? Now reasoning about which functions use and change which variables becomes even harder.
Another question: what variables do you need to reset if you wanted to play a second game?
What if you wanted to player against the computer, and you'd like the computer to "think" about playing various moves to try and play smarter. Currently you have to make a tentative move, evaluate it, then "undo" the move to explore another possibility. If you used local variables, you could make a copy of the game state to try out a move, and then discard it; nothing to "undo".
Reworked code
Here is a reworking of your code, using the above points. The code has gone from 150 lines to 74. I've introduced a few extra concepts for you to study try...except..., import itertools, any(...) and all(...). Hopefully, these are not too far beyond your current level, so you find them understandable.
import itertools
BLANK = ' '
X = 'x'
O = 'o'
WINNING_ROWS = (
(0, 1, 2), (3, 4, 5), (6, 7, 8), # rows
(0, 3, 6), (1, 4, 7), (2, 5, 8), # columns
(0, 4, 8), (2, 4, 6), # diagonals
)
def print_grid(grid):
print("| {} | {} | {} |\n"
"| {} | {} | {} |\n"
"| {} | {} | {} |".format(*grid))
def win_check(grid, symbol) -> bool:
return any(all(grid[index] == symbol
for index in row)
for row in WINNING_ROWS)
def player_move(grid, symbol):
while True:
try:
pos = input(f"Choose your position to place {symbol} (1 to 9): ")
pos = int(pos) - 1
if pos not in range(9):
print("Error! Looks like you've entered an invalid position, please try again!")
elif grid[pos] != BLANK:
print("Error! Looks like you've entered a filled position, please try again!")
else:
break
except ValueError:
print("Error! Looks like you've haven't entered a number, please try again!")
grid[pos] = symbol
print_grid(grid)
def game():
grid = [BLANK] * 9
print_grid(grid)
symbols = itertools.cycle((X, O))
while any(cell == BLANK for cell in grid):
symbol = next(symbols)
player_move(grid, symbol)
if win_check(grid, symbol):
print(f"{symbol} has won! Congratulations!")
break
else:
print("Draw!")
print()
print("Thank you for playing the game! Hope you enjoyed!")
def main():
print("Welcome to the game of tic-tac-toe, you know how to play, so go on and have fun!")
print("Just know that the positions to input are of the form: ")
print("| 1 | 2 | 3 |")
print("| 4 | 5 | 6 |")
print("| 7 | 8 | 9 |")
print()
game()
if __name__ == '__main__':
main() | {
"domain": "codereview.stackexchange",
"id": 43916,
"tags": "python, beginner, python-3.x, tic-tac-toe"
} |
Demodulation of FSK signal | Question: What kind of FSK signal is this and what demodulation technique can I use to demodulate it. Why is the phase of the 1's changing like that and how does that effect how it needs to be demodulated. I will be programming the demodulation in python.
I am trying to low pass at 900Hz and then generate the envelope but I am not getting the desired result.
import numpy as np
import matplotlib.pyplot as plt
import scipy.signal as signal
from scipy.fftpack import fft, rfft, rfftfreq, irfft
import scipy.signal.signaltools as sigtool
from scipy.io import wavfile
fs, data = wavfile.read('jx3p.wav')
h = signal.firwin(numtaps = 300, cutoff = 900, fs=fs)
data = signal.lfilter(h, 1, data)
data = np.abs(sigtool.hilbert(data))
import matplotlib.pyplot as plt
plt.plot(data)
plt.show()
UPDATE
Here is the code to multiple the signal by the same signal with a 12 sample delay and then add the resulting signal with a 3 sample delay of itself.
import matplotlib.pyplot as plt
from scipy.fftpack import rfft, rfftfreq
from scipy.io import wavfile
from scipy.signal import blackman
fs, data = wavfile.read('jx3p.wav')
x = 286600
y = x+22050
data = data[x:y]
m = max(abs(data))
data = [d/m for d in data]
delay = 12
data1 = tuple(a*b for a,b in zip(data[delay:],data))
filter_delay = int(delay/4)
data2 = tuple(a+b for a,b in zip(data1[filter_delay:],data1))
plt.plot(data[:300],"r")
plt.plot(data1[:300],"b")
plt.plot(data2[:300],"g")
plt.show()
Answer: For this specific waveform as described, the following would demodulate the signal from the Frequency Shift Modulated Input into a square wave output:
This works given the duration of the "0" symbol is 12 samples by using a "delay and multiply" frequency discriminator. The multiply will have a strong double frequency component that needs to be filtered out (and can be used for timing recovery if the timing was not synchronous to the symbol rate as further detailed below), and this is done with a simple delay of 3 samples and add, which would provide a null at twice the frequency of the zero symbol. The "1" symbol as shown by the OP is a low frequency symbol in the FSK modulation, and observe how in this case it is approximately formed using the same half cycle of the higher frequency "0" symbol followed by a zero magnitude for a full one and a half cycle duration before continuing with the negative half cycle followed by a zero magnitude for a duration for one and a half cycles (thus it is 4 times the duration of the "1" symbol.) This is convenient as the output for the case of demodulating the "0" symbol would be naturally nulled given the block diagram above without the need for subsequent filtering.
Notice with this approach that at the start of each symbol, if the symbol is a 1 the initial first half cycle result will be a sin^2 always regardless of the next symbol, but if the symbol is a zero the result will be zero throughout the duration of the zero. So in this case with this particular waveform, the way the lower frequency is constructed actually helps for a cleaner demodulation, since it provides the nulled response if the delay is the duration of the zero symbol (Maybe the waveform designer had this in mind which is why it has the peculiar form for the lower frequency).
The "Square Wave" output would be further filtered through an "integrate and dump" process prior to making a final symbol decision: sum over 12 samples in the symbol duration and decide if the result if greater or less than a decision threshold. If greater a "0" was demodulated, and if less a "1" was demodulated. Given the atypical longer duration between the symbols, four "1"'s in a row is the presence of an actual "1" symbol.
This structure can then be readily used with standard timing recovery for recovering the symbol clock boundaries. If the symbol clock is synchronized (coherent) to the waveform sample clock, the timing recovery is quite trivial. Two options show below:
The strong double frequency component can be used for symbol timing recovery for the case of the symbols no being coherent to the sample clock. If they are coherent then symbol timing recovery is simply aligning the sample edge to a start of a symbol which can easily done with a simple threshold detection at the Demodulated FSK output as shown in the block diagram (for lower SNR conditions it would be more robust to average the estimated timing position even such a threshold approach was used, such that the timing position does not change abruptly based on the result of any one threshold detection). Once detected, a modulo 12 counter starts, and every time the counter rolls over the symbol is selected at the output. The more robust timing approach would compare the roll-over timing clock and the FSK output threshold detector impulses to establish a timing error (another multiplier would do this), which would then be accumulated in a timing loop to adjust the actual start time of the cyclical timing recovery counter (again only need to do that for a very robust solution in the presence of low SNR, for the waveform as shown all that would not be necessary).
See this link where I detail non-coherent FSK demodulation approaches that further how a delay and multiply forms a frequency discriminator:
https://electronics.stackexchange.com/posts/293723/edit
With this graphic in mind; a delay multiply is a frequency discriminator:
Optional coherent techniques would have a 3 dB SNR advantage but require more processing. This could be done by matched filter correlation to the two symbols (have replicas of the two symbols and multiply and accumulate to each one to decide which symbol is observed). The delay and multiply followed by delay and add is notably simpler. | {
"domain": "dsp.stackexchange",
"id": 8076,
"tags": "python, demodulation, scipy, fsk"
} |
Can we define the effective mass or the moving mass of a photon? | Question: I know that the rest mass of a photon is zero. but the photon can be bent by gravity (which can also be explained by the curvature of space-time due to the effect of mass), this implies that it must have some effective mass, while in motion, therefore does it also bend space-time? can the mass of the photon be defined in common(SI) units, how?
Answer: The concept of relativistic mass is obsolete. We do not need to ascribe mass to a photon in order to see that it distorts space:
As an excitation of the electromagnetic field, a photon contributes to the stress-energy tensor $T_{\mu\nu}$, which, through the Einstein field equations, will distort the metric on spacetime, and thus exert gravity. | {
"domain": "physics.stackexchange",
"id": 17290,
"tags": "general-relativity, mass, mass-energy"
} |
Why the displacement current is zero outside the capacitor? | Question: Always when I study displacement
Current it is zero outside the capacitor because the electric field is zero outside
For example this photo
Why this electric field on the surface s one is zero
I wondering why is that .charges move in the circuit because of electric field
Answer:
Always when I study displacement Current it is zero outside the capacitor because the electric field is zero outside
That is "mostly true". The field created by a charged capacitor is mostly contained between the plates of the capacitor. However there are "fringing" field lines, and a very small amount of field will go from the outside of one plate to the outside of the other.
Why this electric field on the surface s one is zero I wondering why is that .charges move in the circuit because of electric field
The electric field through the surface s is near 0, but not exactly. In particular, if there is current flowing through the wire, then there is an electric field corresponding to the microscopic version of Ohm's Law.
$$\vec{J} = \sigma\vec{E}$$
Where $\vec{J}$ is the current density, $\sigma$ is the conductivity of the wire material, and $\vec{E}$ is the electric field. | {
"domain": "physics.stackexchange",
"id": 79752,
"tags": "electromagnetism, electric-circuits, electricity, maxwell-equations"
} |
Mechanism of the reaction of cyclohexanone and nitric acid to form Adipic acid | Question: Can I get some help in figuring out the mechanism for this reaction.
Adipic acid is a molecule with two carboxylic groups,but I can't see how that happens. My initial thoughts involved that the carbonyl group in cyclohexanone is protonated,but I couldn't get any further as I couldn't figure out what would happen after that.
Answer: It think this reaction goes via following mechanism. | {
"domain": "chemistry.stackexchange",
"id": 8499,
"tags": "organic-chemistry, reaction-mechanism, carbonyl-compounds"
} |
Apparent paradox concerning Heisenberg's uncertainty principle | Question: I have just begun my Introduction to Quantum Mechanics course in my undergrad and I am trying to understand the uncertainty principle on a fundamental level. I think the best way to understand the fundamentals of this principle is to question the principle and understand why doing so is wrong.
Consider the following thought experiment:
I have a setup consisting of two independent regions. Region A with an
electric field along the positive x-axis, lets say, and Region B with
a magnetic field, also along the positive x-axis. In Region A I have a
cathode and the electric field causes a potential difference thereby
accelerating the electrons from rest to a particular velocity (along
positive x-axis).
Let us just focus on a single electron. I know the exact velocity
(magnitude and direction) to which this electron is accelerated to
(energy conservation). Now this electron is going to enter the
magnetic field in a direction that is parallel to the field. I know
that the electron is going to move in a straight line in the magnetic
field, without losing any energy as the magnetic field does zero work.
So I know the exact momentum of the electron at all times as this
momentum is not going to change as long as the electron is inside the
magnetic field. So uncertainty in momentum is zero. Since I know the
exact trajectory of the electron, the uncertainty in position is also
zero and I know both of these simultaneously.
So it appears like this though experiment violates the Heisenberg uncertainty principle.
What is the flaw in my argument? Why can't this happen?
Answer: You say:
Let us just focus on a single electron. I know the exact velocity (magnitude and direction) to which this electron is accelerated to (energy conservation).
but this isn't true. You know the electron energy has increased by $E$ eV, where $E$ is the potential difference you're using but you don't know what its energy was initially i.e. when it left the anode and before being accelerated by your field. The only way you can know the initial momentum of the electron precisely is if it's completely delocalised i.e. you don't know where it is or when it was emitted.
I applaud your attempts at understanding the uncertainty principle, but you are going about it the wrong way. You need to start by writing down the wavefunction for a free particle. The eigenfunctions for a free particle are infinite plane waves, which have a precise momentum but completely unspecified position. Assuming you start with a partially localised particle you construct its wavefunction by using Fourier synthesis i.e. you build up the initial probability distribution by summing (an infinite number of) plane waves. Because this requires combining waves with different momenta that means your partially localised particle has a spread of momenta. | {
"domain": "physics.stackexchange",
"id": 28023,
"tags": "quantum-mechanics, heisenberg-uncertainty-principle, thought-experiment"
} |
FizzBuzz - officially now in CLISP | Question: I had to jump on the bandwagon for this one. The task does a great job of helping you learn a language you're unfamiliar with.
So today I present you with my version of the infamous FizzBuzz game in Common Lisp. This is really the first "program" I've made in CLISP, and even though it's pretty small, I'm quite proud!!
I'd really love to know if there's an even easier way to make the same functionality. I notice a lot of repetition in terms of is-multiple, so if anyone has any ideas how I could DRY up all that, I'd appreciate it. Also, is there a formatting standard for symbol names and such in Lisp?
fizzbuzz.lisp
(defun is-multiple (i against)
(= (mod i against) 0))
(defun num-action (i)
(cond ((and (is-multiple i 3) (is-multiple i 5)) (print "FizzBuzz"))
((is-multiple i 3) (print "Fizz"))
((is-multiple i 5) (print "Buzz"))
(T (print i))))
(dotimes (i 101) (num-action i))
Can it be cleaned up any further?
Answer: Do not recompute is-multiple repeatedly by either binding the value:
(defun num-action (i)
(let ((i3 (is-multiple i 3))
(i5 (is-multiple i 5)))
(cond ((and i3 i5) (print "FizzBuzz"))
(i3 (print "Fizz"))
(i5 (print "Buzz"))
(T (print i)))))
or by using if:
(defun num-action (i)
(if (is-multiple i 3)
(if (is-multiple i 5)
(print "FizzBuzz")
(print "Fizz"))
(if (is-multiple i 5)
(print "Buzz")
(print i))))
PS. please fix indentation | {
"domain": "codereview.stackexchange",
"id": 23120,
"tags": "beginner, lisp, common-lisp, fizzbuzz"
} |
Is there a mechanism that allows this kind of piston/plunger movement? | Question: I am trying to create a prototype for a gravity energy storage system, and I need to release potential energy stored in a heavy load of mass M, raised to a height *H.
I intend to use the mass traversing the length, to drive a piston, which pushes upon a hydraulic fluid to turn the P.E into K.E.
I am at a loss as to what mechanism I can use to efficiently (and simply) translate the descent into a vertical movement of the piston - since the piston will be contained in a sealed cylinder.
I have included a rather crude sketch below:
Where:
The parallelogram represents the load bearing surface,
The two dark circles represent the piston/plunger
My question is this: Is there a mechanism that allows a plunger/piston to be moved along an axis WITHIN a cylinder, by means of external force applied OUTSIDE the cylinder containing the piston?
I think what I'm looking for is some kind of telescopic mechanism.
Answer: Energy storage is a fairly common feature in hydraulic systems. Storing energy via nitrogen compression in accumulators is far more cost efficient than storing via mass/gravity. Personally I would just use something off the shelf. Here are a few popular brands:
https://ph.parker.com/us/en/piston-style-accumulators-parker-a-series
https://www.boschrexroth.com/en/xc/products/product-groups/industrial-hydraulics/topics/cylinders/large-hydraulic-cylinders/products-and-features/hydraulic-piston-accumulators/index
https://www.hawe.com/en-us/products/product-search-by-category/hydraulic-accessories/accumulators/hps/
https://www.hydac.com/shop/en/hydraulic-accumulators
I'm highlighting piston type accumulators since you specifically asked for that style. Other varieties such as diaphragm type or bladder type are usually cheaper per kJ of storage. Piston type accumulators are really for specialty applications such as extreme pressure (700+ bar), corrosive fluids that dissolve bladder materials, narrow installation dimensions, horizontal installation (bladder type service life is best with vertical installation), etc.
Note that with any type of energy storage, there are many safety considerations regarding how to dissipate energy in machine failure situations. Don't simply throw together an accumulator + pump + hoses. You need additional valves for pressure relief, accumulator dump, pump outlet check valve, etc. Further reading -
https://www.machinerylubrication.com/Read/30331/hydraulic-accumulators-dangers | {
"domain": "engineering.stackexchange",
"id": 4615,
"tags": "mechanical-engineering, mechanisms, hydraulics, pistons"
} |
Reversing a String using Stack | Question: Is this code okay?
import java.util.Stack;
public class StackReverse {
public static void main(String[] args) {
final String inputString = "code review";
final String reversed = reverseString(inputString);
System.out.println("The reversed string is " + reversed);
}
public static String reverseString(String originalString) {
Stack<Character> stack = new Stack<>();
String reversed = "";
for (int i = 0; i < originalString.length(); i++) {
char ch = originalString.charAt(i);
stack.push(ch);
}
for (int i = 0; i < originalString.length(); i++) {
char ch = stack.pop();
reversed = reversed + ch;
}
return reversed;
}
}
Answer: Unicode is hard to get right, especially in Java as it has a more or less broken concept of a “character”. A Unicode code point is a 21-bit number. These code points are encoded to bytes with the UTF-8 or UTF-16 encodings (well, there are a couple more…). But when Java was created, Unicode only had characters in a 16-bit range, and code points were encoded with the (now deprecated) UCS-2 encoding.
There's a lot of pain from the fact that the 21-bit Unicode code points don't fit into a single 16-bit char or Character in Java. Two consecutive chars might actually be surrogate pairs. When reversing a string, we have to special-case these. We can use the Character.isHighSurrogate(char) function to test whether a given char starts a surrogate pair. If we encounter such a code point, we advance to the next char in the string, and push it onto the stack first:
for (int i = 0; i < originalString.length(); i++) {
char ch = originalString.charAt(i);
if (Character.isHighSurrogate(ch)) {
i++;
if (i < originalString.length()) {
stack.push(originalString.charAt(i));
}
}
stack.push(ch);
}
As a test case, you can reverse a string with the smiley character : "hi \ud83d\ude03". Reversed, it should be "\ud83d\ude03 ih".
It is best to think of Java's char as “an UTF-16 code unit” (see Wikipedia for all the details about UTF-16 you didn't want to know, but should). To learn about what Unicode is, and how to tame it, start with Joel Spolsky's Unicode blog post. | {
"domain": "codereview.stackexchange",
"id": 9807,
"tags": "java, beginner, strings, stack"
} |
Object recognition object detected but wrong position | Question:
While trying to set up object recognition on a robot created in Fusion 360 using the find_object_2d package, I am able to detect my object (coke can); however, when I open rviz, the tf of the object is recognized as above the camera rather than in front of it.
Below is my urdf and 3d detection launch file.
URDF:
<gazebo reference="camera_1">
<material>${body_color}</material>
<mu1>0.2</mu1>
<mu2>0.2</mu2>
<selfCollide>true</selfCollide>
</gazebo>
<gazebo reference="camera_1">
<sensor name="camera_1" type="depth">
<update_rate>20</update_rate>
<camera>
<horizontal_fov>1.047198</horizontal_fov>
<image>
<width>640</width>
<height>480</height>
<format>R8G8B8</format>
</image>
<clip>
<near>0.05</near>
<far>3</far>
</clip>
</camera>
<plugin name="camera_1_controller" filename="libgazebo_ros_openni_kinect.so">
<baseline>0.2</baseline>
<alwaysOn>true</alwaysOn>
<updateRate>1.0</updateRate>
<cameraName>camera_1_ir</cameraName>
<imageTopicName>/camera_1/color/image_raw</imageTopicName>
<cameraInfoTopicName>/camera_1/color/camera_info</cameraInfoTopicName>
<depthImageTopicName>/camera_1/depth/image_raw</depthImageTopicName>
<depthImageInfoTopicName>/camera_1/depth/camera_info</depthImageInfoTopicName>
<pointCloudTopicName>/camera_1/depth/points</pointCloudTopicName>
<frameName>camera_1</frameName>
<pointCloudCutoff>0.5</pointCloudCutoff>
<pointCloudCutoffMax>3.0</pointCloudCutoffMax>
<distortionK1>0.00000001</distortionK1>
<distortionK2>0.00000001</distortionK2>
<distortionK3>0.00000001</distortionK3>
<distortionT1>0.00000001</distortionT1>
<distortionT2>0.00000001</distortionT2>
<CxPrime>0</CxPrime>
<Cx>0</Cx>
<Cy>0</Cy>
<focalLength>0</focalLength>
<hackBaseline>0</hackBaseline>
</plugin>
<link name="camera_1">
<inertial>
<origin rpy="0 0 0" xyz="0.012500000000000011 0.0 0.0"/>
<mass value="0.3575"/>
<inertia ixx="0.001266" ixy="0.0" ixz="0.0" iyy="9.3e-05" iyz="0.0" izz="0.00121"/>
</inertial>
<visual>
<origin rpy="0 0 0" xyz="-0.25 -0.0 -0.15"/>
<geometry>
<mesh filename="package://moodz_description/meshes/camera_1.stl" scale="0.001 0.001 0.001"/>
</geometry>
<material name="silver"/>
</visual>
<collision>
<origin rpy="0 0 0" xyz="-0.25 -0.0 -0.15"/>
<geometry>
<mesh filename="package://moodz_description/meshes/camera_1.stl" scale="0.001 0.001 0.001"/>
</geometry>
</collision>
</link>
<origin rpy="0 0 0" xyz="0.25 0.0 0.15"/>
<parent link="base_link"/>
<child link="camera_1"/>
</joint>
Launch Files:
3d Object_Recognition:
<launch>
<node name="find_object_3d" pkg="find_object_2d" type="find_object_2d" output="screen">
<param name="gui" value="true" type="bool"/>
<param name="settings_path" value="~/.ros/find_object_2d.ini" type="str"/>
<param name="subscribe_depth" value="true" type="bool"/>
<param name="session_path" value="$(find obj_detection)/sessions/coke_session.bin" type="str"/>
<param name="objects_path" value="" type="str"/>
<param name="object_prefix" value="object" type="str"/>
<remap from="rgb/image_rect_color" to="/camera_1/color/image_raw"/>
<remap from="depth_registered/image_raw" to="/camera_1/depth/image_raw"/>
<remap from="depth_registered/camera_info" to="/camera_1_ir/depth/camera_info"/>
</node>
<!-- Example of tf synchronisation with the objectsStamped message <node name="tf_example" pkg="find_object_2d" type="tf_example" output="screen">
<param name="map_frame_id" value="/camera_1" type="string"/>
<param name="object_prefix" value="object" type="str"/>
</node>-->
<launch>
<param command="$(find xacro)/xacro $(find moodz_description)/urdf/moodz.xacro" name="robot_description"/>
<node args="-param robot_description -urdf -model moodz" name="spawn_urdf" pkg="gazebo_ros" type="spawn_model"/>
<include file="$(find gazebo_ros)/launch/empty_world.launch">
<arg name="paused" value="true"/>
<arg name="use_sim_time" value="true"/>
<arg name="gui" value="true"/>
<arg name="headless" value="false"/>
<arg name="debug" value="false"/>
</include>
<node name="robot_state_publisher" pkg="robot_state_publisher" type="robot_state_publisher"/
Rviz:

Originally posted by ROS_newbie on ROS Answers with karma: 23 on 2022-02-09
Post score: 1
Original comments
Comment by osilva on 2022-02-09:
Added points. Can you pls add the image to your question. Thank you
Comment by ROS_newbie on 2022-02-09:
Image can now be viewed
Answer:
This was solved by publishing a static transform between the camera and base_link by adding the following command to your launch file:
This publishes a transform from your base_link to camera_link and rotates according to the above angles.
Originally posted by ROS_newbie with karma: 23 on 2022-05-23
This answer was ACCEPTED on the original site
Post score: 1 | {
"domain": "robotics.stackexchange",
"id": 37430,
"tags": "ros, 3d-object-recognition"
} |
Engineering Equation Solver (EES) Entropy returning NEGATIVE numbers | Question: I am using Engineering Equation Solver (EES).
When I try to find Entropy given Temperature and Quality (liquid) at saturation pressure for carbon dioxide I keep getting a negative number.
s=Entropy('CarbonDioxide',T=300[K],x=0)
I had hoped this was posted as a known problem... then the thought struck me that it might be isolated to just me after an extensive search.
Would someone give this script a try on their computer and tell me if they are getting the same results, or better yet point out what wall I have hit my head into and point out to me the obvious cause that I am blind to. :)
Note:
This is regarding the finding of Entropy at a specific state in a steady-state system, NOT the change in Entropy which can be negative.
Script:
$UnitSystem SI K Pa kJ mass deg
$TabStops 0.2 3.5 in
R$='CarbonDioxide'
T=300 [K]
s_l=Entropy(R$,T=T,x=0) "find Entropy for liquid"
s_v=Entropy(R$,T=T,x=1) "find Entropy for vapour"
h_l=Enthalpy(R$,T=T,x=0) "find Enthalpy for liquid"
Solution Window:
h_l=-223.9 [kJ/kg]
R$='CarbonDioxide'
s_l=-1.465 [kJ/kg-K] <------ NEGATIVE ?!? how is this possible?
s_v=-1.117 [kJ/kg-K] <------ NEGATIVE ?!? how is this possible?
T=300 [K]
Answer: It's probably expressed relative to a specified reference state where s is taken as zero. Check the EES literature for what this reference state is. | {
"domain": "physics.stackexchange",
"id": 65266,
"tags": "thermodynamics, computational-physics, software"
} |
rosdep Error With rviz | Question:
Hi,
I'm trying to build rviz for ROS, but I seem to run into this error:
$rosdep install rviz
Failed to find rosdep eigen for package rviz on OS:fedora version:16
rosdep install ERROR:
failed to install eigen
How exactly does rosdep do to install dependencies?
Thanks!
-Masoug
Originally posted by masoug on ROS Answers with karma: 1 on 2012-03-01
Post score: 0
Answer:
That means that there is no rule for installing "eigen" on Fedora. You will need to figure out the proper way to install eigen on your own. You can help future Fedora users by contributing a rule for eigen back, following the instructions here:
http://ros.org/wiki/rosdep
Originally posted by kwc with karma: 12244 on 2012-03-01
This answer was ACCEPTED on the original site
Post score: 2
Original comments
Comment by masoug on 2012-03-02:
How do I make these "rosdeps"?
Comment by tfoote on 2012-04-23:
See the rosdep wiki page: http://www.ros.org/wiki/rosdep | {
"domain": "robotics.stackexchange",
"id": 8454,
"tags": "rviz, rosdep"
} |
Are linear feedback shift registers being generally discouraged by cryptologists? | Question: Katz and Lindell mention in their book that LFSR have been horrible as basis for pseudorandom generators, and advocate that they are not used anymore (well, they also recommend that people use block ciphers instead of stream ciphers).
But I see for example that one of the ciphers in the estream portfolio (Grain, targeted for hardware) uses an LFSR, so the opinion that LFSRs are not good is not a consensus.
I'd like to know if there many cryptologists sharing Katz and Lindell's opinion on LFSRs (and on stream ciphers)?
Answer: There are many types of cryptanalytic attacks: Linear approximations, Algebraic attacks, Time-memory-data-tradeoff attacks, fault attacks.
For example you can read the survey: "Algebraic Attacks On Stream Ciphers (Survey)"
Abstract: Most stream ciphers based on linear feedback shift registers (LFSR)
are vulnerable to recent algebraic attacks. In this survey paper, we describe
generic attacks: existence of algebraic equations and fast algebraic attacks. ...
At the end you can find other relevant references.
Another good paper about fault attacks to stream ciphers is: "Fault Analysis of Stream Ciphers"
Abstract: ... Our goal in this paper is to develop general techniques which can be used to attack the standard constructions of stream ciphers based on LFSR’s, as well as more specialized techniques which can be used against specific stream ciphers such as RC4, LILI-128 and SOBERt32. While most of the schemes can be successfully attacked, we point out several interesting open problems such as an attack on FSM filtered constructions and the analysis of high Hamming weight faults in LFSR’s.
For time-memory-data tradeoffs attacks you can read: "Cryptanalytic Time/Memory/Data tradeoffs for stream ciphers". | {
"domain": "cstheory.stackexchange",
"id": 1495,
"tags": "soft-question, cr.crypto-security, pseudorandom-generators"
} |
How to calculate the force between line of charges? | Question: As far as I know Coulomb's law only works for point charges but what if there are not any point charges? For example, let's imagine there are three rectangles with different sizes. First one is 50 cm, second one is 30 cm and last one is 10 cm. The distance between the first and second rectangle is 10 cm and the distance between the second and third rectangle is also 10 cm. If their centers are collinear, what is the net force acting on the second rectangle? Should I calculate it by thinking of their centers as point charges? I searched for it but couldn't find a clear answer.
Here is the picture of the problem for those who couldn't understand from my explanation.
Answer: It depends how the charges are distributed in the material, and on the material's conductance. If you have a metal, the charges of the plates would be mobile and result in a hard to compute distribution. I cannot help you with that. There are probably good approximations to tackle those kind of problems but I am no expert.
If the charges are static and equally distributed over the surface, and the material has a relative permittivity ($\varepsilon_r=1$), you can use Coulomb's law with respect to infinitesimal parts of the charges and integrate over them.
If you suppose that the rectangles have a width of $10cm$, the force of the top plate on the middle plate could be calculated by
$$
\hat{F}_{12} = \frac{1C^2}{4\pi\varepsilon_0}\int_{-25cm}^{25cm}\frac{dx_1}{50cm} \int_{20cm}^{30cm}\frac{dy_1}{10cm}
\int_{-15cm}^{15cm} \frac{dx_2}{30cm} \int_{0cm}^{10cm}\frac{dy_2}{10cm}
\frac{1}{(x_2-x_1)^2+(y_2-y_1)^2} \begin{pmatrix}x_2-x_1\\y_2-y_1\end{pmatrix}
$$
As you can see, this is already quite complicated with the favourable assumptions we made. This should have an analytical solution but at the moment I am too lazy to do it. Wolfram Alpha could probably do the separate integrals and you would have to piece them together. You could then compare the result with what you would expect from point charges.
Ahh, and don't forget that there is the other plate as well. You would need to repeat the integration with opposite sign to obtain the second force and then take the difference for the total force. | {
"domain": "physics.stackexchange",
"id": 15559,
"tags": "homework-and-exercises, electrostatics, coulombs-law"
} |
Thermodynamic reversible process | Question: My book says
A process is said to be reversible if it is carried out
infinitesimally slowly so that in each step thermodynamic equilibrium of the system remains unchanged & any infinitesimal
change in the condition can reverse the process to restore the initial
state of the system & universe . Here the driving force is
infinitesimally greater than opposing force.
I have some confusions with this definition.
In the definition,they have mentioned 'step of the process'. Is 'step' at an instant or for a period of time? If it is for instant,how can I explain thermal equilibrium and mechanical equilibrium (which are to be maintained for reversibility)?
Suppose the process connects two states of a gas in a cylinder connected with a piston headed by lead shots. The initial and final pressures of gas can be given as $P_i$ & $P_f$. When we remove some lead shots, the gas does work by moving the piston to balance the force, so that the pressure of the piston and that of the gas become equal. How can I then explain the existence of mechanical equilibrium during the process? (It is after the process that the pressures become equal.)
In the last,they have mentioned about driving force and opposing force. What are they? Why the former is greater than the later? I know it is big but it will be a great help if anyone explain me the three questions (or confusions) . The definition is very intricate. Help.
Answer: This is a very interesting, important and at the same time subtle matter which is useful not only in thermodynamics, but other areas as well, and not everyone quite understands it.
First off, two statements that you'd better just memorize for now:
In a reversible process, equilibrium is never left. Yes, that is a paradox. That's why all real processes are irreversible. However, a real, irreversible process can approximate a reversible one.
A reversible process does not waste work: no other process can do more work going from state A to B of a substance than a reversible one. That also mean that all irreversible processes are wasting work, that is, when carrying out an irreversible process, you're losing the opportunity to have even more work done.
Equilibrium, Reversibility and Driving vs Opposing Forces
Let me try to give you an informal but intuitive notion of what reversibility has to do with equilbrium and balance between driving and opposing forces.
Equilibrium is achieved when a driving force in some direction is countered by an opposing force in the opposite direction. The concept of force here can be expanded to include whatever wants to steer any situation to a particular direction. Let's work with the example you gave:
Gas in cylinder with pressure $p_i$ under piston of area $A$ over which lie one thousand lead shots each weighing $w = \frac{p_iA}{1000}$.
The gas wants to expand (driving force), but the thousand shots exactly oppose it (opposing force). All is well and in equilibium here; nothing moves or changes.
Now imagine that you removed one shot. For a brief moment, the force in the piston due to the gas pressure would be marginally bigger than that due to the shots' weight, such that the piston would accelerate upwards a little, thus expanding the gas. This expansion would lead to a small decrease in pressure, which would eventually equal the current weight (99,9% of the original). In other words, for a brief period of time, the driving force trumps the opposing force. Then we have equilibrium again.
If your goal is, say, $p_f = \frac{1}{2}{p_i}$, you can keep doing that until only 500 lead shots are left. Each removal is one step of your process, and note that every step is a small period of disequilibrium between two equilibrium states. We can say that our example process is an approximation of a process that never leaves equilibrium (in this case, mechanical equilibrium!). Also note that our process did some work!
On the other hand, what would happen if, instead of patiently removing one lead shot at a time, we abruptly took away 500 shots? Well, suddenly the driving force (gas pressure on piston) would be twice as big as the opposing force. The piston would crazily accelerate upwards and the gas would expand rapidly. After quite some time, of course the same state would be reached: $p_f = \frac{1}{2} p_i$, $v_f = 2 v_i$. Note however, that the work done is smaller! In this case, only 500 shots were elevated to the final height, while, in the former, the same 500 shots were elevated PLUS 500 others were left along the way, at each height of each step of the process.
The former process (slow) is a better approximation of a reversible process than the latter (fast). Note that, when something is at an equilibrium, a very small push is all you need to tip the state in the direction you want, minimizing time spent in disequilibrium. You don't need to roundhouse-kick it to the desired direction. The more the driving force surpasses the opposing force, the greater the irreversibilities of the process. However, it does happen faster that way.
We can also think of an example involving thermal equilibrium. Suppose you want to raise the temperature of an object from 0 to 100 degrees. If you just put it in contact with an object at 100 degrees, you'll have a driving force disproportionately bigger than the opposing force — that is, nature is DYING to heat that cold object. However, if you used one hundred objects, each at a different temperature, to increment the temperature 1 degree at a time, the process would be more gentle. We stay in equilibrium more. Our newfound intuition tells us that this process is closer to a reversible one.
Conclusion
We can sum this up answering your points:
A step would be a period of time where the system moves toward its goal. In a reversible process, the driving forces (e.g. temperature or pressure differences) would be so small that each step would take forever. In a real process, generally, the "more reversible" it is, the longer each step will take.
This is an idealization which can be approximated by having a series of "small disequilibriums" instead of a large pressure difference in each side of the piston (akin to a free expansion)
These are abstract terms (i.e. may not be real forces). The driving force is that which leads the process in the direction you want. The opposing force is that which leads it in the reverse direction. Like pressure differences in a mechanical problem, or temperature differences in a heat transfer problem. The greater the difference between these forces, the farther from equilibrium the system will be, and the process will generally happen faster and more violently. This also means a less reversible process. | {
"domain": "physics.stackexchange",
"id": 54166,
"tags": "thermodynamics, equilibrium, reversibility"
} |
N dimensional array index utility | Question: I created the below class to help working with ND Arrays, mapping based on this question. This will help in implementing the code for handling convolutions.
How can I improve upon this? Is there something essential I'm missing from the interface?
header:
class NDArrayIndex{
public:
NDArrayIndex(
std::initializer_list<std::uint32_t> dimensions, std::int32_t padding = 0,
std::initializer_list<std::uint32_t> position = {}
);
NDArrayIndex& set(const std::vector<std::uint32_t>& position);
NDArrayIndex& step();
NDArrayIndex& step(std::uint32_t dimension, std::int32_t delta);
const std::vector<std::uint32_t>& position() const{
return m_position;
}
std::optional<std::uint32_t> calculate_mapped_position(const std::vector<std::uint32_t>& position) const;
std::optional<std::uint32_t> mapped_position() const{
return m_mappedIndex;
}
bool inside_bounds(const std::vector<std::uint32_t>& position, std::uint32_t dimension = 0u, std::int32_t delta = 0) const;
bool inside_bounds(std::uint32_t dimension = 0u, std::int32_t delta = 0) const{
return inside_bounds(m_position, dimension, delta);
}
bool inside_bounds(const NDArrayIndex& index, std::uint32_t dimension = 0u, std::int32_t delta = 0) const{
return inside_bounds(index.position(), dimension, delta);
}
bool inside_content(const std::vector<std::uint32_t>& position, std::uint32_t dimension = 0u, std::int32_t delta = 0) const;
bool inside_content(std::uint32_t dimension = 0u, std::int32_t delta = 0) const{
return inside_content(m_position, dimension, delta);
}
bool inside_content(const NDArrayIndex& index, std::uint32_t dimension = 0u, std::int32_t delta = 0) const{
return inside_content(index.position(), dimension, delta);
}
using IntervalPart = std::pair<std::uint32_t, std::uint32_t>;
std::vector<IntervalPart> mappable_parts_of(std::uint32_t dimension, std::int32_t delta) const{
return mappable_parts_of(m_position, dimension, delta);
}
std::vector<IntervalPart> mappable_parts_of(
const std::vector<std::uint32_t>& position, std::uint32_t dimension, std::int32_t delta
) const;
std::uint32_t buffer_size(){
return m_bufferSize;
}
private:
const std::vector<std::uint32_t> m_dimensions;
const std::int32_t m_padding;
const std::vector<std::uint32_t> m_strides;
const std::uint32_t m_bufferSize;
std::vector<std::uint32_t> m_position;
std::optional<std::uint32_t> m_mappedIndex;
};
source:
NDArrayIndex::NDArrayIndex(
std::initializer_list<std::uint32_t> dimensions, std::int32_t padding,
std::initializer_list<std::uint32_t> position
)
: m_dimensions(dimensions)
, m_padding(padding)
, m_strides(init_strides(dimensions, m_padding))
, m_bufferSize(std::accumulate(m_dimensions.begin(), m_dimensions.end(), 1.0,
[](const std::uint32_t& partial, const std::uint32_t& element){ return partial * element; }
))
, m_position(init_position(m_dimensions, position))
, m_mappedIndex(calculate_mapped_position(m_position))
{
assert(0 == std::count(m_dimensions.begin(), m_dimensions.end(), 0));
assert(inside_bounds(m_position));
}
NDArrayIndex& NDArrayIndex::set(const std::vector<std::uint32_t>& position){
assert(position.size() == m_position.size());
assert(inside_bounds(position));
m_position = position;
m_mappedIndex = calculate_mapped_position(m_position);
assert( (!m_mappedIndex.has_value())||(m_mappedIndex.value() < m_bufferSize) );
return *this;
}
NDArrayIndex& NDArrayIndex::step(){
std::uint32_t dim = 0;
bool changed = false;
while(dim < m_dimensions.size()){
if(inside_bounds(dim, 1)){
step(dim, 1);
break;
}else{
changed = true;
m_position[dim] = 0;
}
++dim;
}
if(dim >= m_dimensions.size()){
m_mappedIndex = 0; /* Overflow happened, start from the beginning */
}else{
if(changed)m_mappedIndex = calculate_mapped_position(m_position);
assert(m_mappedIndex < m_bufferSize);
}
return *this;
}
NDArrayIndex& NDArrayIndex::step(std::uint32_t dimension, std::int32_t delta){
const std::int32_t new_position = static_cast<std::int32_t>(m_position[dimension]) + delta;
assert(0 <= new_position);
assert((m_dimensions[dimension] + (2 * std::max(0, m_padding))) > static_cast<std::uint32_t>(new_position));
m_position[dimension] = new_position;
bool new_position_is_inside_content = inside_content(m_position);
if(m_mappedIndex.has_value() && new_position_is_inside_content){ /* m_mappedIndex has a value if the previous position was valid */
m_mappedIndex.value() += m_strides[dimension] * delta;
assert(m_mappedIndex < m_bufferSize);
}else if(new_position_is_inside_content){ /* if the new position is inside bounds, then the mapped index can be caluclated */
m_mappedIndex = calculate_mapped_position(m_position);
}else m_mappedIndex = {}; /* No mapped index for positions inside the padding */
return *this;
}
std::optional<std::uint32_t> NDArrayIndex::calculate_mapped_position(const std::vector<std::uint32_t>& position) const{
assert(position.size() == m_strides.size());
if(!inside_content(position))
return {};
std::uint32_t result_index = 0u;
for(std::uint32_t dim = 0; dim < position.size(); ++dim){
result_index += (position[dim] - std::max(m_padding, -m_padding)) * m_strides[dim];
}
return result_index;
}
bool NDArrayIndex::inside_bounds(const std::vector<std::uint32_t>& position, std::uint32_t dimension, std::int32_t delta) const{
std::uint32_t dimension_index = 0;
return std::all_of(position.begin(), position.end(),
[this, &dimension_index, dimension, delta](const std::uint32_t& pos){
std::int32_t position = static_cast<std::int32_t>(pos);
if(dimension_index == dimension) position += delta;
return( (0 <= position)&&(position < static_cast<int32_t>(2 * std::max(0, m_padding) + m_dimensions[dimension_index++])) );
}
);
}
bool NDArrayIndex::inside_content(const std::vector<std::uint32_t>& position, std::uint32_t dimension, std::int32_t delta) const{
std::uint32_t dimension_index = 0;
return std::all_of(position.begin(), position.end(),
[this, &dimension_index, dimension, delta](const std::uint32_t& pos){
std::int32_t actual_position = static_cast<std::int32_t>(pos);
if(dimension_index == dimension) actual_position += delta;
return(
(std::max(m_padding, -m_padding) <= actual_position)
&&(actual_position < static_cast<std::int32_t>(m_dimensions[dimension_index++] + m_padding))
);
}
);
}
std::vector<NDArrayIndex::IntervalPart> NDArrayIndex::mappable_parts_of(
const std::vector<std::uint32_t>& position, std::uint32_t dimension, std::int32_t delta
) const{
std::vector<NDArrayIndex::IntervalPart> result;
bool part_in_progress = false;
for(std::int32_t delta_index = 0; delta_index < delta; delta_index += std::copysign(1, delta)){
const bool current_position_in_inside_content = inside_content(position, dimension, delta_index);
if(current_position_in_inside_content && part_in_progress){
assert(0 < result.size());
++std::get<1>(result.back()); /* Increase the size of the current part of the interval */
}else if(current_position_in_inside_content){ /* If the interval iteration became inside bounds */
result.push_back({(position[dimension] + delta_index), 1}); /* Add the new part as a result */
part_in_progress = true;
}else part_in_progress = false;
}
return result;
}
and with the following tests:
TEST_CASE("Testing NDArray Indexing with a 2D array without padding", "[NDArray]"){
std::uint32_t width = rand()%100;
std::uint32_t height = rand()%100;
NDArrayIndex idx({width, height});
for(std::uint32_t variant = 0; variant < 5; ++variant){
std::uint32_t x = rand()%width;
std::uint32_t y = rand()%height;
idx.set({x,y});
REQUIRE(idx.inside_bounds());
REQUIRE(idx.mapped_position().has_value());
REQUIRE(idx.mapped_position().value() == (x + (y * width)));
std::uint32_t elements_after_x_row = width - x;
REQUIRE(1 == idx.mappable_parts_of(0,width).size());
REQUIRE(x == std::get<0>(idx.mappable_parts_of(0,width)[0]));
REQUIRE(elements_after_x_row == std::get<1>(idx.mappable_parts_of(0,width)[0]));
/*!Note: using width in the above interfaces because it is guaranteed
* that an interval of that size spans over the relevant dimension
* */
}
REQUIRE(idx.buffer_size() == (width * height));
idx.set({0,0});
for(std::uint32_t i = 0; i < idx.buffer_size(); ++i){
REQUIRE(idx.inside_bounds());
REQUIRE(idx.inside_content());
REQUIRE(idx.mapped_position().has_value() == true);
REQUIRE(idx.mapped_position().value() == i);
idx.step();
}
}
TEST_CASE("Testing NDArray Indexing with a 2D array with positive padding", "[NDArray][padding]"){
std::uint32_t width = 1 + rand()%20;
std::uint32_t height = 1 + rand()%20;
std::int32_t padding = 5;
NDArrayIndex idx({width, height}, padding);
for(std::uint32_t variant = 0; variant < 5; ++variant){
std::uint32_t x = padding + rand()%(width);
std::uint32_t y = padding + rand()%(height);
idx.set({x,y});
REQUIRE(idx.inside_bounds());
REQUIRE(idx.mapped_position().has_value());
REQUIRE( idx.mapped_position().value() == (x - padding + ((y - padding) * width)) );
std::uint32_t elements_after_x_row = padding + width - x;
REQUIRE(1 == idx.mappable_parts_of(0,width).size());
REQUIRE(x == std::get<0>(idx.mappable_parts_of(0,width)[0]));
REQUIRE(elements_after_x_row == std::get<1>(idx.mappable_parts_of(0,width)[0]));
}
REQUIRE(idx.buffer_size() == (width * height));
std::uint32_t x = 0u;
std::uint32_t y = 0u;
std::uint32_t reference_mapped_position = 0u;
idx.set({0,0});
for(std::uint32_t i = 0; i < idx.buffer_size(); ++i){
if(
(padding <= static_cast<std::int32_t>(x) && x < (padding + width))
&&(padding <= static_cast<std::int32_t>(y) && y < (padding + height))
){
REQUIRE(idx.inside_bounds());
REQUIRE(idx.inside_content());
REQUIRE(idx.mapped_position().has_value() == true);
REQUIRE(idx.mapped_position().value() == reference_mapped_position);
++reference_mapped_position;
}else{
REQUIRE(idx.inside_bounds());
REQUIRE(idx.mapped_position().has_value() == false);
}
idx.step();
if(x < padding + width + padding - 1){
++x;
}else{
x = 0;
++y;
}
}
}
TEST_CASE("Testing NDArray Indexing with a 2D array with negative padding", "[NDArray][padding]"){
std::uint32_t width = 11 + rand()%20;
std::uint32_t height = 11 + rand()%20;
std::int32_t padding = -5;
NDArrayIndex idx({width, height}, padding);
for(std::uint32_t variant = 0; variant < 5; ++variant){
std::uint32_t x = -padding + rand()%(width + 2 * padding);
std::uint32_t y = -padding + rand()%(height + 2 * padding);
idx.set({x,y});
REQUIRE(idx.inside_bounds());
REQUIRE(idx.mapped_position().has_value());
REQUIRE( idx.mapped_position().value() == (x + padding + ((y + padding) * (width + 2 * padding))) );
std::uint32_t elements_after_x_row = padding + width - x;
REQUIRE(1 == idx.mappable_parts_of(0,width).size());
REQUIRE(x == std::get<0>(idx.mappable_parts_of(0,width)[0]));
REQUIRE(elements_after_x_row == std::get<1>(idx.mappable_parts_of(0,width)[0]));
}
REQUIRE(idx.buffer_size() == (width * height));
std::uint32_t x = 0u;
std::uint32_t y = 0u;
std::uint32_t reference_mapped_position = 0u;
idx.set({0,0});
for(std::uint32_t i = 0; i < idx.buffer_size(); ++i){
if(
(-padding <= static_cast<std::int32_t>(x) && x < (padding + width))
&&(-padding <= static_cast<std::int32_t>(y) && y < (padding + height))
){
REQUIRE(idx.inside_bounds());
REQUIRE(idx.inside_content());
REQUIRE(idx.mapped_position().has_value() == true);
REQUIRE(idx.mapped_position().value() == reference_mapped_position);
++reference_mapped_position;
}else{
REQUIRE(idx.inside_bounds());
REQUIRE(idx.mapped_position().has_value() == false);
}
idx.step();
if(x < (width - 1)){
++x;
}else{
x = 0;
++y;
}
}
}
Answer: Add Doxygen documentation
I'm having a hard time understanding what the purpose is of all those member functions. Adding Doxygen documentation for the class and all its members would be of great help.
Do you need a variable number of dimensions?
Usually when dealing with data, you already know its dimensionality. It would make more sense then to make NDArrayIndex be a template, with the template parameter being the number of dimensions, and then use std::array instead of std::vector to store the things that are now stored in std::vectors. This will allow the compiler to optimize the code much better, and avoids all the memory allocations.
Even if you don't know the number of dimensions up front, maybe you can make the template parameter be the maximum number of dimensions, so you can still use std::arrays. Consider that, at least on Linux on AMD64, the size of an empty std::vector is 24 bytes, which is the same size as a std::array<uint32_t, 6>.
Why is padding a scalar?
It's weird that m_dimensions and m_strides are vectors, but m_padding is a scalar. There is no reason why you could not have a different padding size for each dimension.
Prefer declaring a struct instead of using std::pair
While std::pair and std::tuple are sometimes helpful in generic code, if you can just declare a struct instead, prefer the latter. This allows you to give names to the two elements of the pair, and avoids the ugly calls to std::get<>():
struct IntervalPart {
std::uint32_t start;
std::uint32_t size;
};
Unsafe conversions between signed and unsigned integers
The API makes it seem like any unsigned 32-bit value is safe to be used, but the cast to std::int32_t inside inside_bounds() makes large values unsafe. You should handle this somehow.
Consider adding iterators
It looks to me like in the end, you want to iterate over the mappable part of a multi-dimensional array. Now you have to call a mix of mappable_parts_of(), step() and mapped_position(). But all of this is slow; if you have the mappable parts, you shouldn't need the fancyness of step(), and you don't need the bounds checking done by mapped_position(). I think that an interface that provides an iterator to efficiently iterate over a mappable part would be best. For example:
NDArray array = ...;
NDArrayIndex index = ...;
for (auto position: index.mappable_parts_range(...)) {
do_something_with(array[position]);
}
Of course, for convolutions you need two positions, one for each array you want to convolve, that move together, so you either want to be able to derive one from the other quickly, or have an even more fancy API that allows provides you with both positions simultaneously, or perhaps even pass in the arrays directly and have it return references to array elements:
NDArray array1, array2;
float sum = {};
...
for (auto& [el1, el2]: overlapping_range(array1, array2, ...)) {
sum += el1 * el2;
} | {
"domain": "codereview.stackexchange",
"id": 44031,
"tags": "c++, array, matrix"
} |
Why electrons get excited? | Question: Why and how do electrons get excited, and what happens inside an atom when electrons get excited?
Answer: In an atom, an electron can only have energies of a certain discrete set of values. These values are referred to as "energy levels". For this reason it is said that energy levels are quantizied.
To go from a lower level to a higher level, a photon matching the energy difference between the energy levels is absorbed.
When the electron changes energy levels, the wave-function of the electron changes. For example, the probability distribution of where the electron is located in the atom changes. Generally, the higher the energy level, the greater the probability that the electron will be located further away from the nucleus. | {
"domain": "physics.stackexchange",
"id": 12449,
"tags": "electrons, atoms"
} |
Evanescent waves and photon tunneling | Question: Context
I am reading about near-field heat transfer. Generally this phenomena is describes using (classical) Maxwell equations. In vacuum heat transfer from a body A to a body B such that $T_A > T_B$ happens through radiation. If the distance between these bodies is smaller than the wavelength of the blackbody radiation of A (more specifically smaller than the wavelength given by Wien's displacement law) heat transfer is enhanced by several orders of magnitudes.
Electromagnetic waves have to cross two interfaces before being absorbed in body B. Namely the A-Vacuum (A-V) interface and the Vacuum-B (V-B) interface. For some materials you can get total reflection at the A-V interface. In this situation an evanescent wave forms. The latter travels parallel to the A-V interface and decays exponentially perpendicularly with respect to the A-V interface. The Poynting vector is zero in the perpendicular direction. If now the V-B interface is close enough (and parallel to A-V) to the evanescent wave, an EM wave can be observed in B with non-zero energy density. The energy passed through the vacuum gap even though the Poynting vector is zero (perpendicularly to A-V).
Question
The mathematics is extremely similar to the tunnelling of, for instance, electrons in quantum mechanics. In some books/papers I saw people referring to this phenomenon as photon tunnelling. Is this energy transfer, which is completely described with classical equations, photon tunnelling? Is tunnelling then related to wave mechanics and NOT strictly to quantum mechanics?
Answer: I think you are probably getting a bit too worried about words and their meaning and are possibly trying to ascribe more precision to natural English words than they can give you without further precise description in mathematical language.
As you witness, the mathematics describing photon tunnelling and evanescent waves is exactly the same as that describing electron tunnelling into classically forbidden regions. Maxwell's equations are both classical equations and can be interpreted as the propagation equation for a one-photon state. So both phenomena - photon and electron tunnelling - are equally quantum mechanics and the mechanics of waves. The two aren't mutually exclusive: the propagation equations in quantum mechanics naturally lead to D'Alembert's and like equations. If you interpret Maxwell's equations for a tunnelling wave into a system of dielectric layers as the propagation equation for a lone photon, the energy density as a function of position for the properly normalized solution is interpreted as the probability to destructively detect the photon at the point in question with a detector when one photon states propagate into the layers separately and the Poynting vector becomes the flux of this probability.
Question from OP
I fully agree with what you say. Maxwell <-> photon tunnelling and Schrödinger <-> electron tunnelling are the "same" thing. But the Schrödinger equation leads to quantised states (example hydrogen atom), hence it is part of quantum mechanics. Can Maxwell equations lead to quantised states as Schrödinger does? What is puzzling to me, I think, is the following: Why is the Schrödinger description of particles (wave nature) considered quantum mechanics and the Maxwell description is not?
Certainly Maxwell's equations lead to bound states. Look at the bound states of an optical fiber, which are shift-invariant eigenfunctions of the form $\Psi(x,\,y)\,e^{i\,\beta\,z}$, where the z direction is along the optical axis of the fiber, and where the propagation constant $\beta$ lies between the core and cladding wavenumbers. This is the discrete spectrum of the relevant Sturm-Liuoville system. As a quantum mechanical description when there is one photon in the mode system at a time, the propagation equation is actually the propagation for a pseudo particle called various things - polariton is probably the most apposite to an optical fiber propagation. The pseudo particle is a quantum superposition of pure EM field one-photon states and excited matter states in the fiber's material.
You ask why this kind of thing isn't called "quantum mechanics". Well it most certainly is part of quantum mechanics and the reason it isn't often referred to as such is probably historical. There is no nonrelativistic description of the photon - Maxwell's Equations are fully Lorentz-covariant - in contrast with the atomic electron Schrödinger equation which describes a nonrelativistic approximation. Such approximations admit position co-ordinates where the wavefunction can be loosely interpreted as definining, through its magnitude, the probability of "finding" an electron at a given position. This kind of thing isn't possible for the relativistic photon, or, for that matter, the relativistic electron described by the Dirac equation (note that Maxwell's equations can indeed be written as a Schrödinger equation and also that Maxwell's equations are equivalent to the Dirac equation for a massless particle). See my answer here and also here and here for further details. The question of photonic wave functions is addressed in detail in the works of Iwo Bialynicki-Birula, for example, cited in my answers. | {
"domain": "physics.stackexchange",
"id": 33293,
"tags": "quantum-mechanics, waves, electromagnetic-radiation, thermal-radiation, quantum-tunneling"
} |
Why do sensors that emit higher frequency signals give more accurate data? | Question: I am doing a technical presentation about RADAR and LiDAR. I understand that LiDAR is several times more accurate and capable of producing really detailed 3-D maps of their surroundings, while RADARs tend to lag behind in accuracy.
Several sources indicate that the shorter wavelength signals from LiDAR contribute to its higher accuracy, but they don't really explain why.
This webpage explains that higher frequency signals yield more accurate data in RADARs, but does not really explain why either:
http://www.radartutorial.eu/07.waves/Waves%20and%20Frequency%20Ranges.en.html
There are some posts that mention the Heisenberg Uncertainty Principle, but I don't think an explanation at the atomic level is really relevant nor required to explain this phenomenon.
Can anybody give an equation or state a scientific concept as to why sensors/apparatuses that use higher frequency signals yield more accurate data?
Answer: In the article that anna v mentioned:
robotsforroboticists.com/lidar-vs-radar
there is a segment which states:
"The down side [of the RADAR] is that if an object is much smaller than the RF wave being used, the object might not reflect back enough energy to be detected. For that reason many RADAR’s in use for obstacle detection will be “high frequency” so that the wavelength is shorter (hence why we often use mm-wave in robotics) and can detect smaller objects."
This does provide some intuitive understanding as to why devices emitting higher frequency signals provide high-resolution data. Small objects and the small/fine details of large objects (ie. protrusions of a wall, a pedestrian's facial features, and bumps/cracks/curves that make up the texture of any surface) do not reflect enough low-frequency EM wave energy back to the RADAR. Thus, such fine details are not detected by RADAR.
If the generated signals were of higher frequency, then the EM wave oscillates faster, and a greater percentage of the wave would hit and be reflected by small objects and the fine details of large objects (ie. protrusions/bumps/curves). Thus, a sensor emitting waves at a higher frequency can detect such details.
The above explanation may not be the most technical, but I hope it provides others with a more intuitive understanding of how I reasoned it out.
Thanks everyone for sharing their ideas! | {
"domain": "physics.stackexchange",
"id": 53390,
"tags": "electromagnetism, frequency, radio-frequency, sensor, radar"
} |
How do I rotate a warping grid without changing the warped image's orientation? | Question: I have a warping grid, that has to be applied to an image. When I apply the grid, I get the results I expect.
Now, I want to apply the same grid to the same image, but the image is modified in the following way: it is rotated 270 degrees counterclockwise.
The grid is stored in memory as an array of points. It is [-1, -1] - [1, 1] normalized. I am trying to rotate the whole thing around the origin, that is [0, 0] id est the center of the image. Also, I am trying to rotate in discrete steps of 90 degrees.
P1 P2 P3
P4 P5 P6
P7 P8 P9
These are destination coordinates of the points in the image. For example, the lower right corner of the input image has coordinates of [1,1]. In the output image, this point is moved to [P9.x, P9.y] (a.k.a. forward mapping).
I transpose the matrix:
P1 P4 P7
P2 P5 P8
P3 P6 P9
Then I apply to every point the transformation
x = y'
y = -x',
where ' denotes the old coordinate and lack of prime denotes the new coordinate. I apply the derived grid to the rotated image.
However, I do not get the expected warp. This task seemed incredibly simple to me, but I find myself struggling with the strange behaviour for a third day. Could someone point out my mistake or a procedure to get things right?
Answer: In two dimensions, to rotate a point by a certain angle you need to multiply that point by a rotation matrix of the form:
$
\left[\begin{array}{c}x' \\y' \end{array}\right]
=
\left[\begin{array}{cc}\cos\theta & -\sin\theta \\\sin\theta & \cos\theta\end{array}\right]
\left[\begin{array}{c}x \\y \end{array}\right]
$
Where $(x, y)$ is the old coordinate, $(x', y')$ is the new coordinate, and $\theta$ is the angle of (counterclockwise) rotation you want to apply.
Thus, for a rotation of 270 degrees, the matrix you apply is:
$
\left[\begin{array}{c}x' \\y' \end{array}\right]
=
\left[\begin{array}{cc}\cos(270)& -\sin(270) \\\sin(270) & \cos(270)\end{array}\right]\left[\begin{array}{c}x \\y \end{array}\right]
= \left[\begin{array}{cc}0& 1 \\-1 & 0\end{array}\right]\left[\begin{array}{c}x \\y \end{array}\right]
= \left[\begin{array}{c}y \\-x \end{array}\right]
$
There is a comprehensive wikipedia page on rotation matrices, explaining them in more detail, extending them to 3D, and sumarising some of their properties. | {
"domain": "dsp.stackexchange",
"id": 351,
"tags": "image-processing"
} |
Playing "craps" for the win | Question: No specific question here. I am beginning with Java and here is an exercise. The rules of game:
You roll two dice. Each die has six faces, which contain one, two,
three, four, five and six spots, respectively. After the dice have
come to rest, the sum of the spots on the two upward faces is
calculated. If the sum is 7 or 11 on the first throw, you win. If the
sum is 2, 3 or 12 on the first throw (called “craps”), you lose (i.e.,
the “house” wins). If the sum is 4, 5, 6, 8, 9 or 10 on the first
throw, that sum becomes your “point.” To win, you must continue
rolling the dice until you “make your point” (i.e., roll that same
point value). You lose by rolling a 7 before making your point.
import java.io.IOException;
import java.util.Random;
import java.util.Scanner;
public class helloworld {
private static int point;
private static enum possibleResults {UNDEFINED, WIN, LOSE};
private static possibleResults gameResult = possibleResults.UNDEFINED;
private static String causeOfLose;
private static String causeOfWin;
public static int rollDice(int n) {
int sum = 0;
Random randomNumbers = new Random();
for(int i=0; i<n; i++) {
sum += (1 + randomNumbers.nextInt(6));
}
System.out.printf("Rolling dice... You got %d\n", sum);
return sum;
}
public static void firstRoll() throws IOException {
waitUser();
int tmp = rollDice(2);
if(tmp == 7 || tmp == 11) {
gameResult = possibleResults.WIN;
causeOfWin = String.format("you have got a lucky number in the first round: %d.", tmp);
} else if(tmp == 2 || tmp == 3 || tmp == 12) {
gameResult = possibleResults.LOSE;
causeOfLose = String.format("you have got an unlucky number in the first round: %d.", tmp);
} else {
point = tmp;
System.out.printf("Your point is %d, you need to make your point to win.%n", point);
}
declareResult();
}
public static void declareResult() {
if(gameResult == possibleResults.LOSE) {
System.out.printf("You have lost, because %s%n", causeOfLose);
} else if(gameResult == possibleResults.WIN) {
System.out.printf("You have won, because %s%n", causeOfWin);
} else {
System.out.println("Game continues...");
}
}
public static void moreRolls() {
Scanner inputScan = new Scanner(System.in);
while(gameResult == possibleResults.UNDEFINED) {
waitUser();
int tmp = rollDice(2);
if(tmp == point) {
gameResult = possibleResults.WIN;
causeOfWin = "you have made your point!";
} else if(tmp == 7) {
gameResult = possibleResults.LOSE;
causeOfLose = "you have hit 7 before making your point.";
} else {
;
}
declareResult();
}
}
private static void waitUser() {
Scanner inputScan = new Scanner(System.in);
System.out.print("Press enter to start rolling: ");
String input = inputScan.nextLine();
}
public static void main(String[] args) throws IOException {
firstRoll();
moreRolls();
}
}
Any corrections and/or suggestions are welcome.
Answer: A class called helloworld? ClassNames in Java should have CapitalizedCamelCase, so your class should be HelloWorld, but Craps is probably a better name.
All your variables are static?
Creating a new Random instance each time you roll the dice is an unnecessary overhead. This is a case where you can have a static variable (it is thread-safe).
You call rollDice(2), and never any other input value. Why not make the method simply:
public static int rollDice() {
return 2 + randomNumbers.nextInt(6) + randomNumbers.nextInt(6);
}
Note that methods should do one thing, and one thing only. Your rollDice was rolling the dice, and also printing the result. Printing the output should be the responsibility of some other method.
A switch statement will help your turn methods:
int roll = rollDice();
switch(roll) {
case 7:
case 11:
gameResult = possibleResults.WIN;
causeOfWin = String.format("you have got a lucky number in the first round: %d.", roll);
break;
case 2:
case 3:
case 12:
gameResult = possibleResults.LOSE;
causeOfLose = String.format("you have got an unlucky number in the first round: %d.", roll);
break;
default:
point = roll;
System.out.printf("Your point is %d, you need to make your point to win.%n", point);
} | {
"domain": "codereview.stackexchange",
"id": 10444,
"tags": "java, beginner, game, random, dice"
} |
Equilibrium Graphs | Question:
What if I2 and H2 began at the same concentration - then do we start at the same point? ... or is it acceptable to start at different concentrations...
Answer:
What if I2 and H2 began at the same concentration - then do we start at the same point?
Yes (and in textbook problems involving graphs this situation would be avoided for the reason pointed out in brinnb's comment)
is it acceptable to start at different concentrations...
Also yes. No law against it.
I'm guessing that the graphs are part of a textbook problem. Are you able to figure out which graph can occur? | {
"domain": "chemistry.stackexchange",
"id": 1561,
"tags": "homework, thermodynamics, equilibrium, concentration"
} |
How to run a model stored as a .Rda file? | Question: I am using an already written R code which has a line of code as shown below
model_predictors <- buildModel(flag, data, outcomeName, folder)
model<-model_predictors$model
predictorsNames<-model_predictors$predictorsNames
auc <- model_predictors$auc
save(model, file=paste(folder,studyName,'_model_', flag$model[1], '_', outcomeName,".Rda",sep=''))
As you can see in the last line of code, the (training) model is saved in folder in a .Rda file format with a naming convention _model_. Now, I have to use this model to test/evaluate on unseen dataset.
So now my question, I see a .Rda file in my folder path and I can load the .Rda file in Rstudio but how do I make inference from this model? I am new to R and can anyone help me with this please?
Can someone help me understand how to run this model please?
Answer: I suppose you come from Python. R is quite different as it does not require to save to external files your model, you just need to have it in your workspace. Once you have a model object, you just have to use the predict function and specify the unseen dataset. Here's an example of a tree model:
model<-tree(Y~X1+X2, data=dataset) #estimate of the tree model
predictions<-predict(model, newdata=unseen_dataset) #prediction on new data
Be sure variable names between datasets correspond. | {
"domain": "datascience.stackexchange",
"id": 7480,
"tags": "machine-learning, neural-network, deep-learning, classification, r"
} |
Discrete-time realization for continuous time domain controller/filter? | Question: Let us say I simply design a low pass filter like (1/s+1) with the cutoff frequency as 1 rad/sec, when I implement it in real software, do I have to do the discrete-time realization? If not, what issues will I have? How to design a discrete low pass filter that have the same cutoff frequency?
Answer:
Do I have to do the discrete-time realization? If not, what issues will I have?
AFAIK, you cannot implement continuous time filters with digital processors which are inherently discrete time devices (since they execute instructions at discrete clock edges).
How to design a discrete low pass filter that have the same cutoff frequency?
Some methods to transform continuous time filters to discrete time are (from Wikipedia)
Bilinear transform This is probably what you need.
Impulse invariance
Matched Z transform
Discretisation of the continuous time derivative operation. (Euler ?)
The reference given in the Wikipedia article including this one will give you the properties of each of the above techniques.
How to design a discrete low pass filter that have the same cutoff frequency?
The cut off frequency is the usually not the property of the filter we are interested in. We are usually interested in properties like
gain in the pass band,
attenuation in the stop band,
bandwidth,
phase distortion (linear phase response),
ripple in the pass and stop band,
and as implmentation constraints,
order of the filter,
sensitivity to components or coefficients,
memory requirements,
computation time etc.
So rather than trying to get the cut off frequency to match exactly with the continuous time "parent" filter, check if the discrete time filter obtained by conversion meets the "real" requirements. | {
"domain": "engineering.stackexchange",
"id": 4235,
"tags": "design, pid-control"
} |
Doppler shift and change in intensity of a sound wave | Question: How are the intensity of a sound wave and the Doppler shift of frequency related togheter?
That is, if the source or the observer are in relative motion, how does the intensity change?
For a sound wave $$I=\frac{1}{2} \rho \omega^2 A^2 c=2 \pi^2 \rho f^2 A^2c$$
($c$ is sound speed, $\rho$ is density of air, $A$ is amplitude)
So, since Doppler effect is only about $f$, I would say that
$$I'=I \bigg(\frac{f'}{f}\bigg)^2=I \bigg(\frac{c+v_{oss}}{c+v_{sorg}}\bigg)^2$$
But I don't think that this is correct, can anyone give suggestion about this?
Edit I report an example exercise (I'm not looking for the solution, my doubt is conceptual and it is explained above)
A source emits a spherical sound wave at frequency $f=400Hz$ with
power $P=1 W$ in a solid angle of $\frac{\pi}{4} sr$. An observer $A$
is at distance $R=228m$ and does not move, a second observer $B$ is at
the same distance and moves with velocity $v_{B}=200 km/h$ towards
the source. Determine the sound intensity level received by the two observers. Use speed of sound at $20 ° C$,
$v_{sound}=343 m/s$.
Answer : $\bigg[L_{A}=73.9 dB \, \, , \, \, L_{B}=L_{A}+0.65 dB=74.5 dB \bigg]$
I have no problem for $A$
$$I_{A}=\frac{P}{\frac{\pi}{4} R^2}=2.45 \cdot 10^{-5} W/m^2 \implies L_{A}=10 Log \frac{I_A}{10^{-12}}=73.9 dB$$
But I do have problems for $B$. Using the formula proposed in my question I get the wrong result
$$I_{B} =I_{A}(\frac{343+55.55}{343})^2=3.31 \cdot 10^{-5} W/m^2 \implies L_{B}=10 Log \frac{I_B}{10^{-12}}=75.1 dB$$
I do not know why, but without squaring the ratio of frequecy I do get the correct result.
$$I_{B} =I_{A}(\frac{343+55.55}{343})=2.85 \cdot 10^{-5} W/m^2 \implies L_{B}=10 Log \frac{I_B}{10^{-12}}=74.5 dB$$
So I found a way to get the result but I do not understand why should not be correct to square the ratio of frequencies. Furthermore, in the answer the result is given as an adding sound level. I would really like to know how can one get that $+0.65 dB$ directly, so that one knows what to add to the result, without doing a lot of calculations.
Answer: In this type of problem one has to take great care in defining intensities. In this case there are 4 different intensities: 1. $I_{ss}$, the intensity received by the static observer as perceived by himself, 2. $I_{ms}$, the intensity received by the moving observer as perceived by a static observer. 3. $I_{sm}$, the intensity received by the static observer as perceived by the moving observer and 4. $I_{mm}$, the intensity received by the moving observer as perceived by himself.
You are trying to compare $I_{mm}$ to $I_{ss}$. These are intensities from two different reference frames and thus incomparable. You should be comparing $I_{ms}$ to $I_{ss}$ or $I_{mm}$ to $I_{sm}$.
The easiest way to see what happens to the intensity when one approaches a source is to compare it with someone shooting paint balls at two observers. One standing still and the other approaching the shooter. At $t=0$ the two observers are at the same distance from the shooter. After a time $\Delta t$, the static observer has received $N=\Delta t F$ paint balls., where the flux $F$ is the number of paint balls per second shot at the observer. The moving observer will have been hit by more paint balls, because during the time $\Delta t$ he has moved $\Delta x$ closer to the shooter. There are thus some paint balls which have already reached the position of the moving observer but not yet that of the static observer. The number of paintballs in mid air between the two observers at $t=\Delta t$ is: $N_{diff}=F\frac{\Delta x}{c}=F\frac{v_{obs} \Delta t}{c}$. The number of paintballs received by the moving observer is thus $N'=F \Delta t + F\frac{v_{obs}}{c} \Delta t$. The relative flux is thus $$\frac{F'}{F}=1+\frac{v_{obs}}{c}=\frac{c+v_{obs}}{c}.$$
The intensity $I$, the amount of energy per second is then given by multiplying the flux with the amount of energy per paint ball. The balls arrive at both observers with the same velocity. However, since both observers will assign a different value for this velocity, they will also perceive different intensities. Still, the ratio between the intensities as perceived by one observer, will be the same for both observers and is identical to the ratio of the Fluxes. | {
"domain": "physics.stackexchange",
"id": 32150,
"tags": "homework-and-exercises, waves, acoustics, frequency, doppler-effect"
} |
Build a ring around Earth, then remove the supports | Question: What would happen if we decided to build a giant ring that managed to wrap around the whole world, end to end that was supported with pillars all along the ring and then the supports all suddenly removed?
Would the ring float in place? or if it fell, what direction would it fall?
Answer: This is a fun what-if. I really wish I was xkcd right now so that I could include pictures and humour, but I'm not.
If we built a giant ring around Earth at just the right height everywhere, then in theory, the ring would just float there once the supports were removed. However, (and this is the reason not to try it) this is an unstable state. If you so much as tap on one side of the ring, the whole thing will come crashing down. It's like balancing a pencil on its point or a bicycle upright while stopped (no training wheels!). Possible, but instability is a b!%#.
But let's back up to how you might go about doing this. The pressures on the ring would be enormous, so you would have to make it very thick and out of really strong materials. Carbon nanotubes would be great but expensive. Odds are we'd go with steel for cost and just make it thicker.
Then you need to build it in exactly the right place and at the right height. And the supports would be none too stable also (why would we not just build this as a giant global bypass and leave the supports in place?). But let's say you got it built. To pull off this feat, you need to remove the supports simultaneously and quickly. That won't be easy since they'll be huge and firmly in place. Best synchronized demolition ever!
Ok, now you have this ring floating. It had to be built above the atmosphere because any winds or atmospheric effects could bring it down. But also, it had to be built at the right altitude to avoid all that space junk we have floating in orbit. (One stray satellite and BAM! we'll bring a whole new meaning to the phrase "Hey look at that huge metal ring that's crashing down to Earth!")
And, to make this easier for us, somehow we got it spinning (not very fast) so that angular momentum and orbital mechanics could help us out (Okay, those don't actually help the ring, but neither do they hurt it and they make it look cool; a spinning ring in space. I call that a net gain, so it helps).
Before the day is out, we have a problem. We forgot one crucial thing. That darn Moon! The tidal forces from the Moon (wherever it is at that time) upset the delicately balanced and unstable ring system. Now we have something that we should have considered; the extremely massive ring of steel we built is crashing towards Earth. It's going to leave a dent and kill lots of folks. On the plus side, building a massive ring like that will look good on your resume. | {
"domain": "physics.stackexchange",
"id": 17626,
"tags": "newtonian-mechanics, newtonian-gravity, orbital-motion, earth, satellites"
} |
What are these "ship wrecks" (pictured) in the old volcano crater? | Question: They consist of stone, of course, but have a strange U-shaped cross section, as if they were the remains of some ships without the bow and stern sections. Could anyone tell how they formed? They are almost perfectly in the center of the old, flat volcano (Strytur) near Hveravellir, Iceland. They are quite high uphill and I am surprised how lava could get into that place and then form such a shape, as it presumably only flows down.
Answer: If U-shaped in cross-section, it is plausible that these are the partial remnants of lava tubes. See, for example, Morphology and mechanism of eruption of postglacial shield volcanoes in Iceland (Rossi, 1996). It is difficult to propose more detailed formation mechanisms in the absence of significantly more information about the structure. | {
"domain": "earthscience.stackexchange",
"id": 255,
"tags": "volcanology, evolution"
} |
The time is a type of energy ? | Question: You know energy is necessary for the occurrence of an event. Time is also necessary for the occurrence of an event. If there is no time, there will be no event. Does that mean time is a type of energy?
Answer: Time is only a coordinate, just like x,y,z etc (up, forward, right etc).
Just as you use these coordinates to locate the position of an event in space, you have to use time $t$ to locate it in time. In fact, Einstein's relativity suggests that time is treated exactly like another dimension of space, implying the not unpopular idea that the universe is a 4-dimensional block of 'space-time'. Now if we adopt this idea, we now say that an event is just a point in 'space-time' and so its more like a location.
Energy, meanwhile, is a tricksy quantity but I prefer to think of it as the ability to do 'work' (basically assert a force over a distance). You can have energy even if nothing is happening per se. This is called potential energy, and as the name suggests it is the potential for work to occur. A ball held in the air has the potential to move downwards in earth's gravity.
Does this make things clearer?
TLDR: Basically, time is a location coordinate and not an inherent ability to do work. | {
"domain": "physics.stackexchange",
"id": 9561,
"tags": "energy, time"
} |
Gravitational time dilation with a unit of time corresponding to the period of a light wave | Question: How is it that time runs faster at regions further out from the center of a gravitational field if:
A clock's arm is made to make one cycle for a complete wavelength of a particular light beam. Consider the clock moving upwards from the surface of the earth as the light also moves up. Since the wavelength of the light increases as it loses energy, then the frequency must decrease:
$$c = λν$$
c is a constant, therefore
$$ λ ∝ \frac{1}{ν}$$
But the frequency is the number of cycles the light beam makes in a second: this number reduces. Since the period — the duration of time for one wavelength of the light (one cycle of the clock's arm) — is
$$ T = \frac{1}{f}$$
it increases for a decrease in frequency. Therefore, the duration of time for one cycle increases; the clock runs slower. Thus the clock runs slower with increased de-energisation of the light as it moves further away from the center of the gravitational field.
I get that experimental evidence differs, but I'm having a hard time finding the fault in the above claim.
Answer:
Therefore, the duration of time for one cycle increases; the clock runs slower.
Yes. The clock at a lower altitude runs slower as observed by a clock at a higher altitude. As the light from the lower clock goes higher it loses energy and has a longer period. Since the higher observer sees that the lower clock ticks at the same rate as the light, the gravitational redshift of the light implies that lower clock is slower as measured by the higher clock. | {
"domain": "physics.stackexchange",
"id": 61575,
"tags": "relativity, time-dilation"
} |
Equation of continuity | Question: I came across the following lines that appear after the derivation of equation of continuity for the steady flow of an ideal liquid in Resnick, Halliday, Kranes's Fundamentals of Physics:
The equation of continuity states that if within any volume element of space (not volume of fluid) there are no sources (where additional matter is introduced into the flow) or sinks (where matter is removed from the flow), then the total mass within the volume element must remain constant. In more general cases, if sources or sinks are present, the equation of continuity gives the mathematical representation of the very reasonable assertion that the rate of outflow OR inflow of matter is equal to the rate at which the mass contained in the volume element is changing.
Now, the statement in bold is what is troubling me. Is it not true that the mass contained in a volume element is simply ρdV where ρ is the density of the liquid at the location of some volume element of size dV. Now, since steady flow is assumed, ρ does not change with time. Hence, regardless of whether there are sources or sinks in the element, the mass of the fluid within the element also should not change with time ( as the mass is a function of only ρ and dV which is also constant). So where comes the question of "mass contained in the volume element changing"(refer blockquote)?
Also note the usage of OR in the passage. Does it mean the the rate of outflow is equal to rate of inflow? Are these two rates equal even when there are sources or sinks in the element?
Can someone please elaborate?
Answer: Well, if there are sources or sinks, the density can change in time since mass is being added or removed directly inside the volume, that is, not by just entering it from outside or leaving it through the surface of the volume.
If there are no sources or sinks, however, if the mass inside the volume is to change, it needs to get inside or outside through the surface.
Concerning the "OR": It can happen that there are parts of the surface through which fluid is entering the volume element and at the same time there are other parts of the surface through which fluid is leaving the volume. However, if you sum over all contributions along the whole surface, there can be either a net flow to the inside OR to the outside. The net flow cannot be to the inside AND to the outside at the same time. | {
"domain": "physics.stackexchange",
"id": 44916,
"tags": "fluid-dynamics"
} |
Is it in general true that $\nabla_\mu T^{\mu\nu}=0$ implies the matter equations of motion? | Question: I know of several cases where the covariant conservation of the energy momentum tensor $\nabla_\mu T^{\mu\nu}=0$ can be used to derive the equations of motion of the matter fields. Is this in general true?
Answer: Informal, short answer would be: $\nabla_a T^{ab} = 0$ can be used to deduce at least part (but not necessarily whole) of the matter field equations. A recent discussion about this question can be found in sections 5 and 6 of the paper
I. Smolić: "On the various aspects of electromagnetic potentials in spacetimes with symmetries", Class. Quantum Grav. 31 (2014) 235002. DOI: 10.1088/0264-9381/31/23/235002. arXiv: 1404.1936 [gr-qc]. | {
"domain": "physics.stackexchange",
"id": 19115,
"tags": "general-relativity, conservation-laws, stress-energy-momentum-tensor"
} |
How does the eddy velocity scale with its time scale? | Question: According to Kolmogorov's energy casacade model, if we have a flow with inertial velocity scale $\mathcal{V}$, inertial length scale $\mathcal{L}$ then we can calculate the eddy velocity of an eddy with length scale $l$ according to
$$v_l^3 \sim \epsilon l$$
Where $\epsilon =\mathcal{V}^3/\mathcal{L}$. Furthermore, this eddy has a timescale of $\tau_l \sim l/v_l$. Take now as example
\begin{equation}
\begin{split}
\mathcal{V}=1\\
\mathcal{L}=1\\
l=1
\end{split}
\end{equation}
Then also $v_l \sim 1$ m/s and $\tau_l \sim 1$ s. In the same flow, now consider an eddy that has $\tau_l\sim0.5$ s. What is the characteristic velocity of this eddy? The problem is that the eddy length scale also varies with the eddy time scale, so both $v_l$ and $l$ are unknown. Does anyone have an idea?
Answer: We know that
$$
v_l^3\sim\epsilon l,
$$
so
$$
v_l^2\sim\epsilon\tau_l.
$$
In your case, $\epsilon=1$ and $\tau_l=0.5$s so $v_l=0.71$m/s. | {
"domain": "physics.stackexchange",
"id": 78175,
"tags": "fluid-dynamics, dimensional-analysis, turbulence, scales"
} |
In Q-learning, Am I the one who will define the way in which actions allow the agent to interact with the environment? And the interactions will vary? | Question: In Q-learning, am I the one who will define the way in which actions allow the agent to interact with the environment, so that the way in which actions allow the agent to interact with the environment can vary greatly from the problem in question?
For example, in this article: https://www.learndatasci.com/tutorials/reinforcement-q-learning-scratch-python-openai-gym/, which explains Q-learnig, it teaches the Smartcab problem, it only has 6 actions (walk up, down, right and left, pickup and dropoff). The action of moving upwards makes the agent add +1 to Y, advancing its state. In this Smartcab example, the states are positions X and Y representing where the agent is.
But can the way in which actions allow the agent to interact with the scenario be something much broader depending on the problem? Instead of being movement actions (such as walking up, down, right and left), could they be more complex actions, which could make the agent change state in a very different way than in this Smartcab example?
In Q-learning, will the way in which the actions make the agent interact with the environment depend greatly on the problem in question, so that each problem can have its own rules for the agent to interact with the environment — rules that I myself can set according to my needs?
Answer: The action space for a reinforcement learning (RL) task is mostly determined by the problem you are applying RL to. As such, you get to define it in the following ways:
By choosing what problem to work on
By writing a formal description or code for the environment
By encoding a suitable representation of the action space for the agent code to interact with
The second and third items here are constrained by the ones above them. Other than that, RL as a whole is a high level and generalised description of learning by trial and error that applies to a very broad number of cases. Q-learning specifically may not be applicable to all of them, and is one of many solution methods.
Understanding which RL methods are suitable for a specific environment takes a bit of study. One of the factors that affects which methods work best is the nature of the action space. However, if you are just concerned about defining a similar number of discrete actions to TaxiCab, that happen to do something entirely different in a new problem of a similar size, then Q-learning should still apply, and you can absolutely decide on a different set of actions with entirely different consequences to the environment. | {
"domain": "ai.stackexchange",
"id": 4039,
"tags": "q-learning"
} |
How can a single photon or electron create a small visible dot on a photosensitive plate? | Question: The photon or electron is just one subatomic particle, but if it hits the film and creates a dot visible to the human eye (btw, modern technology can do this), then the dot must be a collection of millions of atoms or molecules on the screen that have been transformed via chemical reactions triggered by that single photon or electron.
How exactly can it happen?
(Presumably the photon hits just one atom in the photosensitive plate (as in a double slit experiment), thus changing only that one single atom, a happening that is still microscopic and invisible to the human eye.)
Edit: If I rephrase the question and say "a few photons", they are still completely microscopic and my question will be the same.
Answer:
but if it hits the film and creates a dot visible
I guess that You think of a classical film made from
silver halide crystals in gelatin?
Your assumption is quite good, only the "atom" is not
the right thing.
Research on the most sensitive films showed that about
4 absorbed photons are needed to transform one silver halide
crystal into the "latent" picture form.
This form is an electron trapped in some crystal imperfection,
called a "trap" (trace amounts of sulfide are important for this).
This "latent" sensitized crystal is then preferentially reduced
to silver when the film is immersed in the developer,
a catalytic effect.
Crystals without these trapped electrons will not be reduced,
at least not within the usual temperature and time used
for that development.
What is important here is that this "development" (reaction with a
reducing chemical) is an amplifying process,
reducing millions of silver atoms (the whole crystal)
induced by one of those trapped electrons.
PS
The geiger counter tube and the photomultiplier mentioned in the
comments above are good examples for similar action, because
both contain "built in" amplifiers, but physical, whereas the
photographic film was chemically amplified. | {
"domain": "physics.stackexchange",
"id": 19286,
"tags": "photons"
} |
Work of the same force acting once on a moving object and once on a motionless one | Question: If the same force $F$ acted once on an already moving object and once on a motionless object. would it give more energy to the moving object than to the motionless one?. Note that both objects have the same mass and are similar.
according to the work formula $W = Fd$ (in this question work and displacement are in the same direction), the force will give more energy to the moving body because its displacement will be larger due to the fact that it already had some speed before the force acted on it. So is it true that a moving object gains more energy from the same force?
Note that in both cases the force acts for the same time.
Answer: Yes, the work will be larger if the object is already moving in the direction of the force.
The mechanical power (work per time) is given as:
$$P = \frac{\Delta W}{\Delta t} = \frac{Fd}{\Delta t} = F v$$
where $v = \frac{d}{\Delta t}$ is the velocity.
So, yes, it costs extra energy to apply the same force to an already moving object than if it was at rest!
That might be unintuitive at first. It can help to remember that kinetic energy is $E=\frac{1}{2} mv^2$. Because velocity is squared, increasing the speed from, say, 0 to 1 m/s costs significantly less energy than increasing it from 100 m/s to 101 m/s.
If you are confused by this, you are not alone. For instance, rocket engines puzzled engineers & physicists for a long time. While it is burning, the engine produces a roughly constant thrust. According to the equation above, the power $P=Fv$ delivered by the engine will increase as the rocket accelerates. Eventually, the power will exceed the chemical energy released by burning the fuel. (I am not going to spoil the fun by posting the solution to that paradox right here!)
"domain": "physics.stackexchange",
"id": 78448,
"tags": "newtonian-mechanics, energy, kinematics, reference-frames, work"
} |
Water pressure at the bottom of a box changes drastically depending on whether a water column above is connected or not? | Question: (For the purposes of this question I'm ignoring atmospheric pressure completely)
Consider a hollow cubic box with side length $1\text{m}$ that we fill up with water. Now suppose we make a small opening of $1\text{mm}^2$ and on top of this we attach a square rigid pipe of length $1000\text{m}$ and cross sectional area also $1\text{mm}^2$ (so its cross section has $1\text{mm}$ long sides). This pipe holds $1000\text{cm}^3 = 1\text{L}$ of water. When we fill up both the box and the pipe with water the pressure due to the water at any point on the bottom of the box will be $\rho gh = 1000\times9.81\times1001 Pa = 9.82 \text{MPa}$
However now if instead of cutting a hole in the box and then placing the pipe on top of it I instead just fill everything with water and place the pipe on top of the box (so that there is no way for water to move from one to the other) then what I have is just a $1\text{m}^3$ box full of water with a $1\text{kg}$ mass on top of it (the mass of the full pipe) which puts a force of $g = 9.81\text{N}$ on the top of the box. Since the water at the top of the box must have sufficient pressure to resist this force and the area of the top of the box is $1\text{m}^2$, the pressure at the top of the box must be $9.81/1 = 9.81Pa$ and therefore the pressure at the bottom of the box will be this pressure plus the pressure from $1m$ of water (which is $\rho gh = 9.81 \text{kPa}$) which totals to only $9.82 \text{kPa}$.
(See diagram below showing these two cases pictorally)
This seems very strange to me: if we don't cut out the $1\text{mm}^2$ hole then the pressure at the bottom of the box is $9.82 \text{kPa}$, but if we do make this tiny cut the pressure suddenly jumps up by a factor of $1,000$, turning into $9.82 \text{MPa}$. This is so unintuitive to me that I feel like I must be doing something wrong here. So, where exactly am I messing up, or should I be updating my intuition?
Answer: A major issue with your thought experiment is that, in your two scenarios, you seem to be making two different and contradictory assumptions about the rigidity of the box (and/or the compressibility of the fluid).
In your first scenario, you seem to be implicitly assuming that the walls of the box (and the pipe) are perfectly rigid. If the top and sides of the box can deform even a little bit under the enormous pressure of the 1 km column of water, they'll flex outwards and expand the volume of the box until most of the water in the pipe (of which there is only one liter) has flowed into the box, drastically reducing the pressure.
In your second scenario, however, your reasoning only works if the box is not perfectly rigid. If it was, the weight of the pipe on top would be supported fully by the box, and none of it would be transferred into the water inside the box. Instead, you seem to be assuming that the top of the box is effectively floating on the water inside, so that the full weight of the pipe is transferred to the water.
Furthermore, if the box was perfectly rigid, fully closed and filled with incompressible fluid, the pressure inside in scenario 2 would actually be indeterminate! You can see this by observing that, since the box is rigid and the fluid incompressible, we can fill the box with fluid to any pressure before sealing it, and the volume of fluid inside the box will be the same! Thus, under these assumptions, just knowing the shape of the box and the amount of fluid inside is not enough to determine the pressure.
Obviously that's not a physically meaningful scenario, but we can regard it as an approximation of a situation where the box is almost rigid and/or the fluid almost incompressible.
In particular, let's assume that there's a valve at the top of the box, where the pipe will connect, which we can open and close at will.
We will first remove the pipe and pour water in through the top valve until the box is full of water and the pressure at the top of the box equals ambient pressure. As you've calculated, the pressure at the bottom of the box will then be (ambient pressure plus) 9.81 kPa, i.e. the pressure under one meter of water.
If we now close the valve, the pressure inside the box will not change. Now we plug in the 1 km × 1 mm² pipe into the (closed) valve and fill that with water too.
The pressure at the bottom of the pipe (above the closed valve) will now be 9.81 MPa. (We assume the pipe and the valve somehow withstand this pressure.) The only thing that has changed below the valve, however, is that there's now an extra 1 kg weight of water (plus the weight of the empty pipe itself, which realistically would of course be way more than 1 kg) resting on top of the box.
Since we assumed the box to be almost perfectly rigid, we can assume that the box will support most of this weight by itself, and thus the pressure of the water inside should not appreciably change. However, even if the entire weight of the water column was somehow transferred through the top of the box to the water below — maybe the "top" is actually a piston supported by the water below, but otherwise free to slide up and down? — that would still only increase the pressure inside the box by 9.81 Pa, i.e. from 9.81 to 9.81981 ≈ 9.82 kPa.
Now let's open the valve. What happens?
If the top of the box was actually a floating piston (of negligible mass, just like the pipe), what would happen is that the liter of water in the pipe would simply drain into the box, while the piston would rise by 1 mm to accommodate it. The pressure at the bottom of the box would still be 9.82 kPa just like before opening the valve.
However, let's go back to our initial assumption of a nearly rigid box. When the valve is opened, the 9.81 MPa pressure at the bottom of the pipe is now transferred to the water in the box, and through it to the sides of the box. That's a lot of pressure pushing the sides outwards, and since they're only almost rigid, they'll still deform a little. And they only need to move a fraction of a millimeter for the box to expand enough to fit the extra liter of water from the pipe.
Even if the walls of the box were really rigid, and could withstand a pressure of nearly 10 MPa without moving even a fraction of a millimeter, the water in the box is only nearly incompressible. The bulk modulus of water is around 2.2 GPa, so at a pressure of 10 MPa the volume of water decreases by about 0.45%. Since the volume of the pipe is only 0.1% of the volume of the box, however, that's more than enough for all the water in the pipe to fit into the box even without the walls flexing at all.
Of course, as water drains out of the pipe and into the box, the pressure in the box will drop until the system attains an equilibrium, with a pressure at the bottom of the box somewhere strictly between 9.82 kPa and 9.82 MPa.
The exact equilibrium pressure depends on the rigidity of the box and the compressibility of the fluid in it. Assuming a perfectly rigid box and a bulk modulus of 2.2 GPa for water, we can in fact calculate the equilibrium height of the water column as about 180 m, with a pressure at the bottom of about 1.8 MPa, which is enough to compress the volume of the water in the box by about 0.08%, or just enough to accommodate the extra 820 ml of water drained from the pipe.
Ps. What if we now close the valve again?
The pressure on both sides of the valve is now the same, so nothing really changes. The box still contains approximately a cubic meter of water under high pressure (up to 1.8 MPa, which is a lot, but nowhere near the 9.8 MPa we'd get for the open-valve equilibrium pressure if we assumed the water to be perfectly incompressible and the box perfectly rigid), while the pipe still has enough water in it to maintain the same pressure on the other side of the valve (i.e. a column about 180 m high, which is also a lot, but nowhere near 1 km). | {
"domain": "physics.stackexchange",
"id": 100440,
"tags": "homework-and-exercises, forces, pressure, fluid-statics, density"
} |
Is the Moon's orbital plane angle relative to the ecliptic constant? | Question: I've come across a number of sources (example) saying some variation of this:
The moon's orbital path around Earth is tilted by 5.1 degrees with respect to the orbit of the Earth around the sun.
This isn't strictly true though, right? Per this article, the angle of the moon's orbit varies by something like +/-5 degrees. So really we should say "The moon's orbital path around Earth is tilted by up to 5.1 degrees".
Is this a correct understanding?
Answer: Not quite.
The angle of the Moon's orbital plane with respect to the ecliptic is fairly constant at ~5.1° with a bit of variation as pointed out by @PM2Ring in their comment. The orbit and the plane defined by it does not change (much).
Over the duration of one month the Moon moves along its orbit. Thus its position changes during these days from a position about 5.1 degrees below the ecliptic, to one on the ecliptic 7 days later, to one 5.1 degrees above 14 days later — and back to -5.1° when the orbit is completed one month later.
Thus:
The Moon's orbital path around Earth is tilted by 5.1 degrees.
The Moon's position varies up to 5.1 degrees below and above the ecliptic. | {
"domain": "astronomy.stackexchange",
"id": 7361,
"tags": "the-moon, orbit, precession"
} |
How is it possible to exert a force on a static object? | Question: Assuming mass doesn't change, force is defined as mass * acceleration. Acceleration is the change in velocity as time changes. How is it possible then to exert a force on an object that doesn't move? If velocity doesn't change, then acceleration must be 0.
Answer: It is only NET force that is equal to mass times acceleration, not each individual force. You need to take the vector sum of the forces acting on a body to get the net force. It is possible to exert a force on a body that doesn't move if the resultant of that force together with all the other forces acting on the body sum to zero. (P.S. Michigan....GO BLUE) | {
"domain": "physics.stackexchange",
"id": 30535,
"tags": "newtonian-mechanics, classical-mechanics"
} |
Error in launching Gazebo in AUTOWARE? | Question:
Operating system and version:
Ubuntu 18.04
gcc (Ubuntu 7.4.0-1ubuntu1~18.04.1) 7.4.0
cmake version 3.10.2
Autoware installation type
Built from source
Autoware version 1.12.0-alpha2
ROS distribution:
melodic
ROS installation type:
sudo apt-get install ros-melodic-desktop-full
Gazebo 9
I am trying to launch gazebo using autoware and I am getting this following error:
/home/king/autoware/ros/install/runtime_manager/lib/runtime_manager/runtime_manager_dialog.py:2865: wxPyDeprecationgarning: Call to deprecated item.
wx.InitAllImageHandlers()
loading param.yaml
loading qs.yaml
Subscribe[localization] topic=/ndt_stat, key=/ndt_stat.NDTStat.exe_time
Subscribe[detection] topic=/topic2, key=/topic2
Subscribe[detection] topic=/topic1, key=/topic1
loading setup.yaml
loading map.yaml
loading sensing.yaml
loading computing.yaml
loading interface.yaml
loading data.yaml
loading simulation.yaml
['rosparam', 'set', '/use_sim_time', 'false']
loading status.yaml
loading state.yaml
loading topics.yaml
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:46.989: Negative content width -15 (allocation 1, extents 8x8) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:46.990: Negative content width -15 (allocation 1, extents 8x8) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:46.990: Negative content width -15 (allocation 1, extents 8x8) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:46.991: Negative content width -15 (allocation 1, extents 8x8) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:46.991: Negative content width -15 (allocation 1, extents 8x8) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:46.992: Negative content width -15 (allocation 1, extents 8x8) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:46.993: Negative content width -13 (allocation 3, extents 8x8) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:46.993: Negative content width -12 (allocation 4, extents 8x8) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:46.993: Negative content width -12 (allocation 4, extents 8x8) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:46.994: Negative content width -15 (allocation 1, extents 8x8) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:46.994: Negative content height -11 (allocation 1, extents 6x6) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:46.995: Negative content width -15 (allocation 1, extents 8x8) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:46.995: Negative content height -11 (allocation 1, extents 6x6) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:46.995: Negative content width -1 (allocation 1, extents 1x1) while allocating gadget (node border, owner GtkFrame)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:46.995: Negative content width -15 (allocation 1, extents 8x8) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:46.995: Negative content height -11 (allocation 1, extents 6x6) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:46.995: Negative content width -1 (allocation 1, extents 1x1) while allocating gadget (node border, owner GtkFrame)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:46.995: Negative content width -15 (allocation 1, extents 8x8) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:46.995: Negative content height -11 (allocation 1, extents 6x6) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:46.995: Negative content width -1 (allocation 1, extents 1x1) while allocating gadget (node border, owner GtkFrame)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:46.995: Negative content width -15 (allocation 1, extents 8x8) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:46.995: Negative content height -11 (allocation 1, extents 6x6) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:46.995: Negative content width -1 (allocation 1, extents 1x1) while allocating gadget (node border, owner GtkFrame)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:46.995: Negative content width -15 (allocation 1, extents 8x8) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:46.995: Negative content height -11 (allocation 1, extents 6x6) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:46.995: Negative content width -1 (allocation 1, extents 1x1) while allocating gadget (node border, owner GtkFrame)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:46.995: Negative content width -15 (allocation 1, extents 8x8) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:46.995: Negative content height -11 (allocation 1, extents 6x6) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:46.996: Negative content width -15 (allocation 1, extents 8x8) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:46.996: Negative content height -11 (allocation 1, extents 6x6) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:46.996: Negative content width -15 (allocation 1, extents 8x8) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:46.996: Negative content height -11 (allocation 1, extents 6x6) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:46.996: Negative content width -15 (allocation 1, extents 8x8) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:46.996: Negative content height -11 (allocation 1, extents 6x6) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:46.996: Negative content width -15 (allocation 1, extents 8x8) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:46.996: Negative content height -11 (allocation 1, extents 6x6) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:46.997: Negative content width -15 (allocation 1, extents 8x8) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:46.997: Negative content width -15 (allocation 1, extents 8x8) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:46.999: Negative content width -15 (allocation 1, extents 8x8) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:46.999: Negative content width -15 (allocation 1, extents 8x8) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:47.000: Negative content width -15 (allocation 1, extents 8x8) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:47.000: Negative content width -15 (allocation 1, extents 8x8) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:47.000: Negative content height -11 (allocation 1, extents 6x6) while allocating gadget (node button, owner GtkToggleButton)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:47.001: Negative content width -15 (allocation 1, extents 8x8) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:47.001: Negative content height -11 (allocation 1, extents 6x6) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:47.001: Negative content width -15 (allocation 1, extents 8x8) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:47.001: Negative content width -15 (allocation 1, extents 8x8) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:47.001: Negative content height -11 (allocation 1, extents 6x6) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:47.002: Negative content height -11 (allocation 1, extents 6x6) while allocating gadget (node button, owner GtkToggleButton)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:47.002: Negative content width -15 (allocation 1, extents 8x8) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:47.002: Negative content height -1 (allocation 11, extents 6x6) while allocating gadget (node button, owner GtkToggleButton)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:47.011: Negative content width -15 (allocation 1, extents 8x8) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:47.011: Negative content height -11 (allocation 1, extents 6x6) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:47.011: Negative content width -5 (allocation 1, extents 3x3) while allocating gadget (node scale, owner GtkScale)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:47.011: Negative content height -5 (allocation 1, extents 3x3) while allocating gadget (node scale, owner GtkScale)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:47.011: for_size smaller than min-size (0 < 3) while measuring gadget (node trough, owner GtkScale)
(runtime_manager_dialog.py:14594): Gtk-CRITICAL **: 23:55:47.011: gtk_box_gadget_distribute: assertion 'size >= 0' failed in GtkScale
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:47.011: Negative content height -2 (allocation 0, extents 1x1) while allocating gadget (node trough, owner GtkScale)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:47.011: Negative content width -15 (allocation 1, extents 8x8) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:47.011: Negative content height -11 (allocation 1, extents 6x6) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:47.011: Negative content width -5 (allocation 1, extents 3x3) while allocating gadget (node scale, owner GtkScale)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:47.011: Negative content height -5 (allocation 1, extents 3x3) while allocating gadget (node scale, owner GtkScale)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:47.011: for_size smaller than min-size (0 < 3) while measuring gadget (node trough, owner GtkScale)
(runtime_manager_dialog.py:14594): Gtk-CRITICAL **: 23:55:47.011: gtk_box_gadget_distribute: assertion 'size >= 0' failed in GtkScale
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:47.011: Negative content height -2 (allocation 0, extents 1x1) while allocating gadget (node trough, owner GtkScale)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:47.011: Negative content width -15 (allocation 1, extents 8x8) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:47.011: Negative content height -11 (allocation 1, extents 6x6) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:47.011: Negative content width -5 (allocation 1, extents 3x3) while allocating gadget (node scale, owner GtkScale)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:47.011: Negative content height -5 (allocation 1, extents 3x3) while allocating gadget (node scale, owner GtkScale)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:47.011: for_size smaller than min-size (0 < 3) while measuring gadget (node trough, owner GtkScale)
(runtime_manager_dialog.py:14594): Gtk-CRITICAL **: 23:55:47.011: gtk_box_gadget_distribute: assertion 'size >= 0' failed in GtkScale
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:47.011: Negative content height -2 (allocation 0, extents 1x1) while allocating gadget (node trough, owner GtkScale)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:47.011: Negative content width -15 (allocation 1, extents 8x8) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:47.011: Negative content height -11 (allocation 1, extents 6x6) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:47.011: Negative content width -5 (allocation 1, extents 3x3) while allocating gadget (node scale, owner GtkScale)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:47.011: Negative content height -5 (allocation 1, extents 3x3) while allocating gadget (node scale, owner GtkScale)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:47.011: for_size smaller than min-size (0 < 3) while measuring gadget (node trough, owner GtkScale)
(runtime_manager_dialog.py:14594): Gtk-CRITICAL **: 23:55:47.011: gtk_box_gadget_distribute: assertion 'size >= 0' failed in GtkScale
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:47.011: Negative content height -2 (allocation 0, extents 1x1) while allocating gadget (node trough, owner GtkScale)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:47.011: Negative content width -15 (allocation 1, extents 8x8) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:47.012: Negative content height -11 (allocation 1, extents 6x6) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:47.012: Negative content width -5 (allocation 1, extents 3x3) while allocating gadget (node scale, owner GtkScale)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:47.012: Negative content height -5 (allocation 1, extents 3x3) while allocating gadget (node scale, owner GtkScale)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:47.012: for_size smaller than min-size (0 < 3) while measuring gadget (node trough, owner GtkScale)
(runtime_manager_dialog.py:14594): Gtk-CRITICAL **: 23:55:47.012: gtk_box_gadget_distribute: assertion 'size >= 0' failed in GtkScale
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:47.012: Negative content height -2 (allocation 0, extents 1x1) while allocating gadget (node trough, owner GtkScale)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:47.012: Negative content width -15 (allocation 1, extents 8x8) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:47.012: Negative content width -5 (allocation 1, extents 3x3) while allocating gadget (node scale, owner GtkScale)
(runtime_manager_dialog.py:14594): Gtk-CRITICAL **: 23:55:47.012: gtk_box_gadget_distribute: assertion 'size >= 0' failed in GtkScale
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:47.014: Negative content width -15 (allocation 1, extents 8x8) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:47.014: Negative content height -11 (allocation 1, extents 6x6) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:47.014: Negative content width -15 (allocation 1, extents 8x8) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:47.014: Negative content height -11 (allocation 1, extents 6x6) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:47.014: Negative content width -15 (allocation 1, extents 8x8) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:47.015: Negative content width -15 (allocation 1, extents 8x8) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:47.015: Negative content height -11 (allocation 1, extents 6x6) while allocating gadget (node entry, owner GtkEntry)
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:49.555: Negative content height -1 (allocation 11, extents 6x6) while allocating gadget (node button, owner GtkToggleButton)
['sh', '-c', '$(rospack find runtime_manager)/scripts/gazebo.sh']
pid=14746
sched policy=OTHER prio=0
Failed connect to /tmp/autoware_proc_manager
pid=14749
sched policy=OTHER prio=0
Failed connect to /tmp/autoware_proc_manager
warning: xacro: in-order processing became default in ROS Melodic. You can drop the option.
warning: inconsistent namespace redefinitions for xmlns:xacro:
old: http://ros.org/wiki/xacro
new: http://www.ros.org/wiki/xacro (/opt/ros/melodic/share/velodyne_description/urdf/HDL-32E.urdf.xacro)
warning: inconsistent namespace redefinitions for xmlns:xacro:
old: http://ros.org/wiki/xacro
new: http://www.ros.org/wiki/xacro (/opt/ros/melodic/share/velodyne_description/urdf/VLP-16.urdf.xacro)
warning: redefining global property: pi
when processing file: /home/sai/autoware/ros/install/gazebo_camera_description/share/gazebo_camera_description/urdf/monocular_camera.xacro
included from: /home/sai/autoware/ros/install/vehicle_model/share/vehicle_model/urdf/vehicle.xacro
warning: redefining global property: pi
when processing file: /home/sai/autoware/ros/install/gazebo_imu_description/share/gazebo_imu_description/urdf/imu.xacro
included from: /home/sai/autoware/ros/install/vehicle_model/share/vehicle_model/urdf/vehicle.xacro
warning: redefining global property: pi
when processing file: /home/sai/autoware/ros/install/vehicle_model/share/vehicle_model/urdf/vehicle.xacro
warning: redefining global property: pi
when processing file: /home/sai/autoware/ros/install/vehicle_model/share/vehicle_model/urdf/vehicle.xacro
warning: redefining global property: pi
when processing file: /home/sai/autoware/ros/install/vehicle_model/share/vehicle_model/urdf/vehicle.xacro
warning: redefining global property: pi
when processing file: /home/sai/autoware/ros/install/vehicle_model/share/vehicle_model/urdf/vehicle.xacro
warning: redefining global property: pi
when processing file: /home/sai/autoware/ros/install/vehicle_model/share/vehicle_model/urdf/vehicle.xacro
warning: redefining global property: pi
when processing file: /home/sai/autoware/ros/install/vehicle_model/share/vehicle_model/urdf/vehicle.xacro
warning: redefining global property: pi
when processing file: /home/sai/autoware/ros/install/vehicle_model/share/vehicle_model/urdf/vehicle.xacro
warning: redefining global property: pi
when processing file: /home/sai/autoware/ros/install/vehicle_model/share/vehicle_model/urdf/vehicle.xacro
warning: redefining global property: pi
when processing file: /home/sai/autoware/ros/install/vehicle_model/share/vehicle_model/urdf/vehicle.xacro
warning: xacro.py is deprecated; please use xacro instead
(runtime_manager_dialog.py:14594): Gtk-WARNING **: 23:55:52.154: Negative content height -1 (allocation 11, extents 6x6) while allocating gadget (node button, owner GtkToggleButton)
... logging to /home/sai/.ros/log/36dad7de-9ddc-11e9-8f0d-0242c19a1927/roslaunch-sai-Blade-14751.log
Checking log directory for disk usage. This may take awhile.
Press Ctrl-C to interrupt
Done checking log file disk usage. Usage is <1GB.
started roslaunch server http://sai-Blade:36717/
SUMMARY
========
PARAMETERS
* /autoware_gazebo/joint_state_controller/publish_rate: 50
* /autoware_gazebo/joint_state_controller/type: joint_state_contr...
* /autoware_gazebo/steering_left_front_position_controller/joint: steering_left_fro...
* /autoware_gazebo/steering_left_front_position_controller/pid/d: 10.0
* /autoware_gazebo/steering_left_front_position_controller/pid/i: 100.0
* /autoware_gazebo/steering_left_front_position_controller/pid/p: 100000.0
* /autoware_gazebo/steering_left_front_position_controller/type: effort_controller...
* /autoware_gazebo/steering_right_front_position_controller/joint: steering_right_fr...
* /autoware_gazebo/steering_right_front_position_controller/pid/d: 10.0
* /autoware_gazebo/steering_right_front_position_controller/pid/i: 100.0
* /autoware_gazebo/steering_right_front_position_controller/pid/p: 100000.0
* /autoware_gazebo/steering_right_front_position_controller/type: effort_controller...
* /autoware_gazebo/vehicle_gazebo_info_publisher/enable_base_link_tf: False
* /autoware_gazebo/vehicle_gazebo_input_subscriber/ctrl_cmd: False
* /autoware_gazebo/vehicle_gazebo_input_subscriber/twist_sub: True
* /autoware_gazebo/vehicle_gazebo_input_subscriber/twiststamped: True
* /autoware_gazebo/wheel_left_rear_velocity_controller/joint: wheel_left_rear_j...
* /autoware_gazebo/wheel_left_rear_velocity_controller/pid/d: 10.0
* /autoware_gazebo/wheel_left_rear_velocity_controller/pid/i: 0.01
* /autoware_gazebo/wheel_left_rear_velocity_controller/pid/p: 10.0
* /autoware_gazebo/wheel_left_rear_velocity_controller/type: velocity_controll...
* /autoware_gazebo/wheel_right_rear_velocity_controller/joint: wheel_right_rear_...
* /autoware_gazebo/wheel_right_rear_velocity_controller/pid/d: 10.0
* /autoware_gazebo/wheel_right_rear_velocity_controller/pid/i: 0.01
* /autoware_gazebo/wheel_right_rear_velocity_controller/pid/p: 10.0
* /autoware_gazebo/wheel_right_rear_velocity_controller/type: velocity_controll...
* /gazebo/enable_ros_network: True
* /robot_description: <?xml version="1....
* /rosdistro: melodic
* /rosversion: 1.14.3
* /use_sim_time: True
* /vehicle_info/maximum_steering_angle: 97.3
* /vehicle_info/minimum_turning_radius: 2.95
* /vehicle_info/vehicle_height: 1.5
* /vehicle_info/vehicle_length: 4.82
* /vehicle_info/vehicle_mass: 5000.0
* /vehicle_info/vehicle_width: 1.81
* /vehicle_info/wheel_base: 2.95
* /vehicle_info/wheel_radius: 0.341
* /vehicle_info/wheel_tread: 1.55
* /vehicle_info/wheel_width: 0.225
NODES
/autoware_gazebo/
controller_spawner (controller_manager/spawner)
robot_state_publisher (robot_state_publisher/robot_state_publisher)
vehicle_gazebo_info_publisher (vehicle_gazebo_simulation_interface/vehicle_gazebo_info_publisher)
vehicle_gazebo_input_subscriber (vehicle_gazebo_simulation_interface/vehicle_gazebo_input_subscriber)
/
gazebo (gazebo_ros/gzserver)
gazebo_gui (gazebo_ros/gzclient)
spawn_urdf (gazebo_ros/spawn_model)
ROS_MASTER_URI=http://localhost:11311
process[gazebo-1]: started with pid [14770]
process[gazebo_gui-2]: started with pid [14775]
process[spawn_urdf-3]: started with pid [14780]
process[autoware_gazebo/controller_spawner-4]: started with pid [14781]
process[autoware_gazebo/robot_state_publisher-5]: started with pid [14782]
process[autoware_gazebo/vehicle_gazebo_input_subscriber-6]: started with pid [14783]
[WARN] [1562190953.079384, 0.000000]: wait_for_service(/autoware_gazebo/controller_manager/load_controller): failed to contact, will keep trying
[ERROR] [1562190955.116131279, 0.154000000]: No p gain specified for pid. Namespace: /autoware_gazebo/gazebo_ros_control/pid_gains/wheel_left_rear_joint
[ERROR] [1562190955.116912991, 0.154000000]: No p gain specified for pid. Namespace: /autoware_gazebo/gazebo_ros_control/pid_gains/wheel_right_rear_joint
[Err] [REST.cc:205] Error in REST request
libcurl: (51) SSL: no alternative certificate subject name matches target host name 'api.ignitionfuel.org'
Originally posted by AM97 on ROS Answers with karma: 139 on 2019-07-03
Post score: 0
Answer:
Thank you for reporting the bug.
I think that it has probably been fixed in the latest version.
https://gitlab.com/autowarefoundation/autoware.ai/visualization/commit/2aab7e9df619e37d7576ea7fa7a7e9832fb02a88
Originally posted by yukkysaito with karma: 41 on 2019-07-03
This answer was ACCEPTED on the original site
Post score: 0
Original comments
Comment by AM97 on 2019-07-04:
@yukkysaito That means Should I update my autoware to 1.12.0-beta.1 from present version of 1.12.0-alpha2 which I am using. If I need to update to 1.12.0-beta.1 Should I delete 1.12.0-alpha2 or If possible without deleting that is there any command to update to 1.12.0-beta1. And After UPdating to 1.12.0-beta1 DO I need to again build by Colcon? | {
"domain": "robotics.stackexchange",
"id": 33332,
"tags": "gazebo, ros-melodic"
} |
Connection issue using nmea_serial_driver | Question:
I'm trying to use the nmea_serial_driver node (from the nmea_navsat_driver package) to read NMEA strings that are coming from a GPS unit I have plugged into a DB-9 serial port on my computer. The GPS is outputting standard GGA sentences at 38400 baud. The DB-9 port is COM1 which I'm pretty sure translates to /dev/ttyS0. I'm running this command:
$ rosrun nmea_navsat_driver nmea_serial_driver _port:=/dev/ttyUSB0 _baud:=38400
and I get this error message:
Traceback (most recent call last):
File "/opt/ros/indigo/lib/nmea_navsat_driver/nmea_serial_driver", line 49, in <module>
GPS = serial.Serial(port=serial_port, baudrate=serial_baud, timeout=2)
File "/usr/lib/python2.7/dist-packages/serial/serialutil.py", line 261, in __init__
self.open()
File "/usr/lib/python2.7/dist-packages/serial/serialposix.py", line 278, in open
raise SerialException("could not open port %s: %s:" % (self._port, msg))
serial.serialutil.SerialException: could not open port /dev/ttyS0: [Errno 13] permission denied: '/dev/ttyS0'
I don't know if I'm getting this message because the port hasn't been configured properly, because i don't have permission to touch the port (as the error suggests) or because I've got the wrong port/baudrate altogether. I have run the same command on ttyS1 and I got an identical error message. For reference, I ran the dmesg command:
$ dmesg | grep tty
and the output was the same regardless of whether or not the GPS was plugged in:
[ 0.000000] console [tty0] enabled
[ 0.471014] 00.08: ttyS0 at I/O 0x3f8 (irq = 4, base_baud = 115200) is a 16550A
[ 0.492790] 00.09: ttyS1 at I/O 0x2f8 (irq = 4, base_baud = 115200) is a 16550A
[ 2.216655] cdc_acm 3-11:1.0: ttyACM0: USB ACM device
I suspect that this output can tell me something about the problem, but I don't understand much of it. Any help or advice on how to get the driver working will be greatly appreciated.
Originally posted by M@t on ROS Answers with karma: 2327 on 2016-04-14
Post score: 0
Answer:
Of course, irony would have it that I find the answer to my problem the minute after asking for help. So to summarize...
Problem: I cannot get nmea_serial_driver to read from a ttyS0 comm port, instead getting a permission denied error message (see above).
Cause: The user "administrator" was not a part of the "dialout" group, which is the only group that can access comm ports. You can check this by typing the following (without the <>):
groups <username>
This will print a list of groups the user is a part of. If you don't see "dialout" in the list, then you cannot access com ports and this is the probable cause of the problem.
Solution: To add the user to the "dialout" group type:
sudo usermod -aG dialout <username>
Then log out and back in. Run the "groups" command again to double check that you were added correctly.
Originally posted by M@t with karma: 2327 on 2016-04-14
This answer was ACCEPTED on the original site
Post score: 1 | {
"domain": "robotics.stackexchange",
"id": 24381,
"tags": "ros, gps"
} |
Finite State Machine code | Question: I have been making this FSM today. However, as this is probably the biggest practical program I have ever written in CL, I don't know if there are some things that could be improved, or if using a closure is the best thing here.
Any feedback is appreciated.
;;; States: -- EnterMineAndDigForNugget
;;; -- QuenchThirst
;;; -- GoHomeAndSleepTillRested
;;; -- VisitBankAndDepositGold
(defmacro while (test &rest body)
`(do ()
((not ,test))
,@body))
(defmacro enter (state)
`(setf state ,state))
(defun make-miner ()
(let ((wealth 0)
(thirst 0)
(rested 0)
(state 'EnterMineAndDigForNugget))
(list
(defun EnterMineAndDigForNugget ()
; (setf location 'mine)
(format t "~&Diggin' up gold!")
(incf wealth)
(incf thirst)
(cond ((>= thirst 7) (enter 'QuenchThirst))
(t (enter 'VisitBankAndDepositGold))))
(defun QuenchThirst ()
(format t "~&Drinkin' old good whiskey")
(setf thirst 0)
(enter 'EnterMineAndDigForNugget))
(defun VisitBankAndDepositGold ()
(format t "~&All this gold ought to be stored somewhere!")
(incf wealth)
(cond ((>= wealth 5) (progn
(format t "~&Too much gold for today, let's sleep!")
(enter 'GoHomeAndSleepTillRested)
(setf wealth 0)))
(t (EnterMineAndDigForNugget))))
(defun GoHomeAndSleepTillRested ()
(while (<= rested 3)
(format t "~&Sleepin'")
(incf rested))
(enter 'EnterMineAndDigForNugget)
(setf rested 0))
(defun controller ()
(dotimes (n 30)
(cond ((equal state 'QuenchThirst) (QuenchThirst))
((equal state 'VisitBankAndDepositGold) (VisitBankAndDepositGold))
((equal state 'GoHomeAndSleepTillRested) (GoHomeAndSleepTillRested))
((equal state 'EnterMineAndDigForNugget) (EnterMineAndDigForNugget))))))))
EDIT
I have applied all the suggested changes but for the flet/labels one. Everything worked fine until I changed the one of "set the state to the next function". Now, the macro enter doesn't seem to be ever called.
This is the current state of the code, with the required code to make it work
;;; States: -- enter-mine-and-dig-for-nugget
;;; -- quench-thirst
;;; -- go-home-and-sleep-till-rested
;;; -- visit-bank-and-deposit-gold
(defmacro enter (state)
`(setf state ,state))
(defun make-miner ()
(let ((wealth 0)
(thirst 0)
(rested 0)
(state #'enter-mine-and-dig-for-nugget))
(list
(defun enter-mine-and-dig-for-nugget ()
(format t "~&Diggin' up gold!")
(incf wealth)
(incf thirst)
(if (>= thirst 7)
(enter #'quench-thirst)
(enter #'visit-bank-and-deposit-gold)))
(defun quench-thirst ()
(format t "~&Drinkin' old good whiskey")
(setf thirst 0)
(enter #'enter-mine-and-dig-for-nugget))
(defun visit-bank-and-deposit-gold ()
(format t "~&All this gold ought to be stored somewhere!")
(incf wealth)
(if (>= wealth 5)
(progn
(format t "~&Too much gold for today, let's sleep!")
(enter #'go-home-and-sleep-till-rested)
(setf wealth 0))
(enter #'enter-mine-and-dig-for-nugget)))
(defun go-home-and-sleep-till-rested ()
(dotimes (i 4)
(format t "~&Sleepin'"))
(enter #'enter-mine-and-dig-for-nugget))
(defun controller ()
(dotimes (n 30)
(funcall state))))))
(let ((a (make-miner)))
(funcall (fifth a)))
Answer: DEFUN
The most basic mistake in your code is that DEFUN is not correct for nested functions. DEFUN is a top-level macro, defining a top-level function and should be used in top-level forms.
Nested functions are defined in Common Lisp with FLET and LABELS. LABELS is used for recursive sub-functions.
Naming
Symbols like FooBarBaz are not use in Common Lisp. By default Common Lisp upcases all names internally, so the case information gets lost.
Usually we write foo-bar-baz.
Checking symbols
Use CASE (or ECASE) instead of (cond ((equal foo 'bar) ...)).
Architecture
Usually I would write that piece of code using CLOS, the Common Lisp Object System.
In a more functional style I would propose the following:
use LABELS for the local procedures.
set the state to the next function. A function is written as (function my-function) or #'my-function.
the controller just calls the next function from the state variable. It is not necessary to list all cases. | {
"domain": "codereview.stackexchange",
"id": 688,
"tags": "lisp, common-lisp"
} |
Does the homogeneity of space imply that the expansion of the universe is uniform? | Question: Obviously, homogeneity implies that the density is the same everywhere at any time. However, does this imply that the expansion is uniform? By uniformity, I mean that if I pick three galaxies to form a triangle, then the ratio of the side lengths will never change over time.
EDIT: I have forgotten to add this: if both homogeneity and isotropy are assumed, can we prove that the expansion is uniform?
Answer: No, homogeneity does not imply that the expansion is uniform. Homogeneous expansion could be anisotropic, which would lead to different changes in length depending on orientation.
A simple example to demonstrate this is the Kasner metric which is homogeneous but anisotropic. For a $(3+1)$ spacetime this metric could be written in the following form:
$$
ds^2 = - dt^2 +t^{2p_1} dx^2 +t^{2p_2} dy^2 +t^{2p_3} dz^2.
$$
Now let us assume that we have three galaxies at a moment $t=1$ first at origin $(0,0,0)$, second at a point with spatial coordinates $(a,0,0)$, third at a point $(0,b,0)$.
At the moment $t=\tau$ the proper distances from the first galaxy to the others would correspond to the points: first $(0,0,0)$, second $(\tau^{p_1} a,0,0)$, third $(0,\tau^{p_2} b,0)$, since the metric gives lengths scaling as $t^{p_1}$ along $x$ and $t^{p_2}$ along $y$.
We see that if $p_1\ne p_2$ then the ratio of the distances $d_{1-2}/d_{1-3}$ would be different at different times. | {
"domain": "physics.stackexchange",
"id": 61916,
"tags": "cosmology, spacetime, symmetry, space-expansion, cosmological-inflation"
} |
How to perform encoding and syndrome measurement in stim | Question: I can generate the encoding circuit of a stabilizer code and can read it into
stim. For example for the $[[5,1,3]]$ code :
circuit=stim.Circuit()
circuit.append_operation("H",[1])
circuit.append_operation("CY",[0,4])
circuit.append_operation("H",[2])
circuit.append_operation("CX",[1,4])
circuit.append_operation("H",[3])
circuit.append_operation("CZ",[2,0])
circuit.append_operation("CZ",[2,1])
circuit.append_operation("CX",[2,4])
circuit.append_operation("H",[4])
circuit.append_operation("CZ",[3,0])
circuit.append_operation("CZ",[3,2])
circuit.append_operation("CY",[3,4])
To check this I'd like to encode a random qubit then measure the syndrome for the 4 stabilizers; if everything is correct the syndrome should always be $(0,0,0,0)$.
First step : the "data" qubit is placed on qubit 4 (numbering starts from 0). So the input to
the encoder is $(q0=0,q1=0,q2=0,q3=0,q4=d0)$. $k=1$ for this code so there's only one data qubit.
How would I initialize the input to be of that form?
Second step : I have 4 stabilizers which are just Pauli strings of length 5. I'd like to measure the syndromes and place the result on 4 ancilla qubits. How would I do that and then check that the syndromes are 0?
Answer: You can use the MPP instruction to measure a Pauli product. For example, if one of the prepared stabilizers is $X_1 \cdot Z_2 \cdot Y_3 = +1$ then you can do:
# [... encoding circuit ... ]
# measure stabilizer
MPP X1*Z2*Y3
# and claim it's supposed to have a deterministic result
DETECTOR rec[-1]
If you now sample the detectors of the circuit via circuit.compile_detector_sampler().sample(shots=10) you should get back a numpy array filled with 0s, indicating the system has been prepared into an eigenstate of the stabilizer. If you instead see a 50/50 mix of 0s and 1s, something is wrong.
The next step would be to do that for each stabilizer, and then add noise and confirm that you see the stabilizers flipping:
# [... encoding circuit ... ]
# phase damp qubit 3, potentially flipping the stabilizer
Z_ERROR(0.1) 3
# measure stabilizer
MPP X1*Z2*Y3
# and claim it's supposed to have a deterministic result
DETECTOR rec[-1]
Beware that adding Z 3 is not the same thing as adding Z_ERROR(1) 3. Detectors compute what the expected value is supposed to be, and report deviations from that value arising from noise. Z 3 is part of the expected value calculation, whereas Z_ERROR(1) 3 is part of the noise.
You can use MPP(wrong_result_probability) to make the measurement result itself noisy (with no effect on the qubits).
You may also want to decompose the compound measurement into some underlying gateset, and make each of the individual operations noisy. Stim won't do that for you, but you can of course tell it the decomposed measurement's circuit as well as the noise mechanism instructions around each of its pieces. | {
"domain": "quantumcomputing.stackexchange",
"id": 3195,
"tags": "programming, error-correction, stabilizer-code, stim"
} |
What force push ball outward in rotating tube in inertial frame? | Question: A ball is free to move in a hollow tube; if you rotate the tube, the ball will move outward.
What force pushes the ball outward in the inertial frame, and what does the ball's trajectory look like?
(I know that the centrifugal force doesn't exist in an inertial frame, but without this force I can't explain what makes the ball move outward...)
Answer: The apparent problem arises from the pre-Newtonian prejudice that the motion of a body should be along the direction of the total force exerted on the body in all cases.
That is not the case as it happens for instance for planets around the sun. What matters in giving rise to the actual motion are the applied force(s) and the initial conditions.
Here, the force due to the constraint and the initial data give rise to the expulsion motion in the inertial reference frame.
Here are the details.
Let us assume that the tube is frictionless, so that the reaction $\vec{\phi}$ is normal to the tube -- no force along the tube exists -- and that the angular velocity is constant $\Omega>0$.
I will take advantage of a polar coordinate system $r, \theta$ in the plane containing the rotating tube centered on the rotational axis.
Newton equations read, decomposing (*) the motion along the mobile basis $\vec{e}_r, \vec{e}_\theta$:
$$m\left(\frac{d^2r}{dt^2}- r(t) \Omega^2\right)=0$$
$$m\left(2\Omega \frac{dr}{dt} \right) = \phi$$
where I used $\frac{d\theta}{dt}= \Omega$ constant and $\vec{\phi}= \phi \vec{e}_\theta$.
Notice that no inertial forces take place here as we are dealing with an inertial reference frame.
The first equation determines the motion $r=r(t)$ according to the initial data, and the second one determines $\phi(t)$ accordingly.
The first equation has general solution
$$r(t) = A \sinh (\Omega t) + B \cosh (\Omega t)$$
Let us assume that the ball is at rest in the tube at $t=0$ at distance $r_0>0$ from the axis.
As a consequence,
$r_0 = B$ and $0=\frac{dr}{dt}(0)= A\Omega$ so that $A=0$.
The solution is
$$r(t) = r_0 \cosh(\Omega t)\:.$$
The force due to the tube on the ball is $\phi \vec{e}_\theta$ with
$$\phi(t) = 2mr_0\Omega^2 \sinh(\Omega t) \:.$$
In Cartesian coordinates centered on the rotational axis with $z$ parallel to it, the (curved) trajectory has therefore the form
$$\vec{x}(t) = r_0 \cosh(\Omega t) \cos(\Omega t) \vec{e}_x + r_0 \cosh(\Omega t) \sin(\Omega t) \vec{e}_y\:. $$
(*) As is well known in that representation, the acceleration has the form
$$\ddot{\vec{x}} = \left(\ddot{r}-r\dot{\theta}^2 \right) \vec{e}_r + \left( r\ddot{\theta}+ 2 \dot{r}\dot{\theta}\right)\vec{e}_\theta\:,$$ and thus
2nd Newton's law reads
$$\vec{F} = m\left(\ddot{r}-r\dot{\theta}^2 \right) \vec{e}_r + m\left( r\ddot{\theta}+ 2 \dot{r}\dot{\theta}\right)\vec{e}_\theta\:.$$ | {
"domain": "physics.stackexchange",
"id": 98327,
"tags": "newtonian-mechanics, reference-frames, rotational-dynamics, free-body-diagram, centrifugal-force"
} |
Dominant and recessive epistasis | Question: Can anyone clarify my confusion about the epistasis seen in Labradors — is it an example of recessive or dominant epistasis?
I am not getting consistent results: some sources describe it as dominant and others as recessive.
Could anyone provide me with a definitive source?
Thank you!
Answer: I agree that the question is unclear. I personally don't know the example of the labrador and would need a reference (or a description from you) to talk about this specific case study.
The following tables are displaying all possible kind of interactions (in a haploid and in a diploid) of two bi-allelic loci. Hope that helps! source (wiki)
For Haploids (the concept of dominant epistasis does not apply)
For Diploids | {
"domain": "biology.stackexchange",
"id": 3930,
"tags": "genetics"
} |
How are dies, diodes, chips and the like separated from each other on a wafer? | Question: I would like to know how the single dies on a wafer are (electrically) separated from each other. Are they even?
More precisely, as the wafers themselves are held on a so-called susceptor (basically just a plate to put the wafers on to be heated) and these susceptors are made out of graphite or silicon carbide, I would assume there must be an electric connection across the entire susceptor/wafer. So, I'd think this is a potential risk of damage?
Answer: The wafer is made of a substrate material, which is common to all of the die. On top of the substrate, the electrical structures are added by depositing small amounts of appropriate material (conductor, semiconductor, etc.) in successive layers. The layout of the individual die on the wafer includes consideration that they will be separated enough on the substrate that any imprecision in the deposition of layers will not electrically connect the different die on the wafer.
(Likewise, for the individual die, part of the design process for each die considers whether the structures on the die can be reliably deposited within the precision of the manufacturing technique to be used.) | {
"domain": "physics.stackexchange",
"id": 80797,
"tags": "semiconductor-physics"
} |
Visualizing Kinect data from two Turtlebots in Gazebo | Question:
Hello everyone.
I am trying to run two Turtlebots in Gazebo and have one observe the other. I encountered the common error of one of the turtlebots only showing up as wheels, but by using a hack I am able to see both Turtlebots fully by closing the Gazebo GUI and reopening it again with rosrun gazebo gui.
The problem arises when I try to have one robot observe the other using its kinect and RVIZ. I can see the topics /camera/image_raw and /camera/depth/points (no namespace) from the turtlebot that originally fully appeared in gazebo. The other robot, though, appears only as wheels in RVIZ even though it is fully visible in Gazebo!
I figure there are two ways of solving this. 1) Actually remedying the problem of one of the Turtlebots not fully appearing originally in Gazebo instead of using the hack (which I have no idea how to do) 2) Observe the Turtlebot that fully appears from the other's Kinect. There are topics /robot1/depth/image_raw and /robot1/depth/camera_info being published (and similarly for robot2) but when I set the topic to /robot1/depth/image_raw RVIZ looks for camerainfo on /robot2/depth/camera_info and vice versa and says no camerainfo received. I get just an image of a horizontal white line.
These probably coincide but I am hoping some of you know better than me.
I am using the following launch file to launch the Gazebo world with the 2 turtlebots in it simultaneously:
Where robot1 and robot2.launch are identical files except for the robots and nodes use different namespaces so that I can control the robots separately, and I have successfully written a teleop node to allow me to do this.
So how can I have one Turtlebot observe the other (not just its wheels)? Any ideas would be appreciated.
Thanks for your help!
-Alan
UPDATE
So here is the list of topics being published or subscribed:
/camera/camera_info
/camera/depth/points
/camera/image_raw
/camera/image_raw/compressed
/camera/image_raw/compressed/parameter_descriptions
/camera/image_raw/compressed/parameter_updates
/camera/image_raw/compressedDepth
/camera/image_raw/compressedDepth/parameter_descriptions
/camera/image_raw/compressedDepth/parameter_updates
/camera/image_raw/theora
/camera/image_raw/theora/parameter_descriptions
/camera/image_raw/theora/parameter_updates
/clock
/diagnostics
/diagnostics_agg
/diagnostics_toplevel_state
/gazebo/link_states
/gazebo/model_states
/gazebo/parameter_descriptions
/gazebo/parameter_updates
/gazebo/set_link_state
/gazebo/set_model_state
/robot1/depth/camera_info
/robot1/depth/image_raw
/robot1/imu/data
/robot1/initialpose
/robot1/joint_states
/robot1/move_base_simple/goal
/robot1/odom
/robot1/robot_pose_ekf/odom
/robot1/scan
/robot1/set_hfov
/robot1/set_update_rate
/robot1/turtlebot_node/cmd_vel
/robot1/turtlebot_node/joint_states
/robot1/turtlebot_node/odom
/robot1/turtlebot_node/sensor_state
/robot2/depth/camera_info
/robot2/depth/image_raw
/robot2/imu/data
/robot2/joint_states
/robot2/odom
/robot2/robot_pose_ekf/odom
/robot2/scan
/robot2/set_hfov
/robot2/set_update_rate
/robot2/turtlebot_node/cmd_vel
/robot2/turtlebot_node/joint_states
/robot2/turtlebot_node/odom
/robot2/turtlebot_node/sensor_state
/rosout
/rosout_agg
/tf
As you can see, I have the majority of topics specify for which robot they belong, but not the /camera topics. So, I guess the questions I need to be asking are:
What Gazebo plugin(s) are responsible for publishing the /camera topics? I figure I can use namespaces in them in order to specify to RVIZ which robot's camera feed I want to see.
Why isn't the whole Turtlebot showing up in RVIZ and how can I fix it?
Thanks again!
-Alan
Originally posted by ajhamlet on ROS Answers with karma: 3 on 2013-05-21
Post score: 0
Answer:
Hi,
The problem you have with respect to rviz is due to malformed tf. When you are using multiple robots, you need to resolve the tf data for each of the robots by adding a "tf_prefix". By doing so all the links of the robot have unique names like robot1_tf/base_link, robot2_tf/odom etc and tf trees can be constructed for both robots and rviz can display both the robots.
To resolve the sensor data of the robots you have to provide a namespace for each robot. In the case shown below, the scan topics become /robot1/scan and /robot1/camera/image_raw etc., and rviz is able to display the scan data of both robots.
I use the following launch file:
<launch>
<!-- start gazebo with an empty plane -->
<param name="/use_sim_time" value="true" />
<node launch-prefix="optirun" name="gazebo" pkg="gazebo" type="gazebo" args="$(find angen_gazebo)/worlds/angen_empty.world" respawn="false" output="screen"/>
<node name="gazebo_gui" pkg="gazebo" type="gui" />
<param name="robot_description"
command="$(find xacro)/xacro.py $(find turtlebot_description)/urdf/turtlebot.urdf.xacro" />
<group ns="robot1">
<param name="tf_prefix" value="robot1" />
<include file="$(find angen_turtlebot)/launch/fuerte_final_r1.launch" >
<arg name="init_pose" value="-x 9 -y 5 -z 0.05" />
<arg name="robot_name" value="Robot1" />
</include>
</group>
<group ns="robot2">
<param name="tf_prefix" value="robot2" />
<include file="$(find angen_turtlebot)/launch/fuerte_final_r1.launch" >
<arg name="init_pose" value="-x 9 -y 4 -z 0.05" />
<arg name="robot_name" value="Robot2" />
</include>
</group>
</launch>
fuerte_final_r1.launch:
<launch>
<arg name="init_pose"/>
<arg name="robot_name"/>
<param name="robot_description" command="$(find xacro)/xacro.py '$(find turtlebot_description)/urdf/turtlebot.urdf.xacro'" />
<node name="spawn_turtlebot_model" pkg="gazebo" type="spawn_model" args="$(arg init_pose) -unpause -urdf -param /robot_description -model $(arg robot_name) -robotNamespace $(arg robot_name)" respawn="false" output="screen"/>
<node pkg="diagnostic_aggregator" type="aggregator_node" name="diagnostic_aggregator" >
<rosparam command="load" file="$(find turtlebot_bringup)/config/diagnostics.yaml" />
</node>
<node pkg="robot_state_publisher" type="state_publisher" name="robot_state_publisher" output="screen">
<param name="publish_frequency" type="double" value="30.0" />
</node>
<!-- The odometry estimator -->
<node pkg="robot_pose_ekf" type="robot_pose_ekf" name="robot_pose_ekf">
<param name="freq" value="30.0"/>
<param name="sensor_timeout" value="1.0"/>
<param name="publish_tf" value="true"/>
<param name="odom_used" value="true"/>
<param name="imu_used" value="false"/>
<param name="vo_used" value="false"/>
<!--<param name="output_frame" value="odom"/>-->
</node>
<!-- throttling -->
<node pkg="nodelet" type="nodelet" name="pointcloud_throttle" args="load pointcloud_to_laserscan/CloudThrottle openni_manager" respawn="true">
<param name="max_rate" value="20.0"/>
<remap from="cloud_in" to="/camera/depth/points"/>
<remap from="cloud_out" to="cloud_throttled"/>
</node>
<!-- Fake Laser -->
<node pkg="nodelet" type="nodelet" name="kinect_laser" args="load pointcloud_to_laserscan/CloudToScan openni_manager" respawn="true">
<param name="output_frame_id" value="/camera_depth_frame"/>
<!-- heights are in the (optical?) frame of the kinect -->
<param name="min_height" value="-0.15"/>
<param name="max_height" value="0.15"/>
<remap from="cloud" to="/cloud_throttled"/>
</node>
<!-- Fake Laser (narrow one, for localization -->
<node pkg="nodelet" type="nodelet" name="kinect_laser_narrow" args="load pointcloud_to_laserscan/CloudToScan openni_manager" respawn="true">
<param name="output_frame_id" value="/camera_depth_frame"/>
<!-- heights are in the (optical?) frame of the kinect -->
<param name="min_height" value="-0.025"/>
<param name="max_height" value="0.025"/>
<remap from="cloud" to="/cloud_throttled"/>
<remap from="scan" to="/narrow_scan"/>
</node>
</launch>
Regarding the Gazebo problem, many who use robots in Gazebo have that problem. I usually do not care about that as long as the robot obeys my navigation commands. But to solve that you can do the following: comment out the line in the launch file that launches the GUI, so now you will be launching Gazebo without the GUI. After launching that, you can run rosrun gazebo gui and both robots will be visible right from the start.
Read in detail here.
Originally posted by prasanna.kumar with karma: 1363 on 2013-05-21
This answer was ACCEPTED on the original site
Post score: 3
Original comments
Comment by ajhamlet on 2013-05-22:
Thank you for your response. I think I have already mostly figured out the namespace/tf_prefix issue (actually, using an answer to a question you previously asked). Please see the update to my question for more info on the issue I am having. | {
"domain": "robotics.stackexchange",
"id": 14239,
"tags": "gazebo, simulation, kinect, turtlebot, ros-fuerte"
} |
What happens if you intake pure magnesium? | Question: We know that body needs a certain amount of magnesium. Why are magnesium supplements in the form of magnesium oxide, magnesium citrate, etc?
Answer: Because, as with a lot of materials, we need the ion, not the pure form. Pure sodium is an explosive metal and pure chlorine is a horribly toxic gas, but change them into their ionic forms and they become essential nutrients: sodium chloride, table salt.
In the case of magnesium, however, the problem is one of reactivity and absorption. If you swallowed a solid lump of magnesium, not much would happen; the molarity of stomach acid is fairly low, so it reacts too slowly to get a significant amount of magnesium (or hydrogen) out of it before it passes. Your body just is not equipped to break down solid magnesium, since it does not often occur naturally. We can only absorb it in its ionized state.
More importantly, we take in some ion combinations better than others; this is called bioavailability and is influenced by a number of factors. Magnesium oxide, for instance, has a very low absorption rate and basically just passes through without doing much, while magnesium glycinate has a fantastic absorption rate; some combinations can actually be mildly toxic, like magnesium sulfate. This is a recurring issue with dietary supplements: many do not contain bioavailable forms of the mineral they list, which is how people can end up with deficiencies even while taking supplements for that mineral.
"domain": "biology.stackexchange",
"id": 7870,
"tags": "medicine"
} |
how do i set length of message queue for subscriber in rosjava? | Question:
In rosjava we don’t have access to set the message queue size on a subscriber. So the rosjava node is not receiving & executing messages in real time. Does anybody here have a workaround to resolve this issue?
Originally posted by stark on ROS Answers with karma: 1 on 2015-02-19
Post score: 0
Answer:
As Kircheis answered in this question, you can set the message queue size when you add a message listener to your subscriber.
The rosjava Listener.java tutorial creates a listener like this:
Subscriber<std_msgs.String> subscriber = connectedNode.newSubscriber("chatter", std_msgs.String._TYPE);
subscriber.addMessageListener(new MessageListener<std_msgs.String>() {
@Override
public void onNewMessage(std_msgs.String message) {
log.info("I heard: \"" + message.getData() + "\"");
}
});
When adding the message listener, you can specify a queue size:
int queueSize = 10;
subscriber.addMessageListener(new MessageListener<std_msgs.String>() {
@Override
public void onNewMessage(std_msgs.String message) {
log.info("I heard: \"" + message.getData() + "\"");
}
}, queueSize);
If you do not specify a queue size, the default size is 1.
Originally posted by grieneis with karma: 16 on 2015-03-13
This answer was ACCEPTED on the original site
Post score: 0 | {
"domain": "robotics.stackexchange",
"id": 20931,
"tags": "rosjava"
} |
Can 2 people give rise to billions? Adam and Eve 2.0 | Question: With the advancement of GMOs, is the idea of giving rise to humans from 2 people possible, making questions like this invalid?
How many people are required to maintain genetic diversity?
With constant genome engineering to create genetic diversity in each generation of humans before conception, how many generations can come from two people with ideal DNA? Is it not inbreeding if the DNA is changed enough?
Answer: You do not need any 'editing'; in theory it is possible for two people to give rise to 8 billion people. Early generations would suffer from some inbreeding depression, but if you somehow get rid of it through 'editing', then you do not even have this trouble for the very first generation — though you will have it for the successive generations. It is impossible to tell how important it will be and whether it will prevent this tiny population from surviving, though. There are ecological and sociological problems that are probably of great importance too. To make it short, it is not easy to survive alone!
For non-recombining DNA, it is necessarily true that all currently living copies descend from a single individual (see coalescent theory, although it will likely be too advanced a topic for you). This is the case for the Y chromosome (excluding the PAR) and for the mtDNA. We refer to these MRCAs as Eve-mtDNA (a biblical reference) and Y-MRCA, respectively.
Of course, these two individuals did not live alone, did not live together and did not live at the same time or in the same place. Also, neither of these two individuals is in any way a good turning point for considering him/her the first individual of a new species. The Y-MRCA is just the MRCA of all modern Y chromosomes and Eve-mtDNA is the MRCA of all modern mtDNA. The reality for the rest of the genome is more complex due to recombination. For your information, the Y chromosome and mtDNA put together represent about $\frac{1}{50}$ of the entire genome.
I doubt this answer ends up being of much help to you. You should probably just have a look at an intro course to evolutionary biology such as, for example, evo101.
"domain": "biology.stackexchange",
"id": 8485,
"tags": "human-biology, genetics, ecology, reproduction, theoretical-biology"
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.