|
|
import numpy as np |
|
|
import matplotlib.pyplot as plt |
|
|
from matplotlib.animation import FuncAnimation |
|
|
from mpl_toolkits.mplot3d import Axes3D |
|
|
import plotly.graph_objects as go |
|
|
import pandas as pd |
|
|
import json |
|
|
import os |
|
|
|
|
|
|
|
|
# ---------------------------------------------------------------------------
# Physical and model constants used throughout the simulation.
# ---------------------------------------------------------------------------

# Speed of light in vacuum [m/s] (exact SI value).
c = 299792458

# NOTE(review): named after E = m c^2 but holds only c**2 (no mass factor),
# so its units are m^2/s^2, not joules — confirm intended meaning.
E_mc2 = c**2

# c**2 divided by the Boltzmann constant (1.38e-23 J/K); used only as a
# scale factor in the speed term of the simulation loop below.
TSR = E_mc2 / (1.38e-23)

# Proportionality constant of the T = alpha * v**2 temperature model.
alpha = 1.0

# Twelfth root of two. NOTE(review): this is the equal-temperament semitone
# ratio; it is never used in this file — looks like a leftover.
Q = 2 ** (1 / 12)

# Cosmological densities — presumably kg/m^3 (approximate observed values);
# TODO confirm units, since they are mixed with momenta/speeds below.
dark_energy_density = 5.96e-27

dark_matter_density = 2.25e-27

# Two particles closer than this [m] are treated as colliding.
collision_distance = 1e-10

# Hubble constant [km/s/Mpc].
Hubble_constant = 70.0

# Hubble constant converted to SI [1/s]: km -> m (x1000), per Mpc (3.086e22 m).
Hubble_constant_SI = (
    Hubble_constant * 1000 / 3.086e22
)

# Initial conditions near the Planck epoch.
# Planck temperature [K].
temperature_initial = 1.42e32

# NOTE(review): unused in this file.
particle_density_initial = 5.16e96

# NOTE(review): unused in this file.
particle_speed_initial = c

# Planck time [s] and total simulated time span (1000 Planck times).
t_planck = 5.39e-44

t_simulation = t_planck * 1e3

# Quark rest masses in GeV (approximate PDG values).
quark_masses = {
    "up": 2.3e-3,
    "down": 4.8e-3,
    "charm": 1.28,
    "strange": 0.095,
    "top": 173.0,
    "bottom": 4.18,
}

# GeV -> joule conversion factor.
GeV_to_J = 1.60217662e-10

# Number of integration steps, one per Planck time (= 1000 here).
num_steps = int(t_simulation / t_planck)

# Tunneling probabilities to sweep: 0.1, 0.2, ..., 1.0.
tunneling_probabilities = np.arange(0.1, 1.1, 0.1)

# Output directory for the per-run JSON dumps (created eagerly at import).
data_dir = "big_bang_simulation_data"

os.makedirs(data_dir, exist_ok=True)
|
|
|
|
|
|
|
|
def relativistic_energy(particle_speed, particle_mass, c=299792458.0):
    """Return the relativistic energy E = gamma * m * c**2 of a particle.

    Args:
        particle_speed: Speed in m/s.
        particle_mass: Mass factor (the caller feeds it joule-valued
            "masses"; the formula is agnostic to that convention).
        c: Speed of light in m/s. Defaults to the exact SI value, so the
            function no longer depends on the module-level global ``c``
            (same numeric behavior, but now testable in isolation).

    Returns:
        The relativistic energy, or ``np.inf`` when the speed reaches or
        exceeds ``c`` (the Lorentz factor diverges).
    """
    if particle_speed >= c:
        return np.inf
    # Floor the gamma denominator at 1e-10 so speeds extremely close to c
    # produce a large-but-finite result instead of overflowing.
    return particle_mass * c**2 / np.sqrt(max(1e-10, 1 - (particle_speed / c) ** 2))
|
|
|
|
|
|
|
|
def relativistic_momentum(particle_speed, particle_mass, c=299792458.0):
    """Return the relativistic momentum p = gamma * m * v of a particle.

    Args:
        particle_speed: Speed in m/s.
        particle_mass: Mass factor (same convention as the caller's).
        c: Speed of light in m/s. Defaults to the exact SI value, so the
            function no longer depends on the module-level global ``c``
            (same numeric behavior, but now testable in isolation).

    Returns:
        The relativistic momentum, or ``np.inf`` when the speed reaches or
        exceeds ``c`` (the Lorentz factor diverges).
    """
    if particle_speed >= c:
        return np.inf
    # Floor the gamma denominator at 1e-10 so speeds extremely close to c
    # produce a large-but-finite result instead of overflowing.
    return (
        particle_mass
        * particle_speed
        / np.sqrt(max(1e-10, 1 - (particle_speed / c) ** 2))
    )
|
|
|
|
|
|
|
|
def update_speed(current_speed, current_temperature, particle_mass):
    """Compute the particle's speed for the next simulation step.

    The new speed is ``c * sqrt(1 - f**2)`` where
    ``f = p / (p + dark_energy_density)`` and ``p`` is the current
    relativistic momentum; the radicand is floored at 1e-10 so the result
    stays real and non-zero.

    ``current_temperature`` is accepted for interface compatibility with
    the simulation loop but is not used by the current model.
    """
    momentum = relativistic_momentum(current_speed, particle_mass)
    damping_fraction = momentum / (momentum + dark_energy_density)
    radicand = 1 - damping_fraction**2
    return c * np.sqrt(max(1e-10, radicand))
|
|
|
|
|
|
|
|
|
|
|
# One 4x4 correlation DataFrame is appended here per tunneling probability.
correlation_matrices = []

# Sweep the tunneling probability and run one full simulation per value,
# producing interactive plots, an animated GIF, console reports and a JSON
# dump for each run.
for tunneling_probability in tunneling_probabilities:
    print(f"Running simulation for tunneling probability: {tunneling_probability}")

    # Per-run state: one row per quark flavour, one column per time step.
    particle_speeds = np.zeros((len(quark_masses), num_steps))
    particle_temperatures = np.zeros(
        (len(quark_masses), num_steps)
    )
    particle_masses_evolution = np.zeros(
        (len(quark_masses), num_steps)
    )
    particle_positions = np.zeros(
        (len(quark_masses), num_steps)
    )
    # Boolean mask marking the steps at which each quark "tunneled".
    tunneling_steps = np.zeros(
        (len(quark_masses), num_steps), dtype=bool
    )

    # Rest masses converted GeV -> joules. NOTE(review): these are energies,
    # not kg (no division by c**2); the rest of the code uses them as
    # "masses" consistently, so this is a unit convention — confirm intent.
    particle_masses = np.array([mass * GeV_to_J for mass in quark_masses.values()])

    for j, (quark, mass) in enumerate(quark_masses.items()):
        # `mass` is unused below; particle_masses[j] (joules) is used instead.
        particle_masses_evolution[j, 0] = particle_masses[j]
        particle_positions[j, 0] = 0

        for i in range(1, num_steps):
            # Advance the speed from the previous step. The temperature
            # argument is currently ignored by update_speed.
            particle_speeds[j, i] = update_speed(
                particle_speeds[j, i - 1],
                particle_temperatures[j, i - 1],
                particle_masses[j],
            )
            # Forward-Euler position update over one Planck time.
            particle_positions[j, i] = (
                particle_positions[j, i - 1] + particle_speeds[j, i] * t_planck
            )

            # NOTE(review): `value` is computed and clamped below but never
            # read afterwards — dead code or an unfinished feature.
            value = (
                1
                - (particle_speeds[j, i] / (TSR * temperature_initial))
                + dark_matter_density
            )

            # "Tunneling": with the sweep probability, reset the speed to
            # its step-0 value (zero here) and record the event.
            if np.random.rand() < tunneling_probability:
                particle_speeds[j, i] = particle_speeds[j, 0]
                tunneling_steps[j, i] = True

            if value < 0:
                value = 0

            # Temperature is modeled as proportional to speed squared.
            particle_temperatures[j, i] = (
                alpha * particle_speeds[j, i] ** 2
            )

            speed_squared_diff = (
                particle_speeds[j, i] ** 2 - particle_speeds[j, i - 1] ** 2
            )

            # Mass evolution: carry the previous mass forward when the speed
            # (and hence the relativistic energy) did not change.
            if speed_squared_diff == 0:
                particle_masses_evolution[j, i] = particle_masses_evolution[j, i - 1]
            else:
                # Energy change between steps, evaluated at the constant
                # rest mass particle_masses[j] (not the evolving mass).
                energy_diff = relativistic_energy(
                    particle_speeds[j, i], particle_masses[j]
                ) - relativistic_energy(particle_speeds[j, i - 1], particle_masses[j])

                if abs(energy_diff) < 1e-15:
                    particle_masses_evolution[j, i] = particle_masses_evolution[
                        j, i - 1
                    ]
                else:
                    # E = m c^2  =>  dm = dE / c^2.
                    new_mass = (
                        particle_masses_evolution[j, i - 1] + energy_diff / c**2
                    )
                    if np.isfinite(new_mass):
                        particle_masses_evolution[j, i] = new_mass
                    else:
                        # Keep the previous mass if the energy diff was inf.
                        particle_masses_evolution[j, i] = particle_masses_evolution[
                            j, i - 1
                        ]

            # Pairwise 1-D elastic collisions with every later quark k that
            # is within collision_distance at this step.
            for k in range(j + 1, len(quark_masses)):
                if (
                    abs(particle_positions[j, i] - particle_positions[k, i])
                    < collision_distance
                ):
                    # NOTE(review): v_rel is computed but never used.
                    v_rel = particle_speeds[j, i] - particle_speeds[k, i]

                    # Standard 1-D elastic collision formulas.
                    # NOTE(review): particle_speeds[j, i] is overwritten
                    # first and then read in the k-update below, so k's new
                    # speed uses j's *post*-collision speed — the
                    # pre-collision speed should likely be saved first.
                    particle_speeds[j, i] = (
                        particle_speeds[j, i]
                        * (particle_masses[j] - particle_masses[k])
                        + 2 * particle_masses[k] * particle_speeds[k, i]
                    ) / (particle_masses[j] + particle_masses[k])
                    particle_speeds[k, i] = (
                        particle_speeds[k, i]
                        * (particle_masses[k] - particle_masses[j])
                        + 2 * particle_masses[j] * particle_speeds[j, i]
                    ) / (particle_masses[j] + particle_masses[k])

                    # Clamp both speeds just below c to keep gamma finite.
                    max_speed = c * 0.99
                    particle_speeds[j, i] = np.clip(particle_speeds[j, i], 0, max_speed)
                    particle_speeds[k, i] = np.clip(particle_speeds[k, i], 0, max_speed)

                    # Re-derive temperatures from the post-collision speeds.
                    particle_temperatures[j, i] = alpha * particle_speeds[j, i] ** 2
                    particle_temperatures[k, i] = alpha * particle_speeds[k, i] ** 2

            # Cosmological expansion damping of speed and temperature.
            particle_speeds[j, i] *= 1 - Hubble_constant_SI * t_planck

            particle_temperatures[j, i] *= 1 - Hubble_constant_SI * t_planck

            # Abort this quark's run on numeric blow-up, with diagnostics.
            if np.isnan(particle_speeds[j, i]) or np.isnan(particle_temperatures[j, i]):
                print(f"NaN detected at step {i} for quark {quark}")
                print(f"Previous speed: {particle_speeds[j, i - 1]}")
                print(f"Previous temperature: {particle_temperatures[j, i - 1]}")
                print(f"Current speed: {particle_speeds[j, i]}")
                print(f"Current temperature: {particle_temperatures[j, i]}")
                break

        # Final safety clamp of the whole trajectory to [0, c].
        particle_speeds[j] = np.clip(particle_speeds[j], 0, c)

    # Interactive 3-D line plot: speed vs temperature vs step index, one
    # trace per quark flavour.
    fig = go.Figure(
        data=[
            go.Scatter3d(
                x=particle_speeds[j],
                y=particle_temperatures[j],
                z=np.arange(num_steps),
                mode="lines+markers",
                name=quark.capitalize(),
            )
            for j, quark in enumerate(quark_masses.keys())
        ]
    )
    fig.update_layout(
        title=f"Big Bang Simulation: Temperature vs. Speed (Tunneling Probability: {tunneling_probability})",
        autosize=False,
        width=800,
        height=600,
        margin=dict(l=65, r=50, b=65, t=90),
    )
    fig.show()

    # Matplotlib 3-D animation of the same trajectories, saved as a GIF.
    fig = plt.figure()
    ax = fig.add_subplot(111, projection="3d")
    (line,) = ax.plot([], [], [], "b-")

    ax.set_xlim(min(particle_speeds.flatten()), max(particle_speeds.flatten()))
    ax.set_ylim(
        min(particle_temperatures.flatten()), max(particle_temperatures.flatten())
    )
    ax.set_zlim(0, num_steps)

    ax.set_xlabel("Particle Speed")
    ax.set_ylabel("Particle Temperature")
    ax.set_zlabel("Time")
    ax.set_title(
        f"Big Bang Simulation Animation (Tunneling Probability: {tunneling_probability})"
    )

    def init():
        # Start each animation from an empty line.
        line.set_data([], [])
        line.set_3d_properties([])
        return (line,)

    def update(frame):
        # Draw all quarks' trajectories up to the current frame.
        line.set_data(
            particle_speeds[:, :frame].flatten(),
            particle_temperatures[:, :frame].flatten(),
        )
        line.set_3d_properties(np.tile(np.arange(frame), len(quark_masses)))
        return (line,)

    ani = FuncAnimation(fig, update, frames=num_steps, init_func=init, blit=True)
    ani.save(f"big_bang_simulation_3d_{tunneling_probability}.gif", writer="pillow")
    plt.show()

    # Surface plot of the mass evolution per quark.
    # NOTE(review): X is built from quark 0's speeds for every surface —
    # confirm this is the intended common x-axis.
    X, Y = np.meshgrid(
        particle_speeds[0], np.arange(num_steps)
    )
    fig = go.Figure(
        data=[
            go.Surface(
                z=particle_masses_evolution[j],
                x=X,
                y=Y,
                colorscale="Viridis",
                name=quark.capitalize(),
            )
            for j, quark in enumerate(quark_masses.keys())
        ]
    )
    fig.update_layout(
        title=f"Big Bang Simulation: Mass Evolution (Tunneling Probability: {tunneling_probability})",
        autosize=False,
        width=800,
        height=600,
        margin=dict(l=65, r=50, b=65, t=90),
    )
    fig.show()

    # Surface plot of the boolean tunneling mask (same axes as above).
    X, Y = np.meshgrid(
        particle_speeds[0], np.arange(num_steps)
    )
    fig = go.Figure(
        data=[
            go.Surface(
                z=tunneling_steps[j],
                x=X,
                y=Y,
                colorscale="Blues",
                name=quark.capitalize(),
            )
            for j, quark in enumerate(quark_masses.keys())
        ]
    )
    fig.update_layout(
        title=f"Big Bang Simulation: Tunneling Effect (Tunneling Probability: {tunneling_probability})",
        autosize=False,
        width=800,
        height=600,
        margin=dict(l=65, r=50, b=65, t=90),
    )
    fig.show()

    # Flatten all per-step values into one table for correlation analysis.
    df = pd.DataFrame(
        {
            "Speed": particle_speeds.flatten(),
            "Temperature": particle_temperatures.flatten(),
            "Mass": particle_masses_evolution.flatten(),
            "Tunneling": tunneling_steps.flatten(),
        }
    )

    correlation_matrix = df.corr()
    correlation_matrices.append(correlation_matrix)

    print("Correlation Matrix:")
    print(correlation_matrix)

    # Compare end-of-run masses (converted back to GeV) with the inputs.
    print(
        f"Calculated masses at the end of the simulation (Tunneling Probability: {tunneling_probability}):"
    )
    for j, quark in enumerate(quark_masses.keys()):
        print(f"{quark}: {particle_masses_evolution[j, -1] / GeV_to_J:.4e} GeV")
    print("Real masses:")
    for quark, mass in quark_masses.items():
        print(f"{quark}: {mass:.4e} GeV")

    # Persist this run's raw arrays and correlation matrix as JSON.
    data_filename = os.path.join(
        data_dir, f"big_bang_simulation_data_{tunneling_probability:.1f}.json"
    )
    data = {
        "tunneling_probability": tunneling_probability,
        "particle_speeds": particle_speeds.tolist(),
        "particle_temperatures": particle_temperatures.tolist(),
        "particle_masses_evolution": particle_masses_evolution.tolist(),
        "tunneling_steps": tunneling_steps.tolist(),
        "correlation_matrix": correlation_matrix.values.tolist(),
    }
    with open(data_filename, "w") as f:
        json.dump(data, f)
|
|
|
|
|
# Validate the per-run correlation matrices collected during the sweep above.
# BUG FIX: the previous version reset `correlation_matrices` and recomputed
# `df.corr()` inside this loop — but `df` still holds only the LAST sweep
# iteration's data, so it appended ten identical copies of the final run's
# matrix and discarded the per-run matrices. We now keep the matrices that
# were already collected and only apply the square-shape check to each one.
validated_correlation_matrices = []
for tunneling_probability, correlation_matrix in zip(
    tunneling_probabilities, correlation_matrices
):
    if correlation_matrix.shape[0] == correlation_matrix.shape[1]:
        validated_correlation_matrices.append(correlation_matrix)
    else:
        print(f"Skipping correlation matrix for tunneling probability {tunneling_probability} because it is not a square matrix.")
correlation_matrices = validated_correlation_matrices
|
|
|
|
|
|
|
|
# Flatten each 4x4 correlation matrix into a single 16-value row so every
# tunneling probability becomes one row of a summary DataFrame.
flat_correlation_matrices = np.array(
    [matrix.values.flatten() for matrix in correlation_matrices]
)

# Column labels "Corr_<row>_<col>" for the flattened 4-column matrices.
columns = [
    f"Corr_{i}_{j}"
    for i in range(flat_correlation_matrices.shape[1] // 4)
    for j in range(4)
]
correlation_matrices_df = pd.DataFrame(
    flat_correlation_matrices,
    columns=columns,
    index=tunneling_probabilities,
)

# Final console report of all runs' correlation structures.
print("Correlation Matrices for Different Tunneling Probabilities:")
print(correlation_matrices_df)
|
|
|
|
|
|
|
|
|