repo
stringlengths 2
99
| file
stringlengths 13
225
| code
stringlengths 0
18.3M
| file_length
int64 0
18.3M
| avg_line_length
float64 0
1.36M
| max_line_length
int64 0
4.26M
| extension_type
stringclasses 1
value |
|---|---|---|---|---|---|---|
amuse
|
amuse-main/src/amuse/test/suite/ext_tests/test_rotating_bridge.py
|
import numpy
from amuse.test import amusetest
from amuse.units import units, nbody_system, constants
from amuse.ext.rotating_bridge import Rotating_Bridge, inertial_to_rotating, rotating_to_inertial
from amuse.ext.composition_methods import *
from amuse.datamodel import Particles
class drift_without_gravity(object):
    """
    Force-free stand-in for a gravity code: evolves the test particles
    ballistically (position += velocity * dt), so that the Rotating_Bridge
    supplies the only accelerations acting on them.
    """
    def __init__(self, particles, time=None):
        # The default is resolved lazily so that defining this class does not
        # evaluate a unit expression at import time; `None` keeps the original
        # effective default of 0 Myr.
        self.particles = particles
        self.model_time = (0 | units.Myr) if time is None else time

    def evolve_model(self, t_end):
        """Advance all particles along straight lines up to time t_end."""
        dt = t_end - self.model_time
        self.particles.position += self.particles.velocity*dt
        self.model_time = t_end

    @property
    def potential_energy(self):
        # There is no mutual gravity, so the potential energy is identically
        # zero.  Imported locally: `quantities` is not among this module's
        # top-level imports, so the bare name would raise NameError here.
        from amuse.units import quantities
        return quantities.zero

    @property
    def kinetic_energy(self):
        # Sum of (1/2) m v^2 over all particles.
        return (0.5*self.particles.mass*self.particles.velocity.lengths()**2).sum()
class TestRotatingBridge(amusetest.TestCase):
    def test1(self):
        """Quarter-period drift in a rotating frame (dimensionless units),
        forwards and then reversed back to the starting point."""
        bodies = Particles(1)
        bodies.position = [0.0, 1.0, 0.0]
        bodies.velocity = [1.0, 0.0, 0.0]
        initial_position = bodies.position.copy()
        initial_velocity = bodies.velocity.copy()

        omega = 1.
        integrator = SPLIT_6TH_SS_M13
        t_end = 0.25*2*numpy.pi/omega
        timestep = 0.01*2*numpy.pi/omega

        # Forward leg: transform to the co-rotating frame and drift.
        rotating = inertial_to_rotating(0, omega, bodies)
        free_drift = drift_without_gravity(rotating, time=0)
        bridge = Rotating_Bridge(omega, timestep=timestep, method=integrator)
        bridge.add_system(free_drift)
        bridge.evolve_model(t_end)
        inertial = rotating_to_inertial(t_end, omega, rotating)
        evolved_position = inertial.position.copy()
        self.assertAlmostEqual(bridge.model_time, t_end)
        self.assertAlmostEqual(initial_position + initial_velocity*t_end, evolved_position, 12)

        # Backward leg: flip velocities and the rotation sense; the particle
        # must retrace its path to the starting position.
        inertial.velocity = -inertial.velocity
        rotating = inertial_to_rotating(0, -omega, inertial)
        free_drift = drift_without_gravity(rotating, time=0)
        bridge = Rotating_Bridge(-omega, timestep=timestep, method=integrator)
        bridge.add_system(free_drift)
        bridge.evolve_model(t_end)
        inertial = rotating_to_inertial(t_end, -omega, rotating)
        self.assertAlmostEqual(initial_position, inertial.position.copy(), 12)

    def test2(self):
        """Same round trip as test1, but with SI units attached."""
        bodies = Particles(1)
        bodies.position = [0.0, 1.0, 0.0] | units.m
        bodies.velocity = [1.0, 0.0, 0.0] | units.m/units.s
        initial_position = bodies.position.copy()
        initial_velocity = bodies.velocity.copy()

        omega = 1. | units.s**-1
        integrator = SPLIT_6TH_SS_M13
        t_end = 0.25*2*numpy.pi/omega
        timestep = 0.01*2*numpy.pi/omega

        rotating = inertial_to_rotating(0*timestep, omega, bodies)
        free_drift = drift_without_gravity(rotating, time=0 | units.s)
        bridge = Rotating_Bridge(omega, timestep=timestep, method=integrator)
        bridge.add_system(free_drift)
        bridge.evolve_model(t_end)
        inertial = rotating_to_inertial(t_end, omega, rotating)
        evolved_position = inertial.position.copy()
        self.assertAlmostEqual(bridge.model_time, t_end)
        self.assertAlmostEqual(initial_position + initial_velocity*t_end, evolved_position, 12)

        inertial.velocity = -inertial.velocity
        rotating = inertial_to_rotating(0*timestep, -omega, inertial)
        free_drift = drift_without_gravity(rotating, time=0 | units.s)
        bridge = Rotating_Bridge(-omega, timestep=timestep, method=integrator)
        bridge.add_system(free_drift)
        bridge.evolve_model(t_end)
        inertial = rotating_to_inertial(t_end, -omega, rotating)
        self.assertAlmostEqual(initial_position, inertial.position.copy(), 12)
| 3,371
| 25.34375
| 97
|
py
|
amuse
|
amuse-main/src/amuse/test/suite/ext_tests/test_evrardmodel.py
|
from amuse.test import amusetest
from amuse.ext.evrard_test import new_evrard_gas_sphere
from amuse.units import nbody_system as nbody
from amuse.units import units
class TestEvrardModel(amusetest.TestCase):
    def test1(self):
        """The generator returns exactly the requested number of particles."""
        print("First test: making an Evrard gas sphere model.")
        gas = new_evrard_gas_sphere(1000, seed=1234)
        self.assertEqual(len(gas), 1000)

    def test2(self):
        """A scaled Evrard model is cold, centred and has unit total mass."""
        print("Testing properties of an Evrard model.")
        gas = new_evrard_gas_sphere(1000, do_scale=True, seed=1234)
        self.assertEqual(len(gas), 1000)
        # The Evrard sphere starts cold: all energy is potential.
        self.assertAlmostEqual(gas.kinetic_energy(), 0.00 | nbody.energy)
        self.assertAlmostEqual(gas.potential_energy(G=nbody.G), -0.50 | nbody.energy)
        self.assertAlmostEqual(gas.center_of_mass(), [0, 0, 0] | nbody.length)
        self.assertAlmostEqual(gas.center_of_mass_velocity(), [0, 0, 0] | nbody.speed)
        self.assertAlmostEqual(gas.mass.sum(), 1.00 | nbody.mass)

    def test3(self):
        """Scaling normalises the virial radius to 1 N-body length."""
        print("Testing virial radius of an Evrard model.")
        gas = new_evrard_gas_sphere(100, do_scale=True, seed=1234)
        self.assertAlmostEqual(gas.virial_radius(), 1.00 | nbody.length)
| 1,501
| 44.515152
| 95
|
py
|
amuse
|
amuse-main/src/amuse/test/suite/ext_tests/test_grid_to_sph.py
|
import os.path
import numpy
from amuse.test.amusetest import get_path_to_results, TestWithMPI
try:
from matplotlib import pyplot
HAS_MATPLOTLIB = True
from amuse.plot import plot, semilogy, xlabel, ylabel, loglog
except ImportError:
HAS_MATPLOTLIB = False
from amuse.support.exceptions import AmuseException
from amuse.ext.grid_to_sph import Grid2SPH, convert_grid_to_SPH
from amuse.units import units
from amuse.units import generic_unit_system
from amuse.units import nbody_system
from amuse.units import constants
from amuse.units.generic_unit_converter import ConvertBetweenGenericAndSiUnits
from amuse.datamodel import Particles
from amuse.datamodel import Particle
from amuse.datamodel import ParticlesSuperset
from amuse.datamodel import Grid
def create_grid(*grid_args):
    """Create a Grid and expose (rhovx, rhovy, rhovz) as a 'momentum' vector attribute."""
    new_grid = Grid.create(*grid_args)
    new_grid.add_vector_attribute("momentum", ["rhovx", "rhovy", "rhovz"])
    return new_grid
class TestGrid2SPH(TestWithMPI):
    """Tests for converting a hydrodynamical grid into an SPH particle set."""

    def setup_simple_grid(self):
        # 4x3x2 grid filling a unit cube: density rises linearly from
        # 1 to 2 kg/m^3 across the 24 cells, velocity is a uniform
        # (3, 4, 0) m/s (|v| = 5 m/s), and the energy density corresponds
        # to a uniform specific internal energy of 1 J/kg:
        # energy = rho * (u + 0.5*|v|^2).
        test_grid = create_grid((4, 3, 2), [1.0, 1.0, 1.0] | units.m)
        test_grid.rho = numpy.linspace(1.0, 2.0, num=24).reshape(test_grid.shape) | units.kg/units.m**3
        test_grid.rhovx = test_grid.rho * (3.0 | units.m/units.s)
        test_grid.rhovy = test_grid.rho * (4.0 | units.m/units.s)
        test_grid.rhovz = test_grid.rho * (0.0 | units.m/units.s)
        test_grid.energy = test_grid.rho * ((1.0 | (units.m/units.s)**2) + 0.5 * (5.0 | units.m/units.s)**2)
        return test_grid

    def test0(self):
        """Sanity-check the fixture grid itself (cell centres, momenta, energy)."""
        print("Testing the simple example grid")
        test_grid = self.setup_simple_grid()
        # First/last cell centres of a 4x3x2 grid on a unit cube.
        self.assertEqual(test_grid.position[0][0][0], [1.0/8.0, 1.0/6.0, 1.0/4.0] | units.m)
        self.assertEqual(test_grid.position[-1][-1][-1], [7.0/8.0, 5.0/6.0, 3.0/4.0] | units.m)
        # Momentum = rho * v; rho is 1 in the first cell and 2 in the last.
        self.assertEqual(test_grid.momentum[0][0][0], [3.0, 4.0, 0.0] | (units.kg/units.m**3) * (units.m/units.s))
        self.assertEqual(test_grid.momentum[-1][-1][-1], [6.0, 8.0, 0.0] | (units.kg/units.m**3) * (units.m/units.s))
        # Energy density = rho * (1 + 0.5*25) = 13.5 * rho.
        self.assertEqual(test_grid.energy[0][0][0], 13.5 | (units.J/units.m**3))
        self.assertEqual(test_grid.energy[-1][-1][-1], 27.0 | (units.J/units.m**3))

    def test1(self):
        """Exercise the Grid2SPH converter internals (lookup tables, variates)."""
        print("Testing the converter")
        number_of_particles = 10000
        test_grid = self.setup_simple_grid()
        converter = Grid2SPH(test_grid, number_of_particles)
        self.assertTrue(converter.grid is test_grid)
        self.assertEqual(converter.shape, (4, 3, 2))
        self.assertEqual(converter.number_of_sph_particles, number_of_particles)
        self.assertEqual(converter.base_distribution_type, "uniform")
        converter.setup_lookup_tables()
        converter.setup_variates()
        # Mean density is 1.5 kg/m^3 over 24 cells; the lightest cell
        # contributes 1/(1.5*24) of the cumulative mass weight.
        self.assertEqual(converter.cumulative_weight[0], 1.0/(1.5*4*3*2))
        self.assertEqual(converter.cumulative_weight[-1], 1.0)
        self.assertEqual(converter.position_lookup_table[0], [1.0/8.0, 1.0/6.0, 1.0/4.0] | units.m)
        self.assertEqual(converter.position_lookup_table[-1], [7.0/8.0, 5.0/6.0, 3.0/4.0] | units.m)
        self.assertEqual(converter.position_lookup_table[9], [3.0/8.0, 3.0/6.0, 3.0/4.0] | units.m)
        self.assertAlmostEqual(converter.velocity_lookup_table, [3.0, 4.0, 0.0] | units.m/units.s)
        self.assertAlmostEqual(converter.specific_internal_energy_lookup_table, 1.0 | units.J/units.kg)
        self.assertEqual(converter.cellsize_unit, units.m)
        self.assertTrue(converter.cellsize_unit is units.m)
        self.assertAlmostEqual(converter.cellsize_number, [0.25, 1/3.0, 0.5])
        self.assertAlmostEqual(converter.mass, 1.5 | units.kg)
        # The number of particles in a cell should scale with the amount of mass in the cell:
        self.assertAlmostRelativeEqual(
            converter.mass * numpy.histogram(converter.indices, bins=4*3*2)[0] * 1.0/number_of_particles,
            test_grid.rho.flatten()*test_grid.cellsize().prod(),
            places=2
        )

    def test2(self):
        """Public API with the default ('uniform') particle distribution."""
        print("Testing the user interface")
        number_of_particles = 10000
        test_grid = self.setup_simple_grid()
        sph_particles = convert_grid_to_SPH(test_grid, number_of_particles)
        self.assertEqual(len(sph_particles), number_of_particles)
        self.assertAlmostEqual(sph_particles.mass.sum(), 1.5 | units.kg)
        self.assertAlmostEqual(sph_particles.velocity, [3.0, 4.0, 0.0] | units.m/units.s)
        self.assertAlmostEqual(sph_particles.u, 1.0 | (units.m/units.s)**2)
        # The number of particles in a cell should scale with the amount of mass in the cell:
        self.assertAlmostRelativeEqual(
            (1.5 | units.kg)/number_of_particles * numpy.histogramdd(
                sph_particles.position.value_in(units.m), bins=(4, 3, 2))[0],
            test_grid.rho*test_grid.cellsize().prod(),
            places=2
        )
        self.assertAlmostEqual(sph_particles.h_smooth, (50.0/number_of_particles)**(1.0/3) | units.m)

    def test3(self):
        """Public API with the 'random' particle distribution (seeded)."""
        print("Testing the user interface, random base_distribution_type")
        number_of_particles = 10000
        test_grid = self.setup_simple_grid()
        sph_particles = convert_grid_to_SPH(test_grid, number_of_particles,
            base_distribution_type="random", seed=12345)
        self.assertEqual(len(sph_particles), number_of_particles)
        self.assertAlmostEqual(sph_particles.mass.sum(), 1.5 | units.kg)
        self.assertAlmostEqual(sph_particles.velocity, [3.0, 4.0, 0.0] | units.m/units.s)
        self.assertAlmostEqual(sph_particles.u, 1.0 | (units.m/units.s)**2)
        # For 'random', the number of particles in a cell should scale only on average
        # with the amount of mass in the cell:
        self.assertAlmostRelativeEqual(
            ((1.5 | units.kg)/number_of_particles * numpy.histogramdd(
                sph_particles.position.value_in(units.m), bins=(4, 3, 2))[0]).sum(),
            (test_grid.rho*test_grid.cellsize().prod()).sum(),
            places=2
        )
        # Per-cell counts are expected to deviate: the stricter comparison
        # must fail for a random distribution.
        self.assertRaises(AssertionError,
            self.assertAlmostRelativeEqual,
            (1.5 | units.kg)/number_of_particles * numpy.histogramdd(sph_particles.position.value_in(units.m), bins=(4, 3, 2))[0],
            test_grid.rho*test_grid.cellsize().prod(),
            places=2,
        )
        self.assertAlmostEqual(sph_particles.h_smooth, (50.0/number_of_particles)**(1.0/3) | units.m)

    def test4(self):
        """Error handling: non-3D grids and unknown distribution types."""
        print("Testing exceptions")
        number_of_particles = 10000
        test_grid = self.setup_simple_grid()
        # A slice of the grid is 2D and must be rejected.
        self.assertEqual(test_grid[0].number_of_dimensions(), 2)
        self.assertRaises(AmuseException, convert_grid_to_SPH, test_grid[0], number_of_particles,
            expected_message="Grid must be 3D")
        self.assertRaises(AmuseException, convert_grid_to_SPH, test_grid,
            number_of_particles, base_distribution_type="bogus",
            expected_message="Unknown base_distribution_type: bogus. Possible "
                "options are: 'random' or 'uniform'.")
| 7,057
| 50.897059
| 134
|
py
|
amuse
|
amuse-main/src/amuse/test/suite/ext_tests/test_kingmodel.py
|
import numpy
from amuse.test import amusetest
from amuse.support.exceptions import AmuseException
from amuse.units import nbody_system
from amuse.units import units
from amuse.ic.kingmodel import new_king_model
class TestKingModel(amusetest.TestCase):
    """Tests for the King model initial-condition generator."""

    def test1(self):
        """Equal particle masses summing to one N-body mass unit."""
        print("First test: making a King model.")
        number_of_particles = 10
        particles = new_king_model(number_of_particles, 6.0)
        self.assertAlmostEqual(particles.mass.sum(), 1 | nbody_system.mass)
        self.assertAlmostEqual(particles[0].mass, 0.1 | nbody_system.mass)
        print(particles)
        # A single particle's mass must be a scalar quantity, not a vector.
        self.assertFalse(particles[0].mass.is_vector())

    def test2(self):
        """Kinetic/potential energy of a scaled (virialised) SI realisation."""
        convert_nbody = nbody_system.nbody_to_si(1.0 | units.MSun, 1.0 | units.AU)
        print("Testing kinetic and potential energy of a King model realisation.")
        number_of_particles = 500
        particles = new_king_model(number_of_particles, 6.0, convert_nbody, do_scale=True)
        self.assertEqual(len(particles), number_of_particles)
        self.assertAlmostEqual(particles[0].mass, (1.0 | units.MSun)/number_of_particles, 3, in_units=units.MSun)
        # Virial equilibrium after scaling: T = 0.25, U = -0.5 (N-body units).
        self.assertAlmostEqual(convert_nbody.to_nbody(particles.kinetic_energy()), 0.25 | nbody_system.energy)
        self.assertAlmostEqual(convert_nbody.to_nbody(particles.potential_energy()), -0.5 | nbody_system.energy)

    def slowtest3(self):
        """Generator works across a range of King depths W0 (not run by default)."""
        print("King models with varying King dimensionless depth W0.")
        number_of_particles = 10
        for w_0 in [1.0, 6.0, 11.0, 16.0]:
            particles = new_king_model(number_of_particles, W0=w_0)
            self.assertEqual(len(particles), number_of_particles)

    def test4(self):
        """W0 outside the valid range (0, 16) must raise."""
        print("Testing maximal/minimal value of King dimensionless depth W0.")
        number_of_particles = 10
        self.assertRaises(AmuseException, new_king_model, number_of_particles, W0=16.5,
            expected_message="makeking: must specify w0 < 16")
        self.assertRaises(ZeroDivisionError, new_king_model, number_of_particles, W0=0.0)

    def test5(self):
        """Regression test against a fixed seed: exact positions are pinned.

        NOTE: the expected coordinates depend on the exact sequence of draws
        from numpy's global RNG, so the statement order here must not change.
        """
        print("Testing a specific King model realisation.")
        numpy.random.seed(345672)
        convert_nbody = nbody_system.nbody_to_si(1.0 | units.MSun, 1.0 | units.AU)
        particles = new_king_model(500, 6.0, convert_nbody)
        self.assertEqual(len(particles), 500)
        self.assertAlmostEqual(particles.total_mass(), 1.0 | units.MSun)
        self.assertAlmostEqual(particles.mass, 1.0 / 500 | units.MSun)
        self.assertAlmostEqual(particles.center_of_mass(), [0, 0, 0] | units.AU)
        self.assertAlmostEqual(particles.center_of_mass_velocity(), [0, 0, 0] | units.km / units.s)
        self.assertAlmostEqual(particles[:3].position, [[-0.23147381, -0.19421449, -0.01165137],
            [-0.09283025, -0.06444658, -0.07922396], [-0.44189946, 0.23786357, 0.39115629]] | units.AU)
| 2,888
| 49.684211
| 113
|
py
|
amuse
|
amuse-main/src/amuse/test/suite/reports/test_speed.py
|
from amuse.units import nbody_system
from amuse.units import units
from amuse.test.amusetest import get_path_to_results
from amuse.test import compile_tools
import subprocess
import os
import numpy
import time
from amuse import datamodel
from amuse.rfi.tools import create_c
from amuse.rfi import channel
from amuse.rfi.core import *
# C++ benchmark kernel compiled into the worker: a 3D grid of (x, y, z)
# doubles with setters/getters and a trivial update step.
codestring = """
#include <stdio.h>
#include <mpi.h>
#include <new>
#include <iostream>

struct data {
    double x;
    double y;
    double z;
};

int number_of_points_in_one_dimension = 0;
data * model = 0;

int set_data(int index, double vx, double vy, double vz)
{
    if(!model)
    {
        return -1;
    }
    /* valid indices are 0 .. N*N*N-1, so reject index == N*N*N too
       (the original '>' check was off by one) */
    if(index >= (number_of_points_in_one_dimension * number_of_points_in_one_dimension * number_of_points_in_one_dimension))
    {
        return -2;
    }
    data & m = model[index];
    m.x = vx;
    m.y = vy;
    m.z = vz;
    return 0;
}

int get_data(int index, double * vx, double * vy, double * vz)
{
    double data_in[6], data_out[6];
    int status_in,status_out;
    if(!model)
    {
        return -1;
    }
    if(index >= (number_of_points_in_one_dimension * number_of_points_in_one_dimension * number_of_points_in_one_dimension))
    {
        return -2;
    }
    data & m = model[index];
    *vx = m.x;
    *vy = m.y;
    *vz = m.z;
    data_in[0] = data_in[1] = data_in[2] = 0.0;
    data_in[3] = data_in[4] = data_in[5] = 0.0;
    data_out[0] = data_out[1] = data_out[2] = 0.0;
    data_out[3] = data_out[4] = data_out[5] = 0.0;
    status_in = status_out = 0;
    /*
    MPI::COMM_WORLD.Allreduce(data_in, data_out, 6, MPI::DOUBLE,MPI::SUM);
    MPI::COMM_WORLD.Barrier();
    MPI::COMM_WORLD.Allreduce(&status_in, &status_out, 1, MPI::DOUBLE,MPI::SUM);
    */
    return 0;
}

int step()
{
    if(!model) {
        return -1;
    }
    /* the loop counters were declared without initializers
       ('for(int xindex ;'), which is undefined behaviour in C++;
       they must start at 0 to sweep the whole grid */
    for(int xindex = 0; xindex < number_of_points_in_one_dimension; xindex++)
    {
        for(int yindex = 0; yindex < number_of_points_in_one_dimension; yindex++)
        {
            for(int zindex = 0; zindex < number_of_points_in_one_dimension; zindex++)
            {
                int index = xindex * number_of_points_in_one_dimension * number_of_points_in_one_dimension;
                index += yindex * number_of_points_in_one_dimension;
                index += zindex;
                model[index].x = index;
                model[index].y = model[index].x / (1.0 + model[index].y);
                model[index].z = model[index].x * model[index].y / (model[index].z + 1e-7);
            }
        }
    }
    return 0;
}

int set_number_of_points_in_one_dimension(int value)
{
    if(model) {
        /* model is allocated with new[]; scalar 'delete model' is
           undefined behaviour, the array form is required */
        delete [] model;
    }
    try {
        model = new data[value*value*value];
    } catch (std::bad_alloc &e) {
        number_of_points_in_one_dimension = 0;
        return -1;
    }
    number_of_points_in_one_dimension = value;
    return 0;
}

int set_data_to_same(int n, double vx, double vy, double vz) {
    for(int i = 0; i < n; i++) {
        set_data(i, vx, vy, vz);
    }
    return 0;
}

int reset()
{
    if(model) {
        delete [] model;
    }
    model = 0;
    return 0;
}
"""
class TestCode(CodeInterface):
    """Python-side interface to the benchmark worker built from `codestring`.

    Each @legacy_function below declares the remote-call specification
    (parameter names, dtypes and directions) for one C function.
    """

    def __init__(self, exefile):
        CodeInterface.__init__(self, exefile)

    @legacy_function
    def set_number_of_points_in_one_dimension():
        """
        Set the number of points in one dimension (N); the total model
        size will be cubed (N*N*N).
        """
        function = LegacyFunctionSpecification()
        function.addParameter('value',
            dtype='int32',
            direction=function.IN,
            description="The number of points in one direction")
        function.result_type = 'int32'
        return function

    @legacy_function
    def step():
        """
        Do one step over the N * N * N grid
        """
        function = LegacyFunctionSpecification()
        function.result_type = 'int32'
        return function

    @legacy_function
    def reset():
        """
        Restore the model to its original state
        """
        function = LegacyFunctionSpecification()
        function.result_type = 'int32'
        return function

    @legacy_function
    def set_data():
        """
        set example vector data
        """
        function = LegacyFunctionSpecification()
        function.addParameter('index',
            dtype='int32',
            direction=function.IN,
            description="index in the array in range 0 <= index < (N*3)")
        function.addParameter('vx',
            dtype='float64',
            direction=function.IN,
            description="x component of the vector")
        function.addParameter('vy',
            dtype='float64',
            direction=function.IN,
            description="y component of the vector")
        function.addParameter('vz',
            dtype='float64',
            direction=function.IN,
            description="z component of the vector")
        # can_handle_array allows one call to transfer whole numpy arrays,
        # which is what the speed test measures.
        function.can_handle_array = True
        function.result_type = 'int32'
        return function

    @legacy_function
    def set_data_to_same():
        """
        set all vector data to same value
        """
        function = LegacyFunctionSpecification()
        function.addParameter('max',
            dtype='int32',
            direction=function.IN,
            description="index in the array in range 0 <= index < (N*3)")
        function.addParameter('vx',
            dtype='float64',
            direction=function.IN,
            description="x component of the vector")
        function.addParameter('vy',
            dtype='float64',
            direction=function.IN,
            description="y component of the vector")
        function.addParameter('vz',
            dtype='float64',
            direction=function.IN,
            description="z component of the vector")
        function.can_handle_array = True
        function.result_type = 'int32'
        return function

    @legacy_function
    def get_data():
        """
        retrieve example vector data
        """
        function = LegacyFunctionSpecification()
        function.addParameter('index',
            dtype='int32',
            direction=function.IN,
            description="index in the array in range 0 <= index < (N*3)")
        function.addParameter('vx',
            dtype='float64',
            direction=function.OUT,
            description="x component of the vector")
        function.addParameter('vy',
            dtype='float64',
            direction=function.OUT,
            description="y component of the vector")
        function.addParameter('vz',
            dtype='float64',
            direction=function.OUT,
            description="z component of the vector")
        function.can_handle_array = True
        function.result_type = 'int32'
        return function
class RunSpeedTests(object):
    """Builds the C worker and measures AMUSE channel data-transfer speed."""

    def __init__(self):
        # Grid sizes (points per dimension) to benchmark; total model size
        # is the cube of each entry.
        self.number_of_gridpoints = [8]

    def build_worker(self):
        """Compile `codestring` plus the generated interface into a worker executable."""
        path = os.path.abspath(get_path_to_results())
        codefile = os.path.join(path, "code.o")
        interfacefile = os.path.join(path, "interface.o")
        self.exefile = os.path.join(path, "c_worker")
        compile_tools.cxx_compile(codefile, codestring)
        # Generate the C header and source for the TestCode specification.
        uc = create_c.GenerateACHeaderStringFromASpecificationClass()
        uc.specification_class = TestCode
        uc.make_extern_c = False
        header = uc.result
        uc = create_c.GenerateACSourcecodeStringFromASpecificationClass()
        uc.specification_class = TestCode
        uc.needs_mpi = False
        code = uc.result
        string = '\n\n'.join([header, code])
        #print string
        compile_tools.cxx_compile(interfacefile, string, extra_args=['-I', path])
        compile_tools.c_build(self.exefile, [interfacefile, codefile])

    def start(self):
        """Build the worker, then run and print the results for each grid size."""
        self.build_worker()
        for number_of_points_in_one_dimension in self.number_of_gridpoints:
            result = self.run(number_of_points_in_one_dimension)
            print(', '.join([str(x) for x in result]))

    def run(self, number_of_points_in_one_dimension):
        """Time an element-wise set_data call against a single bulk call.

        Returns (per-element time, number of points, MB/s, bulk time,
        relative overhead of per-element vs bulk transfer).
        """
        instance = TestCode(self.exefile)
        total_number_of_points = number_of_points_in_one_dimension ** 3
        # Payload per point: one int32 index + three float64 components,
        # plus 4 bytes accounted for the return status.
        number_of_bytes = 4 + 8 + 8 + 8
        total_number_of_bytes = total_number_of_points * (number_of_bytes + 4)
        indices = numpy.array(range(total_number_of_points), dtype='int32')
        data_x = numpy.array(range(total_number_of_points), dtype='float64')
        data_y = numpy.array(range(total_number_of_points), dtype='float64')
        data_z = numpy.array(range(total_number_of_points), dtype='float64')
        errorcode = instance.set_number_of_points_in_one_dimension(number_of_points_in_one_dimension)
        if errorcode < 0:
            raise Exception("Could not allocate memory")
        # Timed section 1: transfer all points as arrays through the channel.
        t0 = time.time()
        instance.set_data(indices, data_x, data_y, data_z)
        t1 = time.time()
        dt = t1 - t0
        mbytes_per_second = total_number_of_bytes / dt / (1000.0 * 1000.0)
        # Timed section 2: a single call that sets every point worker-side.
        t2 = time.time()
        instance.set_data_to_same(total_number_of_points, 0.0, 1.0, 2.0)
        t3 = time.time()
        instance.reset()
        instance.stop()
        return dt, total_number_of_points, mbytes_per_second, t3-t2, (dt - (t3-t2)) / (t3-t2)
def test_speed():
    """Smoke test: run the speed benchmark on the smallest (8^3) grid only."""
    runner = RunSpeedTests()
    runner.number_of_gridpoints = [8]
    runner.start()
if __name__ == '__main__':
    # When invoked directly, benchmark the larger grid sizes (the pytest
    # entry point test_speed() above only runs the small 8^3 case).
    #channel.MessageChannel.DEBUGGER = channel.MessageChannel.DDD
    x = RunSpeedTests()
    x.number_of_gridpoints = [64, 128, 192]
    x.start()
| 10,126
| 27.526761
| 123
|
py
|
amuse
|
amuse-main/src/amuse/test/suite/reports/speed_report.py
|
"""
Runs several tests to determine how fast common operations
are in AMUSE.
to profile (in the amuse root directory):
./amuse.sh -m cProfile -s cumulative test/reports/speed_report.py --n_order=4 --test=speed_copy_to_set > profile.txt
"""
#import numpypy
from amuse.lab import *
#from amuse.datamodel import *
#from amuse.units import nbody_system
from amuse.support.thirdparty import texttable
import traceback
import subprocess
import os
import numpy
import time
import sys
import signal
from optparse import OptionParser
from mpi4py import MPI
from amuse.datamodel import ParticlesSuperset
class TimeoutException(Exception):
    """Raised when a single speed test exceeds its allotted wall-clock time."""
class SkipException(Exception):
    """Raised to skip a speed test that is disabled for this run."""
class RunSpeedTests(object):
@late
def report_lines(self):
return []
@late
def header_line(self):
return ('action', 'duration\n(seconds)')
@late
def row_formatters(self):
return (self.method_to_action, lambda x : x)
@late
def maximum_number_of_seconds_to_allow_per_test(self):
return 1200
def method_to_action(self, x):
name = x.__name__
name = name[len(self.method_prefix):]
name = name.replace('_d_', '.')
name = name.replace('_', ' ')
return name
def __init__(self,
total_number_of_points,
subset_number_of_points = -1,
name_of_the_method = None,
include_code_tests = False,
include_slow_tests = False,
csv_output = False):
self.total_number_of_points = total_number_of_points
if subset_number_of_points < 0:
subset_number_of_points = self.total_number_of_points
self.subset_number_of_points = subset_number_of_points
self.name_of_the_method = name_of_the_method
self.include_code_tests = include_code_tests
self.include_slow_tests = include_slow_tests
self.csv_output = csv_output
def handle_timeout(self, signum, frame):
self.t1=-1
self.t0=0
raise TimeoutException("Test did not finish in allocated time frame")
def start_measurement(self):
self.t0 = time.time()
signal.setitimer(signal.ITIMER_REAL, self.maximum_number_of_seconds_to_allow_per_test)
signal.signal(signal.SIGALRM, self.handle_timeout)
def end_measurement(self):
self.t1 = time.time()
signal.setitimer(signal.ITIMER_REAL, 0)
def run(self):
self.total_time = 0.0
for x in self.names_of_testing_methods():
if not self.name_of_the_method is None:
if x != self.name_of_the_method:
continue
method = getattr(self, x)
print(self.row_formatters[0](method), '...', end=' ', file=sys.stderr)
try:
method()
except TimeoutException as ex:
print("timed out,", ex, file=sys.stderr)
continue
except SkipException as ex:
print("skipped,", ex, file=sys.stderr)
continue
except Exception as ex:
print(ex)
traceback.print_exc()
self.t1=-1
self.t0=0
pass
delta_time = self.t1-self.t0
self.total_time = self.total_time + delta_time
print(self.row_formatters[1](delta_time), file=sys.stderr)
self.report_lines.append((method,delta_time))
if self.csv_output:
self.output_csv_line()
else:
self.output_table()
def output_csv_line(self):
line = []
line.append('N')
line.append('M')
for x in self.report_lines:
line.append(self.method_to_action(x[0]))
print('#' + ','.join(line))
line = []
line.append(str(self.total_number_of_points))
line.append(str(self.subset_number_of_points))
for x in self.report_lines:
line.append(str(x[1]))
print(','.join(line))
def output_table(self):
lines = []
lines.append(':run date:')
lines.append(' {0}'.format(time.asctime()))
lines.append('')
lines.append(':number of points:')
lines.append(' {0}'.format(self.total_number_of_points))
lines.append('')
for x in lines:
print(x)
table = texttable.Texttable()
#table.set_deco(texttable.Texttable.HEADER)
table.set_cols_dtype([
't', # text
'f', # float (decimal)
])
table.set_cols_align(["l", "r"])
rows = []
rows.append(self.header_line)
rows.extend(self.report_lines_as_strings)
table.add_rows(rows)
print(table.draw())
def names_of_testing_methods(self):
for x in sorted(dir(type(self))):
if x.startswith(self.method_prefix):
yield x
@late
def method_prefix(self):
return "speed_"
@late
def report_lines_as_strings(self):
return list(self.iter_report_lines_as_strings())
def iter_report_lines_as_strings(self):
for line in self.report_lines:
yield list(map(lambda x, formatter : formatter(x), line, self.row_formatters))
def speed_make_plummer_sphere(self):
"""plummer sphere"""
self.start_measurement()
new_plummer_model(self.total_number_of_points)
self.end_measurement()
def speed_make_salpeter_mass_distribution(self):
"""plummer sphere"""
self.start_measurement()
new_salpeter_mass_distribution(self.total_number_of_points)
self.end_measurement()
def speed_make_salpeter_mass_distribution_nbody(self):
"""plummer sphere"""
self.start_measurement()
new_salpeter_mass_distribution_nbody(self.total_number_of_points)
self.end_measurement()
def speed_scale_plummer_sphere(self):
input = new_plummer_model(self.total_number_of_points)
self.is_slow_test()
self.start_measurement()
input.scale_to_standard()
self.end_measurement()
def is_slow_test(self):
if not self.include_slow_tests:
raise SkipException("slow tests disabled")
def is_single_particle_test(self):
if not self.include_slow_tests:
raise SkipException("single particle tests disabled")
def speed_calculate_potential_energy(self):
self.is_slow_test()
input = new_plummer_model(self.total_number_of_points)
self.start_measurement()
input.potential_energy(G=nbody_system.G)
self.end_measurement()
def speed_calculate_kinetic_energy(self):
input = new_plummer_model(self.total_number_of_points)
self.start_measurement()
input.kinetic_energy()
self.end_measurement()
def speed_start_and_stop_BHTree_code(self):
self.is_code_test()
self.start_measurement()
code = BHTree()
code.stop()
self.end_measurement()
def speed_add_particles_to_code(self):
self.is_code_test()
code = BHTree()
particles = new_plummer_model(self.total_number_of_points)
particles.radius = 0| nbody.length
self.start_measurement()
code.particles.add_particles(particles)
self.end_measurement()
code.stop()
def speed_add_particles_to_code_SI(self):
"""plummer sphere"""
self.is_code_test()
converter = nbody.nbody_to_si(1 | units.parsec, self.total_number_of_points | units.MSun)
code = BHTree(converter)
particles = new_plummer_model(self.total_number_of_points, converter)
particles.radius = 0| units.RSun
self.start_measurement()
code.particles.add_particles(particles)
self.end_measurement()
code.stop()
def speed_copy_particles_from_code(self):
self.is_code_test()
code = BHTree()
particles = new_plummer_model(self.total_number_of_points)
particles.radius = 0| nbody.length
code.particles.add_particles(particles)
channel = code.particles.new_channel_to(particles)
self.start_measurement()
channel.copy()
self.end_measurement()
code.stop()
def speed_evolve_code_0_d_001_time_in_BHTree(self):
"""plummer sphere"""
self.is_code_test()
self.is_slow_test()
code = BHTree()
particles = new_plummer_model(self.total_number_of_points)
particles.radius = 0| nbody.length
code.particles.add_particles(particles)
self.start_measurement()
code.evolve_model(0.001 | nbody.time)
self.end_measurement()
code.stop()
def speed_evolve_code_0_d_001_time_in_Hermite(self):
"""plummer sphere"""
self.is_code_test()
self.is_slow_test()
code = Hermite()
particles = new_plummer_model(self.total_number_of_points)
particles.radius = 0| nbody.length
code.particles.add_particles(particles)
self.start_measurement()
code.evolve_model(0.001 | nbody.time)
self.end_measurement()
code.stop()
def is_code_test(self):
if not self.include_code_tests:
raise SkipException("code tests disabled")
@late
def maximum_column_widths(self):
maximums = [len(x) for x in self.header_line]
for row in self.report_lines_as_strings:
maximums = [max(len(x), y) for x,y in zip(row, maximums)]
return maximums
@late
def column_widths(self):
return [x + 2 for x in self.maximum_column_widths]
def grid_table_row_separator_line(self, line_character = '-' ):
parts = []
for x in self.column_widths:
parts.append('+')
parts.append(line_character * x)
parts.append('+')
return ''.join(parts)
def grid_table_row_line(self, row):
parts = []
for width, x in zip(self.column_widths, row):
parts.append('|')
parts.append(' ')
parts.append(x.rjust(width-1))
parts.append('|')
return ''.join(parts)
def speed_iterate_over_particles(self):
self.is_single_particle_test()
particles = Particles(self.total_number_of_points)
particles.radius = 1.0 | nbody_system.length
array = numpy.zeros(self.total_number_of_points, dtype=numpy.object)
self.start_measurement()
i = 0
for x in particles:
array[i] = x
i += 1
self.end_measurement()
def speed_iterate_over_particles2(self):
self.is_single_particle_test()
class A(object):
__slots__ = ('i', 'j', 'k' , 'l')
__array_interface__ = {'shape':()}
def __len__(self):
raise AttributeError()
def __iter__(self):
raise AttributeError()
def __init__(self, i, j = 10, k = 20, l = 24):
self.i = i
if i > 10:
self.j = j + 10
else:
self.j = j
self.k = k
self.l = l
def __getattr__(self, name_of_the_attribute):
raise AttributeError("You tried to access attribute '{0}' but this attribute is not defined for this set.".format(name_of_the_attribute, ex))
array = numpy.zeros(self.total_number_of_points, dtype=numpy.object)
self.start_measurement()
for x in range(self.total_number_of_points):
array[x] = A(x,x,x,x)
self.end_measurement()
def speed_iterate_over_array(self):
self.is_single_particle_test()
class Test(object):
def __init__(self):
self.radius = 1.0
particles = [Test() for x in range(self.total_number_of_points)]
self.start_measurement()
for x in particles:
x.radius
self.end_measurement()
def speed_create_N_particles(self):
self.is_single_particle_test()
particles = Particles(self.total_number_of_points)
particles.radius = 1.0 | nbody_system.length
self.start_measurement()
for x in range(self.total_number_of_points):
Particle(x)
self.end_measurement()
def speed_copy_attributes_from_code(self):
self.is_code_test()
code = BHTree()
particles = new_plummer_model(self.total_number_of_points)
particles.radius = 0| nbody.length
code.particles.add_particles(particles)
channel = code.particles.new_channel_to(particles)
self.start_measurement()
channel.copy()
self.end_measurement()
code.stop()
def speed_copy_attributes_from_code_to_empty(self):
self.is_code_test()
code = BHTree()
particles = new_plummer_model(self.total_number_of_points)
particles.radius = 0| nbody.length
empty_particles = particles.empty_copy()
code.particles.add_particles(particles)
channel = code.particles.new_channel_to(empty_particles)
self.start_measurement()
channel.copy()
self.end_measurement()
code.stop()
def speed_copy_mass_attribute_from_code_to_empty(self):
self.is_code_test()
code = BHTree()
particles = new_plummer_model(self.total_number_of_points)
particles.radius = 0| nbody.length
empty_particles = particles.empty_copy()
code.particles.add_particles(particles)
channel = code.particles.new_channel_to(empty_particles)
self.start_measurement()
channel.copy_attribute("mass","zmass")
self.end_measurement()
code.stop()
def speed_copy_position_and_velocity_attributes_from_code_to_empty(self):
    """Time two-way attribute copies: positions and velocities out of the
    code, positions back into it."""
    self.is_code_test()
    code = BHTree()
    particles = new_plummer_model(self.total_number_of_points)
    # NOTE(review): assumes `nbody` is an alias of `nbody_system` -- confirm.
    particles.radius = 0 | nbody.length
    empty_particles = particles.empty_copy()
    code.particles.add_particles(particles)
    channel1 = code.particles.new_channel_to(empty_particles)
    channel2 = empty_particles.new_channel_to(code.particles)
    self.start_measurement()
    channel1.copy_attributes(["x", "y", "z", "vx", "vy", "vz"])
    channel2.copy_attributes(["x", "y", "z"])
    self.end_measurement()
    code.stop()
def speed_copy_to_superset(self):
    """Time channel copies between a two-set superset and its empty copy."""
    particles1 = new_plummer_model(self.total_number_of_points)
    particles2 = new_plummer_model(self.total_number_of_points)
    particles_all = ParticlesSuperset([particles1, particles2])
    empty_particles = particles_all.empty_copy()
    channel1 = particles_all.new_channel_to(empty_particles)
    channel2 = empty_particles.new_channel_to(particles_all)
    self.start_measurement()
    channel1.copy_attributes(["x", "y", "z"])
    channel2.copy_attributes(["x", "y", "z"])
    self.end_measurement()
def speed_copy_to_set(self):
    """Time channel copies on one flat set of 2N particles -- the same
    total work as the superset test, for comparison."""
    particles_all = new_plummer_model(self.total_number_of_points * 2)
    empty_particles = particles_all.empty_copy()
    channel1 = particles_all.new_channel_to(empty_particles)
    channel2 = empty_particles.new_channel_to(particles_all)
    self.start_measurement()
    channel1.copy_attributes(["x", "y", "z"])
    channel2.copy_attributes(["x", "y", "z"])
    self.end_measurement()
def speed_copy(self):
    """Time an in-memory copy of the full particle set."""
    particles_all = new_plummer_model(self.total_number_of_points)
    self.start_measurement()
    particles_all.copy_to_memory()
    self.end_measurement()
def speed_copy_subset(self):
    """Time an in-memory copy of a slice of the particle set."""
    particles_all = new_plummer_model(self.total_number_of_points)
    subset = particles_all[:self.subset_number_of_points]
    self.start_measurement()
    subset.copy_to_memory()
    self.end_measurement()
def speed_select_array(self):
    """Time select_array() plus one attribute read on the selection."""
    particles_all = new_plummer_model(self.total_number_of_points)
    self.start_measurement()
    particles_selected = particles_all.select_array(lambda position: position.lengths() > 0.5 | nbody_system.length, ["position"])
    # Accessing an attribute forces the selection to be realised.
    particles_selected.x
    self.end_measurement()
def speed_select_with_get_item(self):
    """Time boolean-mask selection via __getitem__ (compare select_array)."""
    particles_all = new_plummer_model(self.total_number_of_points)
    self.start_measurement()
    particles_selected = particles_all[particles_all.position.lengths() > 0.5 | nbody_system.length]
    self.end_measurement()
def speed_iterate_over_quantity(self):
    """Time indexed element access on a vector quantity."""
    lengths = numpy.arange(self.total_number_of_points) | nbody_system.length
    self.start_measurement()
    for x in range(self.total_number_of_points):
        lengths[x]
    self.end_measurement()
def speed_add_particles(self):
    """Time adding particles to an empty set in roughly 10 batches.

    The source set is sliced into equal chunks outside the timed region;
    only the add_particles() calls are measured.
    """
    particles_to_add = new_plummer_model(self.total_number_of_points)
    # Integer division: a float step would make the slice indices below
    # raise TypeError under Python 3.  Also guard against a zero step
    # when the set holds fewer than 10 particles.
    step = max(self.total_number_of_points // 10, 1)
    particle_sets = []
    i = 0
    while i < self.total_number_of_points:
        j = min(i + step, self.total_number_of_points)
        particle_sets.append(particles_to_add[i:j].copy())
        i = j
    particles = Particles()
    self.start_measurement()
    for x in particle_sets:
        particles.add_particles(x)
    self.end_measurement()
def new_option_parser():
    """Build the command-line parser for the speed-test runner.

    Returns an OptionParser whose parsed options map one-to-one onto the
    RunSpeedTests keyword arguments.
    """
    result = OptionParser()
    result.add_option(
        "-n",
        default=2,
        dest="total_number_of_points",
        help="length of particle set",  # fixed typo: "lenght"
        type="int"
    )
    result.add_option(
        "-m",
        default=-1,
        dest="subset_number_of_points",
        help="length of particles in subset functions",  # fixed typo
        type="int"
    )
    result.add_option(
        "--test",
        default=None,
        dest="name_of_the_method",
        help="name of the test method to run",
        type="string"
    )
    result.add_option(
        "--code",
        action="store_true",
        default=False,
        dest="include_code_tests",
        help="also run tests with codes"
    )
    result.add_option(
        "--slow",
        action="store_true",
        default=False,
        dest="include_slow_tests",
        help="also run slow tests, scale N**2 or worse, for example calculating the potential energy"
    )
    result.add_option(
        "--csv",
        action="store_true",
        default=False,
        help="display run as one comma separated line",
        dest="csv_output"
    )
    return result
if __name__ == '__main__':
    # Parse command-line options and forward them verbatim as keyword
    # arguments to the test runner (defined earlier in this file).
    options, arguments = new_option_parser().parse_args()
    x = RunSpeedTests(**options.__dict__)
    x.run()
| 19,645
| 30.184127
| 157
|
py
|
amuse
|
amuse-main/src/amuse/test/suite/reports/plot_speed_report.py
|
import sys
import numpy
from optparse import OptionParser
try:
from matplotlib import pyplot
HAS_MATPLOTLIB = True
except ImportError:
HAS_MATPLOTLIB = False
def select(row, cols_spec):
    """Yield the elements of *row* picked out by *cols_spec*.

    The spec is a comma-separated list of items; each item is either a
    single index ("5") or a range ("2-7", end exclusive).  An open-ended
    range ("3-") runs to the end of the row and a negative end counts
    back from the end of the row.
    """
    indices = []
    for item in (piece.strip() for piece in cols_spec.split(',')):
        bounds = item.split('-')
        if len(bounds) == 1:
            indices.append(int(bounds[0]))
            continue
        stop = len(row) if not bounds[1] else int(bounds[1])
        if stop < 0:
            stop += len(row)
        indices.extend(range(int(bounds[0]), stop))
    for position in indices:
        yield row[position]
def plot_speed_report(input_filename=None, output_filename=None, cols='0-'):
    """Read a speed-report CSV and plot the selected columns against N.

    The file alternates header lines (starting with '#') and data lines;
    all headers must be identical or an Exception is raised.  The first
    two fields of every line (N and a time column) are skipped before
    the "0-3,5"-style column spec is applied.

    :param input_filename: CSV report to read
    :param output_filename: file to save the figure to; shown
        interactively when None
    :param cols: column selection spec, see :func:`select`
    """
    with open(input_filename, 'r') as stream:
        lines = stream.readlines()
    header = None
    x = []
    data = []
    for line in lines:
        if line.startswith('#'):
            # Header line: verify it matches every previous header.
            header_for_next_line = line[1:].split(',')
            header_for_next_line = list(select(header_for_next_line[2:], cols))
            if not header is None:
                if not header == header_for_next_line:
                    raise Exception("data does not have same header")
            header = header_for_next_line
        else:
            parts = list(map(str.strip, line.split(',')))
            if parts[0] == '':
                continue
            x.append(int(parts[0]))
            numbers = [float(x) for x in parts[2:]]
            data.append(list(select(numbers, cols)))
    x = numpy.asarray(x)
    data = numpy.asarray(data)
    print(data.shape)
    figure = pyplot.figure(figsize=(9, 4))
    subplot = pyplot.subplot(1, 2, 1)
    handles = subplot.plot(x, data)
    # Reference line with slope one (linear scaling) for comparison on
    # the log-log axes below.
    subplot.plot(x, 1e-5 * x)
    subplot.legend(
        handles,
        header,
        loc='center left',
        bbox_to_anchor=(1.05, 0.5),
        ncol=1,
        fancybox=False,
        shadow=False)
    pyplot.loglog()
    if output_filename is None:
        pyplot.show()
    else:
        pyplot.savefig(output_filename)
def new_option_parser():
    """Build the command-line parser for the speed-report plotting script."""
    parser = OptionParser()
    # Data-driven definition: (flag, keyword arguments) per option.
    option_specs = [
        ("-o", dict(
            default=None,
            dest="output_filename",
            help="save figure to output, by default it will display it",
            type="string",
        )),
        ("-i", dict(
            default='report.csv',
            dest="input_filename",
            help="name of the file to load the data from",
            type="string",
        )),
        ("--cols", dict(
            default='0-',
            dest="cols",
            help="columns to plot, can by 1,2,3 or 0-3 or 0-5, 6, 3",
            type="string",
        )),
    ]
    for flag, keyword_arguments in option_specs:
        parser.add_option(flag, **keyword_arguments)
    return parser
if __name__ == '__main__':
    # Forward the parsed options verbatim as keyword arguments.
    options, arguments = new_option_parser().parse_args()
    plot_speed_report(**options.__dict__)
| 2,951
| 26.082569
| 82
|
py
|
amuse
|
amuse-main/src/amuse/test/suite/reports/__init__.py
| 0
| 0
| 0
|
py
|
|
amuse
|
amuse-main/src/amuse/test/suite/ticket_tests/test_ticket208.py
|
import numpy
from amuse.test import amusetest
import os.path
from amuse.community.phigrape.interface import PhiGRAPEInterface, PhiGRAPE
from amuse.community.hermite.interface import Hermite
from amuse.community.bhtree.interface import BHTree
from amuse.community.ph4.interface import ph4
from amuse.community.fi.interface import Fi
from amuse.community.gadget2.interface import Gadget2
from amuse import io
from amuse.units import nbody_system
from amuse.units import units
from amuse import datamodel
class TestsForTicket208(amusetest.TestCase):
    """Regression tests for ticket #208: adding and removing particles
    must keep a gravity code's particle set consistent, across several
    community codes."""

    def _run_addition_removal_test(
        self,
        instance,
        length_unit=nbody_system.length,
        speed_unit=nbody_system.speed,
        mass_unit=nbody_system.mass
    ):
        """Add 10 particles, remove two, add one more, and verify the
        remaining masses and positions after each step.

        :param instance: a constructed (not yet initialized) gravity code
        :param length_unit: unit for positions/radii (N-body by default;
            Gadget2 needs physical units, see test6)
        :param speed_unit: unit for velocities
        :param mass_unit: unit for masses
        """
        instance.initialize_code()
        particles = datamodel.Particles(10)
        particles.mass = numpy.arange(1, 11) | mass_unit
        particles.radius = numpy.arange(1, 11) | length_unit
        particles.x = numpy.arange(1, 11) | length_unit
        particles.y = numpy.arange(1, 11) | length_unit
        particles.z = numpy.arange(1, 11) | length_unit
        particles.vx = numpy.arange(1, 11) | speed_unit
        particles.vy = numpy.arange(1, 11) | speed_unit
        particles.vz = numpy.arange(1, 11) | speed_unit
        instance.particles.add_particles(particles)
        instance.commit_particles()
        self.assertEqual(len(instance.particles), 10)
        self.assertAlmostRelativeEquals(instance.particles.mass.as_quantity_in(mass_unit), list(numpy.arange(1, 11)) | mass_unit)
        # Remove the 3rd and 6th particle; the code must renumber cleanly.
        instance.particles.remove_particle(particles[2])
        instance.particles.remove_particle(particles[5])
        self.assertEqual(len(instance.particles), 8)
        self.assertAlmostRelativeEquals(instance.particles.mass.as_quantity_in(mass_unit), [1, 2, 4, 5, 7, 8, 9, 10] | mass_unit)
        # Add one fresh particle after the removals.
        particles_new = datamodel.Particles(1)
        particles_new.mass = 20 | mass_unit
        particles_new.radius = 21 | length_unit
        particles_new.x = 22 | length_unit
        particles_new.y = 23 | length_unit
        particles_new.z = 24 | length_unit
        particles_new.vx = 25 | speed_unit
        particles_new.vy = 26 | speed_unit
        particles_new.vz = 27 | speed_unit
        instance.particles.add_particles(particles_new)
        self.assertEqual(len(instance.particles), 9)
        self.assertAlmostRelativeEquals(instance.particles.mass.as_quantity_in(mass_unit), [1, 2, 4, 5, 7, 8, 9, 10, 20] | mass_unit)
        self.assertAlmostRelativeEquals(instance.particles.x.as_quantity_in(length_unit), [1, 2, 4, 5, 7, 8, 9, 10, 22] | length_unit)
        instance.cleanup_code()
        instance.stop()

    def test1(self):
        instance = Hermite()
        self._run_addition_removal_test(instance)

    def test2(self):
        instance = PhiGRAPE()
        self._run_addition_removal_test(instance)

    def test3(self):
        instance = BHTree()
        self._run_addition_removal_test(instance)

    def test4(self):
        instance = ph4()
        self._run_addition_removal_test(instance)

    def test5(self):
        instance = Fi()
        self._run_addition_removal_test(instance)

    def test6(self):
        # Gadget2 works in physical (SI-convertible) units.
        length_unit = units.parsec
        speed_unit = units.parsec / units.Myr
        mass_unit = units.MSun
        instance = Gadget2()
        self._run_addition_removal_test(instance, length_unit, speed_unit, mass_unit)
| 3,459
| 33.257426
| 134
|
py
|
amuse
|
amuse-main/src/amuse/test/suite/ticket_tests/test_issue123.py
|
from amuse.test import amusetest
from amuse.community.sse.interface import SSE
from amuse.community.bhtree.interface import BHTree
class TestsForIssue123(amusetest.TestCase):
    """Regression tests for issue #123: constructing a code with a bogus
    worker name must raise a clean exception, not a RecursionError."""

    def test1(self):  # doesn't trigger recursion error
        self.assertRaises(Exception, BHTree, name_of_the_worker="bogus", expected_message="__init__() got multiple values for keyword argument 'name_of_the_worker'")

    def test2(self):  # does
        self.assertRaises(Exception, SSE, name_of_the_worker="bogus", expected_message="__init__() got multiple values for keyword argument 'name_of_the_worker'")
| 593
| 44.692308
| 165
|
py
|
amuse
|
amuse-main/src/amuse/test/suite/ticket_tests/test_github856.py
|
import os
from amuse.test import amusetest
from amuse.datamodel import new_cartesian_grid, Particles
from amuse.io import read_set_from_file, write_set_to_file
class test_github856(amusetest.TestCase):
    """Regression tests for github issue #856: grid axes names and vector
    attributes must survive a write/read round trip and a copy."""

    def test1(self):
        # Grid axes names must survive writing to and reading from file.
        filename = os.path.join(self.get_path_to_results(), "github856.amuse")
        g1 = new_cartesian_grid((5, 5), 1)
        write_set_to_file(g1, filename, "amuse")
        del g1
        g2 = read_set_from_file(filename, "amuse")
        # assertEquals is a long-deprecated unittest alias, removed in
        # Python 3.12; use assertEqual.
        self.assertEqual(g2.get_axes_names(), "xy")

    def test2(self):
        # A vector attribute must survive an in-memory copy.
        g1 = Particles(lon=[1, 2], lat=[3, 4])
        g1.add_vector_attribute("lonlat", ["lon", "lat"])
        g2 = g1.copy()
        self.assertEqual(g2.lonlat, [[1, 3], [2, 4]])

    def test3(self):
        # A vector attribute must survive a file round trip.
        filename = os.path.join(self.get_path_to_results(), "github856_2.amuse")
        g1 = Particles(lon=[1, 2], lat=[3, 4])
        g1.add_vector_attribute("lonlat", ["lon", "lat"])
        write_set_to_file(g1, filename, "amuse")
        del g1
        g2 = read_set_from_file(filename, "amuse")
        self.assertEqual(g2.lonlat, [[1, 3], [2, 4]])
| 1,102
| 31.441176
| 80
|
py
|
amuse
|
amuse-main/src/amuse/test/suite/ticket_tests/test_issue777.py
|
from amuse.test import amusetest
from amuse.units import units
from amuse.ic.brokenimf import new_kroupa_mass_distribution
class TestsForIssue777(amusetest.TestCase):
    """Regression tests for issue #777: a degenerate Kroupa IMF range
    (mass_min == mass_max) must return exactly that mass in every
    segment of the broken power law."""

    def _check_degenerate_range(self, limit):
        # With min == max the distribution has one possible outcome.
        mass = new_kroupa_mass_distribution(
            1,
            mass_min=limit,
            mass_max=limit,
        )
        self.assertEqual(mass[0], limit)

    def test_upper_segment(self):
        "Test if a star in the upper mass segment will get the right mass"
        self._check_degenerate_range(1.0 | units.MSun)

    def test_middle_segment(self):
        "Test if a star in the middle mass segment will get the right mass"
        self._check_degenerate_range(0.2 | units.MSun)

    def test_lower_segment(self):
        "Test if a star in the lower mass segment will get the right mass"
        self._check_degenerate_range(0.02 | units.MSun)
| 1,308
| 31.725
| 75
|
py
|
amuse
|
amuse-main/src/amuse/test/suite/ticket_tests/test_issue850.py
|
from amuse.test import amusetest
from amuse.datamodel import Particles
from amuse.units import units
from amuse.community.seba import Seba
from amuse.community.bse import Bse
from amuse.support.console import set_printing_strategy
from amuse.units import units
class TestsForIssue850(amusetest.TestCase):
    """Regression tests for issue #850: a binary-star evolution code must
    keep evolving stars that are not members of any binary."""

    def create_stars_and_binaries(self, binary_pair=(0, 1)):
        """Return (stars, binary): three stars and one binary particle
        pairing the stars indexed by *binary_pair*.

        Note: the default was changed from a mutable list to a tuple;
        indexing behaviour is identical.
        """
        stars = Particles(3)
        stars[0].initial_mass = 14 | units.MSun
        stars[1].initial_mass = 10 | units.MSun
        stars[2].initial_mass = 9 | units.MSun
        stars.mass = stars.initial_mass
        binary = Particles(1)
        binary.semi_major_axis = 30000 | units.RSun
        binary.eccentricity = 0.3
        binary.child1 = stars[binary_pair[0]]
        binary.child2 = stars[binary_pair[1]]
        return stars, binary

    def test_do_all_stars_evolve_no_binary(self, code=Seba):
        "Test if all stars evolve (no binaries)"
        stars, binary = self.create_stars_and_binaries(binary_pair=[0, 1])
        instance = code()
        instance.particles.add_particles(stars)
        channel_stars = instance.particles.new_channel_to(stars)
        channel_stars.copy()
        end_time = 13500. | units.Myr
        instance.evolve_model(end_time)
        channel_stars.copy()
        # All stars need to have evolved
        print(
            f"t: {end_time}, m: {stars[0].mass} {stars[1].mass} {stars[2].mass} "
        )
        for i in range(len(stars)):
            self.assertNotEqual(stars[i].mass, stars[i].initial_mass)
        instance.stop()

    def _test_do_all_stars_evolve_no_binary_bse(self):
        # Disabled (leading underscore): same test against BSE.
        self.test_do_all_stars_evolve_no_binary(code=Bse)

    def test_does_seba_evolve_stars_not_in_binary(self, code=Seba):
        """
        Tests if code evolves any star that is not in a binary if a binary is
        added.
        """
        set_printing_strategy('default')
        stars, binary = self.create_stars_and_binaries(binary_pair=[0, 1])
        additional_stars = Particles(5)
        # NOTE(review): these get `original_mass`, not `initial_mass`; the
        # assertion loop below reads `initial_mass` for all stars -- confirm
        # the attribute mismatch is intended.
        additional_stars.original_mass = [11, 20, 4, 2, 1] | units.MSun
        additional_stars.mass = additional_stars.original_mass
        stars.add_particles(additional_stars)
        instance = code()
        instance.particles.add_particles(stars)
        instance.binaries.add_particles(binary)
        channel_stars = instance.particles.new_channel_to(stars)
        channel_binary = instance.binaries.new_channel_to(binary)
        channel_stars.copy()
        channel_binary.copy()
        end_time = 13500. | units.Myr
        instance.evolve_model(end_time)
        channel_stars.copy()
        channel_binary.copy()
        # All stars need to have evolved
        print(
            f"t: {end_time}, m: {stars[0].mass} {stars[1].mass} {stars[2].mass} "
        )
        for i in range(len(stars)):
            self.assertNotEqual(stars[i].mass, stars[i].initial_mass)
        instance.stop()

    def _test_does_bse_evolve_stars_not_in_binary(self):
        # Disabled (leading underscore): same test against BSE.
        self.test_does_seba_evolve_stars_not_in_binary(code=Bse)

    def test_do_all_stars_evolve_default_binary(self):
        "Test if all stars evolve (default order binary)"
        stars, binary = self.create_stars_and_binaries(binary_pair=[0, 1])
        instance = Seba()
        instance.particles.add_particles(stars)
        instance.binaries.add_particles(binary)
        channel_stars = instance.particles.new_channel_to(stars)
        channel_binary = instance.binaries.new_channel_to(binary)
        channel_stars.copy()
        channel_binary.copy()
        end_time = 13500. | units.Myr
        instance.evolve_model(end_time)
        channel_stars.copy()
        channel_binary.copy()
        # All stars need to have evolved
        print(
            f"t: {end_time}, m: {stars[0].mass} {stars[1].mass} {stars[2].mass} "
        )
        for i in range(len(stars)):
            self.assertNotEqual(stars[i].mass, stars[i].initial_mass)
        instance.stop()

    def test_do_all_stars_evolve_alternative_binary(self):
        "Test if all stars evolve (other stars in the binary)"
        stars, binary = self.create_stars_and_binaries(binary_pair=[0, 2])
        instance = Seba()
        instance.particles.add_particles(stars)
        instance.binaries.add_particles(binary)
        channel_stars = instance.particles.new_channel_to(stars)
        channel_binary = instance.binaries.new_channel_to(binary)
        channel_stars.copy()
        channel_binary.copy()
        end_time = 13500. | units.Myr
        instance.evolve_model(end_time)
        channel_stars.copy()
        channel_binary.copy()
        # All stars need to have evolved
        print(
            f"t: {end_time}, m: {stars[0].mass} {stars[1].mass} {stars[2].mass} "
        )
        for i in range(len(stars)):
            self.assertNotEqual(stars[i].mass, stars[i].initial_mass)
        instance.stop()

    def test_do_all_stars_evolve_alternative_binary_extra_stars(self):
        "Test if all stars evolve (other stars in the binary, extra stars)"
        stars, binary = self.create_stars_and_binaries(binary_pair=[0, 2])
        additional_stars = Particles(5)
        additional_stars.original_mass = [11, 20, 4, 2, 1] | units.MSun
        additional_stars.mass = additional_stars.original_mass
        stars.add_particles(additional_stars)
        instance = Seba()
        instance.particles.add_particles(stars)
        instance.binaries.add_particles(binary)
        channel_stars = instance.particles.new_channel_to(stars)
        channel_binary = instance.binaries.new_channel_to(binary)
        channel_stars.copy()
        channel_binary.copy()
        end_time = 13500. | units.Myr
        instance.evolve_model(end_time)
        channel_stars.copy()
        channel_binary.copy()
        # All stars need to have evolved
        print(
            f"t: {end_time}, m: {stars[0].mass} {stars[1].mass} {stars[2].mass} "
        )
        for i in range(len(stars)):
            self.assertNotEqual(stars[i].mass, stars[i].initial_mass)
        instance.stop()
if __name__ == "__main__":
    # Allow running this regression suite directly, outside the test runner.
    x = TestsForIssue850()
    x.test_do_all_stars_evolve_no_binary()
    x.test_does_seba_evolve_stars_not_in_binary()
    x.test_do_all_stars_evolve_default_binary()
    x.test_do_all_stars_evolve_alternative_binary_extra_stars()
    x.test_do_all_stars_evolve_alternative_binary()
| 6,396
| 33.766304
| 81
|
py
|
amuse
|
amuse-main/src/amuse/test/suite/ticket_tests/test_ticket118.py
|
from amuse.test import amusetest
import os.path
from amuse import io
from amuse.units import units
class TestsForTicket118(amusetest.TestCase):
    """Regression tests for ticket #118: reading Starlab 'dyn' snapshots."""

    def test1(self):
        filename = os.path.join(os.path.dirname(__file__), 'FinalSnapshot.out')
        # NOTE(review): the local name ``set`` shadows the builtin.
        set = io.read_set_from_file(filename, 'dyn')
        self.assertEqual(len(set), 10)

    def test2(self):
        filename = os.path.join(os.path.dirname(__file__), 'FinalSnapshot.out')
        root = io.read_set_from_file(filename, 'dyn', return_children=False)
        self.assertFalse(root is None)

    def test3(self):
        filename = os.path.join(os.path.dirname(__file__), 'FinalSnapshot.out')
        set = io.read_set_from_file(filename, 'dyn')
        print(set)
        print("set[0].parent.mass", set[0].parent.mass)
        # Expected values: fractional mass times the total system mass (MSun).
        self.assertAlmostRelativeEquals(0.000227826766314251919 * 617.75586357299929284496, set[9].mass.value_in(units.MSun), 12)
        self.assertAlmostRelativeEquals(617.75586357299929284496 * 0.953575109205781479, set[0].parent.mass.value_in(units.MSun), 12)
        # self.assertTrue(False)
| 1,090
| 34.193548
| 134
|
py
|
amuse
|
amuse-main/src/amuse/test/suite/ticket_tests/__init__.py
| 0
| 0
| 0
|
py
|
|
amuse
|
amuse-main/src/amuse/support/core.py
|
"""
"""
import types
import collections.abc
import re
def compare_version_strings(version1, version2):
    """Three-way compare two dotted version strings.

    Trailing ".0" components are ignored ("1.2" equals "1.2.0") and any
    non-numeric component counts as 0.  Returns -1, 0 or 1.
    """
    def as_components(version):
        trimmed = re.sub(r'(\.0+)*$', '', version)
        return [int(piece) if piece.isdigit() else 0 for piece in trimmed.split(".")]

    left = as_components(version1)
    right = as_components(version2)
    return (left > right) - (left < right)
class late(object):
    """
    Descriptor that computes an attribute on first access and caches it.

    The value is produced by the decorated *initializer* method; the
    attribute takes the initializer's name.  Comparable to setting the
    attribute in *__init__*, except the value is only computed when it
    is first read.

    Typical use to define a managed attribute x:

    >>> class C(object):
    ...     @late
    ...     def x(self):
    ...         return "i'm late!"
    ...
    >>> c = C()
    >>> print(c.x)
    i'm late!
    >>> c.x = "overridden"
    >>> print(c.x)
    overridden

    :argument initializer: function to determine the initial value of the property
    :returns: a descriptor to determine and set the value on first access
    """

    def __init__(self, initializer):
        self.initializer = initializer
        self.__doc__ = initializer.__doc__

    def __get__(self, instance, owner):
        if instance is None:
            return self
        try:
            value = self.initializer(instance)
        except Exception as ex:
            # A property-like attribute must raise AttributeError so that
            # hasattr()/getattr() keep working as callers expect.
            raise AttributeError(ex)
        # Cache on the instance: later reads bypass this descriptor.
        setattr(instance, self.initializer.__name__, value)
        return value
class print_out(object):
    """
    Efficient way to construct large strings.

    Strings are built up out of parts. Objects of this
    class store these parts while building the string.
    Only on request the parts are concatenated into
    a large string.

    Strings and numbers can be added to the print_out.
    For other objects str(object) is called before
    adding it to the print_out.

    >>> p = print_out()
    >>> p + "number of counts : " + 10 #doctest: +ELLIPSIS
    <amuse.support.core.print_out object at 0x...>
    >>> print(p.string)
    number of counts : 10

    All methods return the print_out instance, so that
    calls can be chained.
    """

    def __init__(self):
        self.parts = []
        self._indent = 0
        self.number_of_characters_on_current_line = 0

    def __add__(self, x):
        """Add a new part to the print_out.
        """
        if self.isstring(x):
            self.parts.append(x)
            self.number_of_characters_on_current_line += len(x)
        elif self.isnumber(x):
            self.parts.append(str(x))
            self.number_of_characters_on_current_line += len(str(x))
        elif isinstance(x, print_out):
            # Merge another print_out's parts (line counter not updated).
            self.parts.extend(x.parts)
        else:
            part = str(x)
            self.parts.append(part)
            self.number_of_characters_on_current_line += len(part)
        return self

    def n(self):
        """Start a new-line, if the current line is not-empty.

        >>> p = print_out()
        >>> for i in range(3):
        ...     p.n() + i #doctest: +ELLIPSIS
        ...
        <amuse.support.core.print_out object at 0x...>
        <amuse.support.core.print_out object at 0x...>
        <amuse.support.core.print_out object at 0x...>
        >>> print(p.string)
        0
        1
        2
        """
        if not self.parts:
            return self
        if self.parts[-1] == '\n':
            return self
        self.lf()
        return self

    def indent(self):
        """Increase the indent. The next and following lines
        will start indented.

        >>> p = print_out()
        >>> p + "01" #doctest: +ELLIPSIS
        <amuse.support.core.print_out object at 0x...>
        >>> p.indent().lf() + "2" #doctest: +ELLIPSIS
        <amuse.support.core.print_out object at 0x...>
        >>> p.lf() + "3" #doctest: +ELLIPSIS
        <amuse.support.core.print_out object at 0x...>
        >>> print(p.string)
        01
          2
          3
        """
        self._indent += 1
        return self

    def dedent(self):
        """Decrease the indent. The next line will start dedented.

        >>> p = print_out()
        >>> p + "01" #doctest: +ELLIPSIS
        <amuse.support.core.print_out object at 0x...>
        >>> p.indent().lf() + "2" #doctest: +ELLIPSIS
        <amuse.support.core.print_out object at 0x...>
        >>> p.dedent().lf() + "01" #doctest: +ELLIPSIS
        <amuse.support.core.print_out object at 0x...>
        >>> print(p.string)
        01
          2
        01
        """
        self._indent -= 1
        return self

    def lf(self):
        """Start a new-line"""
        self.parts.append('\n')
        self.number_of_characters_on_current_line = 0
        self.do_indent()
        return self

    def lf_noindent(self):
        """Start a new-line"""
        self.parts.append('\n')
        self.number_of_characters_on_current_line = 0
        return self

    def do_indent(self):
        # Emit one indent unit per current indent level.
        for ignore in range(self._indent):
            self.parts.append(self.indent_characters())
            self.number_of_characters_on_current_line += len(self.indent_characters())

    def indent_characters(self):
        """ The indent characters, by default 2 spaces.

        Override this method to change the indent characters.
        """
        return '  '

    def __str__(self):
        return ''.join(self.parts)

    @property
    def string(self):
        """String version of the print_out.
        """
        return str(self)

    def isstring(self, x):
        # NOTE(review): only ``bytes`` is detected; ``str`` values fall
        # through to the generic str(x) branch in __add__, which has the
        # same effect.  Looks like a Python 2 -> 3 conversion leftover.
        return isinstance(x, bytes)

    def isnumber(self, x):
        return isinstance(x, int) or isinstance(x, float)
class OrderedDictionary(object):
    """A dictionary that keeps the keys in the dictionary in order.

    Ordered dictionaries are just like regular dictionaries but they remember the
    order that items were inserted.  When iterating over an ordered dictionary,
    the *values* are returned in the order their keys were first added.

    >>> d = OrderedDictionary()
    >>> d["first"] = 0
    >>> d["second"] = 1
    >>> d["third"] = 2
    >>> [x for x in d]
    [0, 1, 2]
    """

    def __init__(self):
        self.mapping = {}
        self.orderedKeys = []

    def __setitem__(self, key, value):
        # Remember the key only on first insertion, so that updating an
        # existing key keeps its original position.
        if key not in self.mapping:
            self.orderedKeys.append(key)
        self.mapping[key] = value

    def __getitem__(self, key):
        return self.mapping[key]

    def __contains__(self, key):
        return key in self.mapping

    def __iter__(self):
        # Iteration yields values (see class docstring), not keys.
        return iter(self.values())

    def __len__(self):
        return len(self.orderedKeys)

    def __str__(self):
        pairs = ', '.join(
            repr(key) + ':' + repr(self.mapping[key])
            for key in self.orderedKeys
        )
        return 'OrderedDictionary({' + pairs + '})'

    def __repr__(self):
        return str(self)

    def iterkeys(self):
        return iter(self.orderedKeys)

    def itervalues(self):
        for key in self.orderedKeys:
            yield self.mapping[key]

    def iteritems(self):
        for key in self.orderedKeys:
            yield key, self.mapping[key]

    def keys(self):
        return list(self.orderedKeys)

    def pop(self, key):
        # Keys are unique, so remove() deletes exactly the right entry.
        self.orderedKeys.remove(key)
        return self.mapping.pop(key)

    def values(self):
        return [self.mapping[key] for key in self.orderedKeys]

    def items(self):
        return [(key, self.mapping[key]) for key in self.orderedKeys]

    def copy(self):
        duplicate = OrderedDictionary()
        duplicate.mapping = self.mapping.copy()
        duplicate.orderedKeys = list(self.orderedKeys)
        return duplicate
class OrderedMultiDictionary(object):
    """A dictionary that keeps the keys in the dictionary in order and can store
    multiple items per key.

    Ordered multi dictionaries remember the order that items were inserted
    and can store multiple values per key. When iterating over an ordered
    dictionary, the values are returned in the order their keys were first added.

    >>> d = OrderedMultiDictionary()
    >>> d["first"] = 0
    >>> d["second"] = 1
    >>> d["first"] = 2
    >>> [x for x in d]
    [0, 1, 2]
    >>> print(d["first"])
    [0, 2]
    >>> print(d["second"])
    [1]
    """

    def __init__(self):
        self.mapping = {}
        self.orderedKeys = []

    def __setitem__(self, key, value):
        if not key in self.mapping:
            self.mapping[key] = []
        self.mapping[key].append(value)
        # Remember (key, position-within-key) to reproduce insertion order.
        self.orderedKeys.append((key, len(self.mapping[key]) - 1,))

    def __getitem__(self, key):
        # Returns the full list of values stored under *key*.
        return self.mapping[key]

    def __contains__(self, key):
        return key in self.mapping

    def __iter__(self):
        # BUG FIX: __iter__ must return an iterator; the previous
        # implementation returned a list, which made ``for x in d`` raise
        # TypeError.  values() is a generator, so return it directly.
        return self.values()

    def __len__(self):
        return len(self.orderedKeys)

    def __str__(self):
        result = 'OrderedDictionary({'
        elements = []
        for x, index in self.orderedKeys:
            elements.append(repr(x) + ':' + repr(self[x][index]))
        result += ', '.join(elements)
        result += '})'
        return result

    def __repr__(self):
        return str(self)

    def __getattr__(self, key):
        # Attribute access falls back to item lookup: d.first == d['first'].
        # NOTE(review): raises KeyError (not AttributeError) for unknown
        # names, which can confuse hasattr() -- kept for compatibility.
        return self.mapping[key]

    def keys(self):
        return [x for x, index in self.orderedKeys]

    def values(self):
        for x, index in self.orderedKeys:
            yield self.mapping[x][index]
class CompositeDictionary(object):
    """A dictionary that defers to other dictionaries when an item is
    not found.

    Composite dictionaries are just like regular dictionaries but they
    get items from their parent dictionaries when they do not contain
    the items.

    >>> p = {'a':1, 'b':2}
    >>> d = CompositeDictionary(p)
    >>> d['a']
    1
    >>> p['a'] = 3
    >>> d['a']
    3
    >>> d['c'] = 2
    >>> 'c' in p
    False
    >>> 'b' in d
    True
    """

    def __init__(self, *parents):
        self.parents = parents
        self.mapping = {}

    def __setitem__(self, key, value):
        # Writes always go to the local mapping, never to a parent.
        self.mapping[key] = value

    def __getitem__(self, key):
        if key in self.mapping:
            return self.mapping[key]
        for parent in self.parents:
            if key in parent:
                return parent[key]
        raise KeyError(key)

    def __contains__(self, key):
        if key in self.mapping:
            return True
        for parent in self.parents:
            if key in parent:
                return True
        return False

    def __iter__(self):
        # BUG FIX: __iter__ must return an iterator; the previous
        # implementation returned a list, which made ``for x in d`` raise
        # TypeError.  keys() already returns an iterator.
        return self.keys()

    def __len__(self):
        return len(list(self.keys()))

    def __str__(self):
        result = 'CompositeDictionary({'
        elements = []
        for x in list(self.keys()):
            elements.append(str(x) + ':' + str(self[x]))
        result += ','.join(elements)
        result += '})'
        return result

    def keys(self):
        # Union of local and parent keys; set-based, so order is undefined.
        keys = set(self.mapping.keys())
        for parent in self.parents:
            keys |= set(parent.keys())
        return iter(keys)

    def values(self):
        for x in list(self.keys()):
            yield self[x]

    def copy(self):
        result = type(self)(*self.parents)
        result.mapping = self.mapping.copy()
        return result
class OrderedSet(collections.abc.MutableSet):
    """Mutable set that remembers insertion order.

    Implemented as a circular doubly linked list of nodes (with a
    sentinel ``end`` node) plus a key -> node map for O(1) membership.
    """

    class Node(object):
        __slots__ = ['key', 'next', 'previous']

        def __init__(self, key, next=None, previous=None):
            self.key = key
            # A node with no neighbours links to itself (sentinel case).
            if next is None:
                next = self
            if previous is None:
                previous = self
            self.next = next
            self.previous = previous
            self.link()

        def link(self):
            # Hook this node in between ``previous`` and ``next``.
            self.next.previous = self
            self.previous.next = self

        def discard(self):
            # BUG FIX: ``self.__next__`` was a stray 2to3 artifact; the
            # slot is named ``next``.
            self.previous.next = self.next
            self.next.previous = self.previous

    def __init__(self, iterable=None):
        self.end = self.Node(None, None, None)
        self.end.previous = self.end
        self.end.next = self.end
        self.map = {}
        if iterable is not None:
            self |= iterable

    def __len__(self):
        return len(self.map)

    def __contains__(self, key):
        return key in self.map

    def __iter__(self):
        # BUG FIX: walk the ``next`` links (``__next__`` never existed).
        end = self.end
        current = end.next
        while current is not end:
            yield current.key
            current = current.next

    def __reversed__(self):
        end = self.end
        current = end.previous
        while current is not end:
            yield current.key
            current = current.previous

    def __repr__(self):
        if not self:
            return '%s()' % (self.__class__.__name__,)
        return '%s(%r)' % (self.__class__.__name__, list(self))

    def __eq__(self, other):
        if isinstance(other, OrderedSet):
            return len(self) == len(other) and list(self) == list(other)
        # NOTE(review): for non-OrderedSet operands this tests overlap,
        # not equality -- kept as-is for compatibility.
        return not self.isdisjoint(other)

    def __del__(self):
        self.clear()

    def add(self, key):
        if key not in self.map:
            # BUG FIX: append before the sentinel -- the new node's
            # successor is ``end`` and its predecessor is the current
            # last node.  The old argument order corrupted the list.
            self.map[key] = self.Node(key, self.end, self.end.previous)

    def discard(self, key):
        if key in self.map:
            current = self.map.pop(key)
            current.discard()

    def pop(self, last=True):
        if not self:
            raise KeyError('set is empty')
        key = next(reversed(self)) if last else next(iter(self))
        self.discard(key)
        return key
def memoize(f):
    """Cache the results of *f*, keyed on its positional arguments.

    The cache is capped at 5000 entries: once full, results are still
    computed but no longer stored.  Unhashable arguments are computed
    without caching (the previous version raised TypeError when trying
    to store them).  The cache is exposed as the ``d`` attribute of the
    returned wrapper, as before.
    """
    def memof(*arg):
        try:
            return memof.d[arg]
        except (KeyError, TypeError):
            # KeyError: not cached yet; TypeError: unhashable argument.
            result = f(*arg)
            if len(memof.d) > 5000:
                return result
            try:
                memof.d[arg] = result
            except TypeError:
                # Unhashable argument: skip caching.
                pass
            return result
    memof.d = {}
    return memof
class MultitonMetaClass(type):
    """Metaclass caching one instance per distinct constructor-argument
    tuple: calling the class twice with equal positional arguments
    returns the same object (a 'multiton')."""

    def __new__(mcs, name, bases, namespace):
        # Give every class its own, independent instance cache.
        namespace['__INSTANCES__'] = {}
        return type.__new__(mcs, name, bases, namespace)

    def __call__(mcls, *arguments):
        cache = mcls.__INSTANCES__
        if arguments not in cache:
            cache[arguments] = type.__call__(mcls, *arguments)
        return cache[arguments]
| 15,197
| 26.98895
| 91
|
py
|
amuse
|
amuse-main/src/amuse/support/exceptions.py
|
class ErrorCode(object):
    """Identifier for an error condition, rendered as ``E<major>.<minor>``."""

    def __init__(self, majorcode, minorcode, description, formatstring=None):
        # Numeric identity plus a human-readable description and an
        # optional message format string.
        self.majorcode = majorcode
        self.minorcode = minorcode
        self.description = description
        self.formatstring = formatstring

    def __str__(self):
        return "E{0}.{1}".format(self.majorcode, self.minorcode)
class AmuseException(Exception):
    """Base AMUSE exception; the message is built by applying the class
    attribute ``formatstring`` to the stored arguments."""

    formatstring = "{0}"

    def __init__(self, *arguments):
        Exception.__init__(self)
        self.arguments = arguments

    def __str__(self):
        try:
            return self.formatstring.format(*self.arguments)
        except Exception as ex:
            # Formatting failed: fall back to a diagnostic string so the
            # exception itself never raises while being printed.
            return str(ex) + ", " + self.formatstring + ", " + str(self.arguments)

    @property
    def errorcode(self):
        # Convention: the last constructor argument is the error code.
        return self.arguments[-1]
class MissingAttributesAmuseException(AmuseException):
    """AmuseException that also records which attributes were missing."""

    def __init__(self, missing_attributes, *arguments):
        super(MissingAttributesAmuseException, self).__init__(*arguments)
        self.missing_attributes = missing_attributes
class AmuseWarning(Warning):
    """Base class for warnings issued by AMUSE."""
    pass
class CoreException(AmuseException):
    # Major code 0: errors raised by the AMUSE core itself.
    majorcode = 0
    errorcode = ErrorCode(majorcode, -1, "core error")
class CodeException(AmuseException):
    # Major code 1: errors raised by (legacy) community codes.
    majorcode = 1
    errorcode = ErrorCode(majorcode, -1, "legacy code error")
class KeysNotInStorageException(AmuseException):
    """Raised when particle keys requested from a storage are absent.

    The keys and indices that *were* found are kept so callers can
    recover the partial result.
    """

    def __init__(self, found_keys, found_indices, missing_keys):
        AmuseException.__init__(self)
        self.found_keys = found_keys
        self.found_indices = found_indices
        self.missing_keys = missing_keys

    def get_found_keys(self):
        return self.found_keys

    def get_found_indices(self):
        return self.found_indices

    def get_missing_keys(self):
        return self.missing_keys

    def __str__(self):
        if len(self.missing_keys) == 1:
            return "Key not found in storage: {0}".format(self.missing_keys[0])
        return "Keys not found in storage: {0}".format(self.missing_keys)
| 2,073
| 26.289474
| 79
|
py
|
amuse
|
amuse-main/src/amuse/support/state.py
|
from amuse.support import exceptions
from amuse.support.options import OptionalAttributes
from amuse.support.options import option
from amuse.support.thirdparty.texttable import Texttable
import logging
class State(object):
    """A single named node in a state-machine graph.

    Keeps the transitions that leave this state (``from_transitions``)
    and the transitions that arrive at it (``to_transitions``).
    """

    def __init__(self, handler, name):
        self.handler = handler
        self.name = name
        self.from_transitions = []
        self.to_transitions = []

    def __str__(self):
        return "state '{0}'".format(self.name)

    def matches(self, other):
        """A plain state only matches itself (by equality)."""
        return other == self

    def add_from_transition(self, transition):
        """add a transition starting at this state"""
        self.from_transitions.append(transition)

    def add_to_transition(self, transition):
        """add a transition to this state"""
        self.to_transitions.append(transition)

    def remove_from_transition(self, to_state):
        """Drop the last registered outgoing transition targeting *to_state*."""
        hits = [k for k, edge in enumerate(self.from_transitions)
                if edge.to_state is to_state]
        if hits:
            del self.from_transitions[hits[-1]]

    def remove_to_transition(self, from_state):
        """Drop the last registered incoming transition coming from *from_state*."""
        hits = [k for k, edge in enumerate(self.to_transitions)
                if edge.from_state is from_state]
        if hits:
            del self.to_transitions[hits[-1]]

    def get_to_transitions(self):
        """Return a shallow copy of the incoming transitions."""
        return list(self.to_transitions)

    def is_named(self):
        # A concrete State always represents a single named state.
        return True
class AllExcept(object):
    """Pseudo-state matching every named state except the given ones.

    Produced by StateMachine.new_state for names written as
    '!state1!state2'. It can be the source of transitions but never
    the target.
    """

    def __init__(self, states):
        self.states = states  # the excluded State objects
        self.from_transitions = []
        self.to_transitions = []

    def is_named(self):
        # Not a single named state.
        return False

    def __str__(self):
        return "all except {0}".format(', '.join([str(x) for x in self.states]))

    def matches(self, other):
        # True when *other* equals none of the excluded states.
        for x in self.states:
            if other == x:
                return False
        return True

    def add_from_transition(self, transition):
        """add a transition starting at this state"""
        self.from_transitions.append(transition)

    def add_to_transition(self, transition):
        """add a transition to this state"""
        raise Exception('you cannot define a transition to any except one state')

    def remove_from_transition(self, to_state):
        # Remove the last outgoing transition targeting *to_state*.
        index = -1
        for i, transition in enumerate(self.from_transitions):
            if transition.to_state is to_state:
                index = i
        if index >= 0:
            del self.from_transitions[index]

    def remove_to_transition(self, from_state):
        # No-op: transitions *to* this pseudo-state cannot exist.
        pass

    def get_to_transitions(self):
        """to transitions are the to transitions of all states except this one"""
        result = []
        # All excluded states share the same handler (the state machine).
        state_machine = self.states[0].handler
        for state in state_machine.iter_states():
            if state == self:
                continue
            if not self.matches(state):
                continue
            else:
                for transition in state.get_to_transitions():
                    if not transition.from_state == self:
                        result.append(transition)
        return result
class StateTransition(object):
    """Directed edge between two states, optionally bound to an interface method.

    When *is_auto* is True the state machine may take this transition
    implicitly while searching a path to a required state.
    """

    def __init__(self, handler, from_state, to_state, method = None, is_auto = True):
        self.method = method
        self.to_state = to_state
        self.from_state = from_state
        self.is_auto = is_auto
        self.handler = handler

    def __str__(self):
        return "{0}: transition from {1} to {2}".format(self.handler.interface.model_name,self.from_state, self.to_state)

    def do(self):
        """Perform the transition: call the bound method, or just move state."""
        logging.getLogger("state").info(str(self))
        if self.method is None:
            self.handler.current_state = self.to_state
        else:
            # The bound method is expected to drive the state change itself.
            self.method.new_method()()
class StateMachine(OptionalAttributes):
    """Tracks the state of a code interface and the transitions between states.

    States are created lazily by name; transitions may carry a method
    that performs the actual change in the underlying code. The machine
    can search for (and execute) a chain of automatic transitions to
    reach a required state, and can render itself as PlantUML or as
    text tables.
    """

    def __init__(self, interface, **options):
        OptionalAttributes.__init__(self, **options)
        self.states = {}  # name -> State
        self._do_automatic_state_transitions = True
        # Start in an anonymous state until set_initial_state is called.
        self._current_state = State(self, None)
        self.interface = interface
        self._initial_state = None

    @option(type='boolean', sections=['state',])
    def is_enabled(self):
        return True

    def enable(self):
        self.is_enabled = True

    def disable(self):
        self.is_enabled = False

    def new_transition(self, from_name, to_name, is_auto = True):
        """Create and register a transition between the named states."""
        from_state = self.new_state(from_name)
        to_state = self.new_state(to_name)
        transition = StateTransition(self, from_state, to_state, None, is_auto)
        if not from_state is None:
            from_state.add_from_transition(transition)
        if not to_state is None:
            to_state.add_to_transition(transition)
        return transition

    def remove_transition(self, from_name, to_name):
        """Remove the transition between the named states (if registered)."""
        from_state = self.new_state(from_name)
        to_state = self.new_state(to_name)
        if not from_state is None:
            from_state.remove_from_transition(to_state)
        if not to_state is None:
            to_state.remove_to_transition(from_state)

    def iter_states(self):
        return self.states.values()

    def new_state(self, name):
        """Return the named state, creating it on first use.

        A name of the form '!a!b' yields an AllExcept pseudo-state
        matching every state except 'a' and 'b'. None maps to None.
        """
        if name is None:
            return None
        if name.startswith('!'):
            return AllExcept([self.new_state(x) for x in name[1:].split('!')])
        if name in self.states:
            return self.states[name]
        self.states[name] = State(self, name)
        return self.states[name]

    def set_initial_state(self, name):
        self._current_state = self.new_state(name)
        self._initial_state = self._current_state

    def _get_transitions_path_from_to(self, from_state, to_state):
        """Yield chains of automatic transitions leading from *from_state*
        to *to_state*, built by searching backwards from the target."""
        transitions = [x for x in to_state.get_to_transitions() if x.is_auto]
        paths = [[x] for x in transitions]

        def has_no_circle(path):
            # Reject paths that revisit a state.
            seen_states = set([])
            seen_states.add(path[0].from_state)
            for transition in path:
                if transition.to_state in seen_states:
                    return False
                seen_states.add(transition.to_state)
            return True

        while paths:
            current = paths.pop()
            first = current[0]
            if first.from_state is None:
                yield current
            elif first.from_state.matches(from_state):
                yield current
            else:
                # Extend the partial path backwards by one transition.
                transitions = [x for x in first.from_state.get_to_transitions() if x.is_auto]
                new_paths = [[x] for x in transitions]
                for new_path in new_paths:
                    new_path.extend(current)
                new_paths = filter(has_no_circle, new_paths)
                paths.extend(new_paths)
        return

    def _get_state_transition_path_to(self, state):
        """Return the shortest chain of automatic transitions from the
        current state to *state*; raise when none exists or when manual
        transitions are required but automatic transitions are disabled."""
        all_transitions = self._get_transitions_path_from_to(self._current_state, state)
        transitions = []
        for x in all_transitions:
            if len(transitions) == 0 or len(x) < len(transitions):
                transitions = x
        if len(transitions) == 0:
            raise Exception("No transition from current state {0} to {1} possible".format(self._current_state, state))
        transitions_with_methods = [x for x in transitions if not x.method is None]
        if not self._do_automatic_state_transitions and len(transitions_with_methods) > 0:
            lines = []
            lines.append("Interface is not in {0}, should transition from {1} to {0} first.\n". format(state, self._current_state))
            for x in transitions:
                if x.method is None:
                    lines.append("{0}, automatic". format(x))
                else:
                    lines.append("{0}, calling '{1}'". format(x, x.method.function_name))
            exception = exceptions.AmuseException('\n'.join(lines))
            exception.transitions = transitions
            raise exception
        return transitions

    def _do_state_transition_to(self, state):
        """Execute the transition chain leading to *state*."""
        transitions = self._get_state_transition_path_to(state)
        for transition in transitions:
            transition.do()

    def to_plantuml_string(self):
        """Render the state graph as a PlantUML state-diagram description."""
        lines = []
        lines.append('@startuml')
        initial_state = self._initial_state
        lines.append('[*] --> {0}'.format(initial_state.name))
        statenames = sorted(self.states.keys())
        # Merge parallel transitions (same from/to) into one labelled edge.
        merged_transitions = {}
        for name in statenames:
            state = self.states[name]
            transitions = state.get_to_transitions()
            for transition in transitions:
                if transition.from_state.is_named():
                    if not transition.method is None:
                        transitionname = '{0}+{1}'.format(
                            transition.from_state.name,
                            transition.to_state.name
                        )
                        if transitionname in merged_transitions:
                            merged_transitions[transitionname][2].add(transition.method.function_name)
                        else:
                            merged_transitions[transitionname] = [
                                transition.from_state.name,
                                transition.to_state.name,
                                set([transition.method.function_name])
                            ]
                        #lines.append('{0} --> {1} : {2}'.format(
                        #        transition.from_state.name,
                        #        transition.to_state.name,
                        #        transition.method.function_name
                        #    )
                        #)
                    else:
                        lines.append('{0} -> {1}'.format(
                            transition.from_state.name,
                            transition.to_state.name
                        )
                        )
                else:
                    # AllExcept source: expand to every matching named state.
                    for x in self.iter_states():
                        if x == transition.from_state:
                            continue
                        if not transition.from_state.matches(x):
                            continue
                        if not transition.method is None:
                            lines.append('{0} --> {1} : {2}'.format(
                                x.name,
                                transition.to_state.name,
                                transition.method.function_name
                            )
                            )
                        else:
                            lines.append('{0} -> {1}'.format(
                                x.name,
                                transition.to_state.name,
                            )
                            )
        for fromname, toname, methodnames in merged_transitions.values():
            lines.append('{0} --> {1} : {2}'.format(
                fromname,
                toname,
                '\\n'.join(methodnames)
            )
            )
        lines.append('@enduml')
        return '\n'.join(lines)

    def to_table_string(self, ignore_states = [], split = True):
        """Render the transition graph as one or more text tables.

        Rows are from-states, columns are to-states; cells list the
        method names that trigger the transition ('*' for automatic,
        '-' for none). When *split* is True, wide tables are broken
        into several tables of at most ~80 characters.
        """
        lines = []
        ignore_states = set(ignore_states)
        initial_state = self._initial_state
        lines.append('Initial state: {0}'.format(initial_state.name))
        statenames = sorted(self.states.keys())
        # Merge parallel transitions into {from+to: [from, to, {methods}]}.
        merged_transitions = {}
        for name in statenames:
            state = self.states[name]
            transitions = state.get_to_transitions()
            for transition in transitions:
                if transition.from_state.is_named():
                    if not transition.method is None:
                        functionname = transition.method.function_name
                    else:
                        functionname = '*'
                    transitionname = '{0}+{1}'.format(
                        transition.from_state.name,
                        transition.to_state.name
                    )
                    if transitionname in merged_transitions:
                        merged_transitions[transitionname][2].add(functionname)
                    else:
                        merged_transitions[transitionname] = [
                            transition.from_state.name,
                            transition.to_state.name,
                            set([functionname])
                        ]
                else:
                    # AllExcept source: expand to every matching named state.
                    for x in self.iter_states():
                        if x == transition.from_state:
                            continue
                        if not transition.from_state.matches(x):
                            continue
                        if not transition.method is None:
                            functionname = transition.method.function_name
                        else:
                            functionname = '*'
                        transitionname = '{0}+{1}'.format(
                            x.name,
                            transition.to_state.name
                        )
                        if transitionname in merged_transitions:
                            merged_transitions[transitionname][2].add(functionname)
                        else:
                            merged_transitions[transitionname] = [
                                x.name,
                                transition.to_state.name,
                                set([functionname])
                            ]
        selectedstates = [x for x in statenames if not x in ignore_states]
        tostates = [x for x in selectedstates if not x == initial_state.name]
        # End states have no outgoing transitions to any selected state.
        endstates = []
        for fromstate in selectedstates:
            found = False
            for tostate in tostates:
                transitionname = '{0}+{1}'.format(
                    fromstate,
                    tostate
                )
                if transitionname in merged_transitions:
                    found = True
                    break
            if not found:
                endstates.append(fromstate)
        if len(endstates) == 1:
            lines.append('End state: {0}'.format(endstates[0]))
        else:
            lines.append('End states: {0}'.format(', '.join(endstates)))
        # Distance of every state from the initial state (graph search).
        state_to_distance_from_start = {}
        state_to_distance_from_start[initial_state.name] = 0
        stack = [[initial_state.name, 0]]
        while len(stack) > 0:
            current, distance = stack.pop()
            for tostate in selectedstates:
                transitionname = '{0}+{1}'.format(
                    current,
                    tostate
                )
                if transitionname in merged_transitions:
                    if tostate not in state_to_distance_from_start:
                        stack.append([tostate, distance+1])
                        state_to_distance_from_start[tostate] = distance + 1
        # Distance of every state from the nearest end state (reverse search).
        state_to_distance_from_end = {}
        stack = []
        for x in endstates:
            state_to_distance_from_end[x] = 0
            stack.append([x, 0])
        while len(stack) > 0:
            tostate, distance = stack.pop()
            for fromstate in selectedstates:
                transitionname = '{0}+{1}'.format(
                    fromstate,
                    tostate
                )
                if transitionname in merged_transitions:
                    if fromstate not in state_to_distance_from_end:
                        stack.append([fromstate, distance+1])
                        state_to_distance_from_end[fromstate] = distance + 1
        # Sort rows/columns so states flow from start towards end.
        fromstates = [x for x in selectedstates if not x in endstates]
        fromstates = sorted(fromstates, key = lambda x : -(state_to_distance_from_end[x] * len(state_to_distance_from_start)) + state_to_distance_from_start[x])
        tostates = sorted(tostates, key = lambda x : -(state_to_distance_from_end[x] * len(state_to_distance_from_start)) + state_to_distance_from_start[x])
        if split:
            # First table pass only measures column widths so wide tables
            # can be split at ~80 characters.
            table = Texttable(max_width = -1)
            header = [' to\nfrom']
            header.extend(tostates)
            rows = []
            rows.append(header)
            for fromstate in fromstates:
                row = []
                row.append(fromstate)
                for tostate in tostates:
                    transitionname = '{0}+{1}'.format(
                        fromstate,
                        tostate
                    )
                    if transitionname in merged_transitions:
                        _, _, functionnames = merged_transitions[transitionname]
                        row.append('\n'.join(functionnames))
                    else:
                        row.append('-')
                rows.append(row)
            table.add_rows(rows)
            table._compute_cols_width()
            widths = table._width
            splittostates = []
            currentostates = []
            w0 = widths[0]
            currentwidth = w0
            for x,state in zip(widths[1:], tostates):
                currentwidth += x
                if currentwidth > 80:
                    splittostates.append(currentostates)
                    currentostates = [state]
                    currentwidth = w0 + x
                else:
                    currentostates.append(state)
            splittostates.append(currentostates)
        else:
            splittostates = [tostates]
        # Second pass: actually draw one table per column group.
        for tostates in splittostates:
            header = [' to\nfrom']
            header.extend(tostates)
            rows = []
            rows.append(header)
            for fromstate in fromstates:
                row = []
                row.append(fromstate)
                for tostate in tostates:
                    transitionname = '{0}+{1}'.format(
                        fromstate,
                        tostate
                    )
                    if transitionname in merged_transitions:
                        _, _, functionnames = merged_transitions[transitionname]
                        row.append('\n'.join(functionnames))
                    else:
                        row.append('-')
                rows.append(row)
            table = Texttable(max_width = -1)
            align = ["l",]
            align.extend("l" * len(tostates))
            table.set_cols_align(align)
            table.add_rows(rows)
            lines.append(table.draw())
        return '\n'.join(lines)

    def get_name_of_current_state(self):
        return self._current_state.name
| 19,392
| 36.009542
| 161
|
py
|
amuse
|
amuse-main/src/amuse/support/project.py
|
"""
Some general functions useful for AMUSE science projects
"""
import os
import os.path
import shutil
import pickle
def new_working_directory(script_filename=None, sub_directories=[]):
    """
    Call this function from your script to create a new directory and move
    into it, for storing all your simulation output. Invoke it with:
    new_working_directory(__file__)
    to copy the current version of your script to this new directory for
    book-keeping purposes.
    """
    # NOTE(review): mutable default for sub_directories is harmless here
    # (never mutated) but a None default would be more conventional.
    # Find the first unused run_NNN directory name.
    i = 0
    while os.path.exists("run_{0:=03}".format(i)):
        i += 1
    new_directory = "run_{0:=03}".format(i)
    os.mkdir(new_directory)
    print("Created new directory for output:", new_directory)
    for sub_directory in sub_directories:
        os.mkdir(os.path.join(new_directory, sub_directory))
    if not script_filename is None:
        shutil.copy(script_filename, new_directory)
    # Make the new directory the current working directory.
    os.chdir(new_directory)
def store_results_in_file(results, datafile):
    """Serialize *results* with pickle and write it to the path *datafile*."""
    with open(datafile, 'wb') as sink:
        pickle.dump(results, sink)
def load_results_from_file(datafile):
    """Read and return the pickled object stored at path *datafile*."""
    with open(datafile, 'rb') as source:
        return pickle.load(source)
| 1,183
| 31
| 75
|
py
|
amuse
|
amuse-main/src/amuse/support/options.py
|
import configparser
import os.path
import os
import platform
from io import StringIO
from amuse.support.core import late
from amuse.support import exceptions
try:
import pkg_resources
except ImportError:
pkg_resources = None
class GlobalOptions(object):
    """Singleton holding the merged AMUSE configuration (amuserc files).

    Configuration files are read in precedence order; values read later
    override earlier ones. Individual option values can additionally be
    overridden at runtime via override_value_for_option.
    """

    INSTANCE = None  # lazily created by instance()

    def __init__(self):
        self.config = configparser.ConfigParser()
        self.overriden_options = {}  # option name -> forced value

    def load(self, preloadfp=None):
        """Read every known amuserc location into self.config.

        *preloadfp*, when given, is a file-like object read between the
        data-dir rc and the home-dir rc.
        """
        if pkg_resources is not None:
            if pkg_resources.resource_exists('amuse', 'amuserc'):
                resourcerc = pkg_resources.resource_filename('amuse', 'amuserc')
                self.config.read(resourcerc)
        rootrc = os.path.join(self.amuse_rootdirectory, self.rcfilename)
        datarc = os.path.join(self.amuse_data_location, self.rcfilename)
        homedirrc = os.path.join(self.homedirectory, '.' + self.rcfilename)
        self.config.read(rootrc)
        self.config.read(datarc)
        if preloadfp is not None:
            self.config.read_file(preloadfp, "<amuserc>")
        self.config.read(homedirrc)
        # Host-specific rc file: ~/.<hostname>_amuserc
        self.config.read(os.path.join(self.homedirectory, '.' + platform.node() + '_' + self.rcfilename))
        if 'AMUSERC' in os.environ:
            self.config.read(os.environ['AMUSERC'])
        # Finally the rc in the current working directory (highest priority).
        self.config.read(self.rcfilepath)

    @late
    def amuse_data_location(self):
        """Locate the AMUSE data directory by probing known layouts."""
        this = os.path.dirname(os.path.abspath(__file__))
        # installed
        result = os.path.abspath(os.path.join(this, "..", "..", "..", "..", "..", "share", "amuse"))
        if os.path.exists(os.path.join(result, 'config.mk')):
            return result
        # for some virtualenv setups
        result = os.path.abspath(os.path.join(this, "..", "..", "..", "..", "..", "..", "share", "amuse"))
        if os.path.exists(os.path.join(result, 'config.mk')):
            return result
        # in-place
        result = os.path.abspath(os.path.join(this, "..", "..", ".."))
        if os.path.exists(os.path.join(result, 'config.mk')):
            return result
        raise exceptions.AmuseException("Could not locate AMUSE root directory! set the AMUSE_DIR variable")

    @late
    def amuse_rootdirectory(self):
        return os.path.dirname(os.path.dirname(__file__))

    @late
    def rcfilepath(self):
        # rc file in the current working directory.
        return os.path.join(os.getcwd(), self.rcfilename)

    @late
    def rcfilename(self):
        return 'amuserc'

    @late
    def homedirectory(self):
        """Best-effort determination of the user's home directory."""
        path = ''
        try:
            path = os.path.expanduser("~")
        except:
            pass
        if not os.path.isdir(path):
            for evar in ('HOME', 'USERPROFILE', 'TMP'):
                try:
                    path = os.environ[evar]
                    if os.path.isdir(path):
                        break
                except:
                    pass
        if path:
            return path
        else:
            raise RuntimeError('please define environment variable $HOME')

    @classmethod
    def instance(cls, preloadfp=None):
        """Return the process-wide singleton, loading config on first use."""
        if cls.INSTANCE is None:
            cls.INSTANCE = cls()
            cls.INSTANCE.load(preloadfp)
        return cls.INSTANCE

    def get_value_for_option(self, option, instance):
        """Resolve an option value: override, then config sections, then default."""
        if option.name in self.overriden_options:
            return self.overriden_options[option.name]
        for x in option.get_sections(instance):
            if self.config.has_option(x, option.name):
                return option.get_value(instance, x, self.config)
        return option.get_defaultvalue(instance)

    def to_ini_string(self):
        """Serialize the merged configuration to an INI-format string."""
        file = StringIO()
        self.config.write(file)
        return file.getvalue()

    def override_value_for_option(self, name, value):
        """Force *name* to *value*, taking precedence over all config files."""
        self.overriden_options[name] = value

    def read_from_ini_string(self, string):
        """Merge additional configuration from an INI-format string."""
        file = StringIO(string)
        self.config.read_file(file)
class option(object):
    """Decorator to define an option
    :argument type: Type of the value, used when reading from the configuration file.
        Can be "string", "int", "float" or "boolean". Defaults to "string"
    :argument sections: Sections in the configuration file to search for
        the option value, must be an array of strings
    :argument choices: When given will check if the value of the option
        is in the array (must be a list or set of objects)
    :argument name: By default the name of the option in the configuration file
        is the same as the name of the function, use this argument to
        use a different name (not recommended)
    Options can only be defined on subclasses of :class:`OptionalAttributes`
    """

    def __init__(self, function=None, type="string", name=None, sections=(), choices=(), global_options=None):
        self.specification_method = function
        if name is not None:
            self.name = name
        if self.specification_method is not None:
            self.__doc__ = self.specification_method.__doc__
        self.sections = sections
        # Map the type name ("int", "float", ...) to the parser method below.
        if hasattr(self, type.upper()):
            self.valuetype = getattr(self, type.upper())
        else:
            raise exceptions.CoreException("'{0}' is not a valid type for option".format(type))
        self.validator = self.default_validator
        self.choices = set(choices)
        if self.choices:
            self.validator = self.choice_validator
        if global_options is None:
            self.global_options = GlobalOptions.instance()
        else:
            self.global_options = global_options

    @late
    def name(self):
        # Default option name: the decorated function's name.
        return self.specification_method.__name__

    def __call__(self, function):
        # Supports the @option(...) decorator-with-arguments form.
        self.specification_method = function
        self.__doc__ = self.specification_method.__doc__
        return self

    def __get__(self, instance, owner):
        """Descriptor protocol: resolve and cache the option value."""
        if instance is None:
            return self
        if self.name in instance._local_options:
            value = instance._local_options[self.name]
        else:
            value = self.global_options.get_value_for_option(self, instance)
        # Cache on the instance; the instance attribute shadows this
        # (non-data) descriptor on subsequent lookups.
        setattr(instance, self.name, value)
        return value

    def get_value(self, instance, section, options):
        return self.validator(self.valuetype(section, options))

    def get_defaultvalue(self, instance):
        return self.specification_method(instance)

    def INT(self, section, options):
        return options.getint(section, self.name)

    def FLOAT(self, section, options):
        return options.getfloat(section, self.name)

    def BOOLEAN(self, section, options):
        return options.getboolean(section, self.name)

    def STRING(self, section, options):
        return options.get(section, self.name)

    def DICT(self, section, options):
        # Parse "key1=value1,key2=value2" into a dict.
        opts=options.get(section, self.name).split(",")
        result=dict()
        for o in opts:
            key,value=o.split("=")
            result[key.strip()]=value.strip()
        return result

    def default_validator(self, value):
        return value

    def choice_validator(self, value):
        if value not in self.choices:
            raise exceptions.CoreException("{0} is not a valid choice for option '{1}', valid values are: {2}".format(value, self.name, sorted(self.choices)))
        return value

    def get_sections(self, instance):
        """Ordered list of config sections to search for this option."""
        result = []
        result.extend(instance.option_sections)
        result.append(instance.__class__.__name__)
        lastname = instance.__class__.__name__.split('.')[-1]
        if not lastname == instance.__class__.__name__:
            result.append(lastname)
            result.append(lastname.lower())
        else:
            result.append(instance.__class__.__name__.lower())
        result.extend(self.sections)
        return result

    def __setx__(self, instance, value):
        # NOTE(review): this looks like it was meant to be the descriptor
        # protocol method __set__; as __setx__ it is never invoked by
        # attribute assignment. Confirm against upstream before renaming.
        instance._local_options[self.name] = self.validator(value)
        try:
            delattr(instance, self.name)
        except AttributeError as ex:
            pass
class OptionalAttributes(object):
    """
    Abstract superclass for all classes supporting optional
    attributes.
    To support optional attributes a class must inherit (directly
    or indirectly) from this class.
    To support setting the attributes when an object is created
    the class must define a *catch-all* keyword argument
    in the **__init__** function and send this argument to the
    __init__ of the superclass.
    The values of options are first searched for in the sections
    given in the **option_sections** attribute of the class (empty
    by default). Next the sections of the option are searched.
    For example::
        class MyInterface(OptionalAttributes):
            option_sections = ('mysection',)
            def __init__(self, **options):
                OptionalAttributes.__init__(self, **options)
            @option(type="int", choices=(5,10,15), sections=('try',))
            def number_of_tries(self):
                "number of times to try to connect"
                return 5
    To code will first search for the value of the option in the
    *mysection*, if no value is found the *try* section is searched.
    For the following configuration file the **number_of_tries**
    attribute will be 10 as the *mysection* section is searched first.
    :: ini
        [mysection]
        number_of_tries = 10
        [try]
        number_of_tries = 5
    The value of the option can be overriden by specifying it when
    creating an object of the class.
    :: python
        x = MyInterface(number_of_tries = 15)
        print x.number_of_tries
        15
    """

    option_sections = ()

    def __init__(self, **optional_keyword_arguments):
        # Only keywords matching a defined option are applied; others
        # are silently ignored.
        for key, value in optional_keyword_arguments.items():
            if self.hasoption(key):
                setattr(self, key, value)

    def hasoption(self, name):
        """True when the class defines an attribute named *name*."""
        the_type = type(self)
        return hasattr(the_type, name)

    @late
    def _local_options(self):
        # Per-instance storage for locally overridden option values.
        return {}

    def iter_options(self):
        """Yield every option descriptor defined on this class."""
        cls = type(self)
        for x in dir(cls):
            if x.startswith('_'):
                continue
            value = getattr(cls, x)
            if isinstance(value, option):
                yield value
| 10,236
| 30.40184
| 158
|
py
|
amuse
|
amuse-main/src/amuse/support/methods.py
|
from amuse.support.core import late
from amuse.support import exceptions
from amuse.rfi.async_request import (
DependentASyncRequest, AbstractASyncRequest, FakeASyncRequest,
)
import inspect
class AbstractCodeMethodWrapper(object):
    """Base wrapper around a method, exposing its argument metadata through
    lazily-computed (``late``) attributes.

    Three kinds of wrapped method are distinguished: "legacy" remote
    functions (carry a ``specification``), "code" methods (carry
    ``method_input_argument_names``) and plain python callables (inspected
    via ``inspect``).
    """

    def __init__(self, method):
        self.method = method

    @late
    def method_is_legacy(self):
        # Legacy (remote-function) methods carry a 'specification' attribute.
        return hasattr(self.method, 'specification')

    @late
    def method_is_code(self):
        return hasattr(self.method, 'method_input_argument_names')

    @late
    def is_async_supported(self):
        if hasattr(self.method, 'is_async_supported'):
            return self.method.is_async_supported
        elif self.method_is_legacy:
            # Legacy methods always support asynchronous invocation.
            return True
        else:
            return False

    @late
    def legacy_specification(self):
        if self.method_is_code:
            return self.method.legacy_specification
        elif self.method_is_legacy:
            return self.method.specification
        else:
            return None

    @late
    def method_input_argument_names(self):
        if self.method_is_code:
            return self.method.method_input_argument_names
        elif self.method_is_legacy:
            return [x.name for x in self.method.specification.input_parameters]
        else:
            # Plain callable: inspect the signature, dropping self/cls.
            args = inspect.getfullargspec(self.method).args
            if args:
                if args[0] == 'self' or args[0] == 'cls':
                    return args[1:]
            return args

    @late
    def optional_method_input_argument_names(self):
        if self.method_is_code:
            return self.method.optional_method_input_argument_names
        elif self.method_is_legacy:
            return [x.name for x in self.method.specification.iter_optional_input_parameters()]
        else:
            argspec = inspect.getfullargspec(self.method)
            defaults = argspec.defaults
            if defaults is None or len(defaults) == 0:
                return []
            else:
                # Defaults align with the tail of the positional arguments.
                return argspec.args[-len(defaults):]

    @late
    def method_output_argument_names(self):
        if self.method_is_code:
            return self.method.method_output_argument_names
        elif self.method_is_legacy:
            return [x.name for x in self.method.specification.output_parameters]
        else:
            return ()

    @late
    def index_input_attributes(self):
        if self.method_is_code:
            return self.method.index_input_attributes
        else:
            return None

    @late
    def nbody_input_attributes(self):
        if self.method_is_code:
            return self.method.nbody_input_attributes
        else:
            return [False] * len(self.method_input_argument_names)

    @late
    def index_output_attributes(self):
        if self.method_is_code:
            return self.method.index_output_attributes
        else:
            return None
class CodeMethodWrapper(AbstractCodeMethodWrapper):
    """Wraps a method together with a definition object that converts
    arguments and results and runs pre/post hooks.

    Supports synchronous calls, asynchronous calls, and chaining on
    pending asynchronous requests passed as arguments or via the
    ``async_dependency`` keyword.
    """

    def __init__(self, method, definition):
        self.method = method
        self.definition = definition
        self.definition.check_wrapped_method(self)

    def __call__(self, *list_arguments, **keyword_arguments):
        async_dependency = keyword_arguments.pop("async_dependency", None)
        return_request = keyword_arguments.pop("return_request", False)
        # If any argument is itself a pending async request, join all such
        # requests into one dependency and resolve them before calling.
        if any(isinstance(x, AbstractASyncRequest) for x in list_arguments) or \
                any(isinstance(x, AbstractASyncRequest) for x in keyword_arguments):
            list_arguments_= []
            keyword_arguments_= dict()
            for arg in list_arguments:
                if isinstance(arg, AbstractASyncRequest):
                    async_dependency = arg.join(async_dependency)
            for key, arg in keyword_arguments.items():
                if isinstance(arg, AbstractASyncRequest):
                    async_dependency = arg.join(async_dependency)
            def dummy_factory():
                return FakeASyncRequest()
            # need this step in between to make sure results are available
            request = DependentASyncRequest(async_dependency, dummy_factory)
            def factory():
                # Replace request arguments with their (now available) results.
                list_arguments_ = []
                keyword_arguments_ = dict()
                for arg in list_arguments:
                    if isinstance(arg, AbstractASyncRequest):
                        list_arguments_.append(arg.result())
                    else:
                        list_arguments_.append(arg)
                for key,arg in keyword_arguments.items():
                    if isinstance(arg, AbstractASyncRequest):
                        keyword_arguments_[key] = arg.result()
                    else:
                        keyword_arguments_[key] = arg
                return self.asynchronous(*list_arguments_, **keyword_arguments_)
            request = DependentASyncRequest(request, factory)
            if return_request:
                request._result_index = self.convert_result_index()
                return request
            else:
                return request.result()
        if async_dependency is not None:
            # Run after the given dependency completes.
            def factory():
                return self.asynchronous(*list_arguments, **keyword_arguments)
            request = DependentASyncRequest(async_dependency, factory)
            if return_request:
                request._result_index = self.convert_result_index()
                return request
            else:
                return request.result()
        if return_request:
            request = self.asynchronous(*list_arguments, **keyword_arguments)
            request._result_index = self.convert_result_index()
            return request
        # Synchronous path: hooks and conversions around the plain call.
        object = self.precall()
        list_arguments, keyword_arguments = self.convert_arguments(list_arguments, keyword_arguments)
        result = self.method(*list_arguments, **keyword_arguments)
        result = self.convert_result(result)
        self.postcall(object)
        return result

    def asynchronous(self, *list_arguments, **keyword_arguments):
        """Start an asynchronous call and attach result conversion to it."""
        if not self.is_async_supported:
            raise exceptions.AmuseException("asynchronous call is not supported for this method")
        object = self.precall()
        list_arguments, keyword_arguments = self.convert_arguments(list_arguments, keyword_arguments)
        request = self.method.asynchronous(*list_arguments, **keyword_arguments)
        def handle_result(function):
            result = function()
            result = self.convert_result(result)
            # currently handled after call (on a wait)
            # alternatively, this (probably) works for current implemted postcalls
            # this could be done immediately
            self.postcall(object)
            return result
        request.add_result_handler(handle_result)
        return request

    def convert_arguments(self, list_arguments, keyword_arguments):
        return self.definition.convert_arguments(self, list_arguments, keyword_arguments)

    def convert_result(self, result):
        return self.definition.convert_result(self, result)

    def convert_result_index(self):
        return self.definition.convert_result_index(self)

    def precall(self):
        return self.definition.precall(self)

    def postcall(self, object):
        self.definition.postcall(self, object)

    def __str__(self):
        return 'wrapped<{0}>'.format(self.method)
class CodeMethodWrapperDefinition(object):
    """Default (no-op) hook set for CodeMethodWrapper.

    Subclasses override these hooks to validate the wrapped method,
    convert arguments/results, or run code before and after a call.
    """

    def check_wrapped_method(self, method):
        """Validate the wrapped method; the default accepts anything."""
        pass

    def convert_arguments(self, method, list_arguments, keyword_arguments):
        """Pass arguments through unchanged."""
        return (list_arguments, keyword_arguments)

    def convert_result(self, method, result):
        """Pass the result through unchanged."""
        return result

    def convert_result_index(self, method):
        """One index per declared output argument."""
        return list(range(len(method.method_output_argument_names)))

    def precall(self, method):
        """Hook run before the call; its return value is handed to postcall."""
        return None

    def postcall(self, method, object):
        """Hook run after the call; *object* is whatever precall returned."""
        pass
class ProxyingMethodWrapper(AbstractCodeMethodWrapper):
    """Wrapper forwarding calls to a named attribute of a code interface.

    Picklable: only the interface and the attribute name are stored;
    the bound method is re-fetched on unpickling.
    """

    def __init__(self, code_interface, attribute_name):
        self.code_interface = code_interface
        self.attribute_name = attribute_name
        self.method = getattr(code_interface, attribute_name)

    def __getstate__(self):
        # The bound method itself is not stored; record how to re-fetch it.
        return {
            "code_interface": self.code_interface,
            "attribute_name": self.attribute_name
        }

    def __setstate__(self, state):
        self.__dict__.update(state)
        # Re-bind the method after unpickling.
        self.method = getattr(self.code_interface, self.attribute_name)

    def __call__(self, *list_arguments, **keyword_arguments):
        return self.method(*list_arguments, **keyword_arguments)

    def asynchronous(self, *list_arguments, **keyword_arguments):
        return self.method.asynchronous(*list_arguments, **keyword_arguments)

    def __str__(self):
        return 'wrapped<{0}>'.format(self.method)
class IncorrectWrappedMethodException(exceptions.AmuseException):
    """Raised when a wrapped method fails its definition's validity check."""
    formatstring = "{0}"
| 8,981
| 31.309353
| 101
|
py
|
amuse
|
amuse-main/src/amuse/support/__init__.py
|
import os
from amuse.support.options import option, GlobalOptions
from amuse.support.options import OptionalAttributes
import warnings
"""
Support Code
"""
GlobalOptions=GlobalOptions.instance()
class _Defaults(OptionalAttributes):
    """Internal option holder exposing the AMUSE root/data directory."""

    @option(sections=['data'])
    def amuse_root_dir(self):
        # The AMUSE_DIR environment variable wins over the detected location.
        if 'AMUSE_DIR' in os.environ:
            return os.environ['AMUSE_DIR']
        return GlobalOptions.amuse_data_location
def get_amuse_root_dir():
    """Return the configured AMUSE root directory."""
    return _Defaults().amuse_root_dir


def get_amuse_data_dir():
    """Return the AMUSE data directory (same option as the root directory)."""
    return _Defaults().amuse_root_dir
| 569
| 18.655172
| 55
|
py
|
amuse
|
amuse-main/src/amuse/support/console.py
|
import numpy
from amuse.support.exceptions import AmuseException
from amuse.support import options
registered_printing_strategies = {}
class UnsupportedPrintingStrategyException(AmuseException):
    """
    Raised when the given printing strategy is not supported by AMUSE.
    """
    formatstring = "You tried to set the printing strategy to '{0}', but this printing strategy is not available"
class PrintingStrategy(object):
    """Base class defining how quantities (number + unit) are rendered.

    Subclasses customise the prefix/number/separator/unit/suffix pieces
    that quantity_to_string assembles, and may convert the quantity
    first (e.g. to other units).
    """

    def __init__(self):
        pass

    def convert_quantity(self, quantity):
        # Default: no unit conversion.
        return quantity

    def quantity_to_string(self, quantity):
        """Assemble prefix + number [+ separator + unit] + suffix."""
        quantity = self.convert_quantity(quantity)
        result = self.string_prefix()
        result += self.string_number(quantity)
        string_of_unit = self.string_unit(quantity)
        if string_of_unit:
            result += self.string_separator()
            result += string_of_unit
        result += self.string_suffix()
        return result

    def string_prefix(self):
        return ""

    def string_number(self, quantity):
        return ""

    def string_separator(self):
        return " "

    def string_unit(self, quantity):
        return ""

    def string_suffix(self):
        return ""

    def old_numbers_to_string(self, number):
        # Legacy renderer: bracket any iterable, str() everything else.
        if hasattr(number, "__iter__"):
            return '[' + ', '.join([str(x) for x in number]) + ']'
        return str(number)

    def numbers_to_string(self, quantity, precision=None):
        """Render the numeric part; vectors are bracketed recursively."""
        if precision is None:
            fmt = "%s"
        else:
            fmt = "%#."+str(precision)+"g"
        if quantity.is_vector():
            def _traverse_vector(vector):
                # Recurse until a 1-D slice is reached, bracketing each level.
                if len(vector.shape) > 1:
                    return '[' + ', '.join([_traverse_vector(sub) for sub in vector]) + ']'
                else:
                    return '[' + ', '.join([fmt % val for val in vector]) + ']'
            return _traverse_vector(quantity.number)
        else:
            return fmt % quantity.number

    @classmethod
    def register(cls):
        """
        Register this class, so that it can be found by name
        in the :func:`set_printing_strategy` function.
        """
        add_printing_strategy(cls)
class DefaultPrintingStrategy(PrintingStrategy):
    """Print the number followed by its unit, unchanged."""
    provided_strategy_names = ['default', 'with_units']

    def string_number(self, quantity):
        return self.numbers_to_string(quantity)

    def string_unit(self, quantity):
        return str(quantity.unit)
class SimplePrintingStrategy(PrintingStrategy):
    """Print quantities after reducing the unit to a simpler form.

    The unit's ``to_factor_and_reduced_form()`` supplies both the numeric
    conversion factor (applied to the number) and the reduced unit
    (used as the unit text).
    """

    provided_strategy_names = ['simple']

    def string_number(self, quantity):
        # Only the conversion factor is needed here; the reduced unit is
        # rendered separately by string_unit. (Fixes the unused local.)
        factor, _ = quantity.unit.to_factor_and_reduced_form()
        return self.numbers_to_string(quantity * factor)

    def string_unit(self, quantity):
        # Only the reduced unit is needed here.
        _, print_unit = quantity.unit.to_factor_and_reduced_form()
        return str(print_unit)
class NoUnitsPrintingStrategy(PrintingStrategy):
    """Print only the number; the unit text is suppressed entirely."""

    provided_strategy_names = ['no_unit', 'no_units']

    def string_number(self, quantity):
        return self.numbers_to_string(quantity)
class FormalPrintingStrategy(PrintingStrategy):
    """Render quantities in an explicit ``<quantity NUMBER | UNIT>`` form."""

    provided_strategy_names = ['formal',]

    def string_prefix(self):
        return "<quantity "

    def string_number(self, quantity):
        return self.numbers_to_string(quantity)

    def string_separator(self):
        return " | "

    def string_unit(self, quantity):
        return str(quantity.unit)

    def string_suffix(self):
        return ">"
class NBodyPrintingStrategy(PrintingStrategy):
    """Print quantities as dimensionless N-body numbers.

    Quantities carrying non-generic (physical) units are first converted
    to N-body units via the supplied *nbody_converter*; without a
    converter an exception is raised unless *ignore_converter_exceptions*
    is set.
    """

    provided_strategy_names = ['nbody',]

    def __init__(self, nbody_converter=None, ignore_converter_exceptions=False):
        self.ignore_converter_exceptions = ignore_converter_exceptions
        self.nbody_converter = nbody_converter

    def convert_quantity(self, quantity):
        # Pure N-body quantities pass through untouched.
        if not is_not_nbody_unit(quantity.unit):
            return quantity
        if self.nbody_converter:
            return self.nbody_converter.to_nbody(quantity)
        if not self.ignore_converter_exceptions:
            raise AmuseException("Unable to convert {0} to N-body units. No "
                "nbody_converter given".format(quantity.unit))
        return quantity

    def string_number(self, quantity):
        return self.numbers_to_string(quantity)
class PrintingStrategyWithPreferredUnits(PrintingStrategy):
    """Base for strategies that convert quantities to a preferred unit set.

    Subclasses are expected to set ``preferred_units``, ``print_units``,
    ``nbody_converter`` and ``ignore_converter_exceptions`` in their own
    ``__init__``.
    """

    def convert_quantity(self, quantity):
        if has_nbody_unit(quantity.unit):
            if self.nbody_converter:
                # Convert to SI first, then pick the preferred unit.
                converted = self.nbody_converter.to_si(quantity)
                return _quantity_in_preferred_units(self.preferred_units, converted)
            if not self.ignore_converter_exceptions:
                raise AmuseException("Unable to convert {0} to SI units. No "
                    "nbody_converter given".format(quantity.unit))
        return _quantity_in_preferred_units(self.preferred_units, quantity)

    def string_number(self, quantity):
        return self.numbers_to_string(quantity)

    def string_unit(self, quantity):
        return str(quantity.unit) if self.print_units else ""
class AstroPrintingStrategy(PrintingStrategyWithPreferredUnits):
    """Prefer astronomical units: MSun, Myr, parsec and Joule."""

    provided_strategy_names = ['astro',]

    def __init__(self, nbody_converter=None, print_units=True, ignore_converter_exceptions=None):
        # By default, converter errors are only raised when units are
        # actually printed.
        if ignore_converter_exceptions is None:
            ignore_converter_exceptions = print_units
        self.ignore_converter_exceptions = ignore_converter_exceptions
        self.nbody_converter = nbody_converter
        self.print_units = print_units
        from amuse.units import units
        self.preferred_units = [units.MSun, units.Myr, units.parsec, units.J]
class SIPrintingStrategy(PrintingStrategyWithPreferredUnits):
    """Prefer the seven SI base units (m, kg, s, A, K, mol, cd)."""

    provided_strategy_names = ['SI', 'si', 'MKS', 'mks']

    def __init__(self, nbody_converter=None, print_units=True, ignore_converter_exceptions=None):
        # By default, converter errors are only raised when units are
        # actually printed.
        if ignore_converter_exceptions is None:
            ignore_converter_exceptions = print_units
        self.ignore_converter_exceptions = ignore_converter_exceptions
        self.nbody_converter = nbody_converter
        self.print_units = print_units
        from amuse.units import units
        self.preferred_units = [units.m, units.kg, units.s, units.A, units.K, units.mol, units.cd]
class CGSPrintingStrategy(PrintingStrategyWithPreferredUnits):
    """Prefer CGS base units (cm, g, s, A, K, mol, cd)."""

    provided_strategy_names = ['CGS', 'cgs']

    def __init__(self, nbody_converter=None, print_units=True, ignore_converter_exceptions=None):
        # By default, converter errors are only raised when units are
        # actually printed.
        if ignore_converter_exceptions is None:
            ignore_converter_exceptions = print_units
        self.ignore_converter_exceptions = ignore_converter_exceptions
        self.nbody_converter = nbody_converter
        self.print_units = print_units
        from amuse.units import units
        self.preferred_units = [units.cm, units.g, units.s, units.A, units.K, units.mol, units.cd]
class CustomPrintingStrategy(PrintingStrategyWithPreferredUnits):
    """Fully user-configurable strategy.

    The caller chooses the preferred units, the numeric precision and the
    literal prefix/separator/suffix strings used around the number.
    """

    provided_strategy_names = ['custom',]

    def __init__(self, nbody_converter=None, print_units=True, preferred_units=None,
            precision=None, prefix="", separator=" ", suffix="",
            ignore_converter_exceptions=None):
        # By default, converter errors are only raised when units are
        # actually printed.
        if ignore_converter_exceptions is None:
            ignore_converter_exceptions = print_units
        self.ignore_converter_exceptions = ignore_converter_exceptions
        self.nbody_converter = nbody_converter
        self.print_units = print_units
        self.preferred_units = preferred_units
        self.precision = precision
        self.prefix = prefix
        self.separator = separator
        self.suffix = suffix

    def string_prefix(self):
        return self.prefix

    def string_number(self, quantity):
        return self.numbers_to_string(quantity, precision=self.precision)

    def string_separator(self):
        return self.separator

    def string_suffix(self):
        return self.suffix
def _quantity_in_preferred_units(preferred_units, quantity):
    # Recursively convert *quantity* so its unit is expressed in terms of
    # the units listed in *preferred_units*. A direct match (same base) is
    # converted outright; compound units are decomposed by kind and each
    # part converted recursively, accumulating the numeric factors.
    # NOTE(review): the unit kind is detected by matching substrings of
    # the unit's class name ("factor_unit", "mul_unit", ...) -- fragile;
    # assumes the class names in amuse.units.core keep these markers.
    if preferred_units is None:
        return quantity
    for preferred_unit in preferred_units:
        if quantity.unit.has_same_base_as(preferred_unit):
            return quantity.as_quantity_in(preferred_unit)
    if "factor_unit" in str(quantity.unit.__class__):
        local = _quantity_in_preferred_units(preferred_units, quantity.unit.local_unit(1.0))
        return local.unit.new_quantity(quantity.number * quantity.unit.local_factor * local.number)
    if "mul_unit" in str(quantity.unit.__class__):
        left = _quantity_in_preferred_units(preferred_units, quantity.unit.left_hand(1.0))
        right = _quantity_in_preferred_units(preferred_units, quantity.unit.right_hand(1.0))
        return (left.unit * right.unit).new_quantity(quantity.number * left.number * right.number)
    if "div_unit" in str(quantity.unit.__class__):
        left = _quantity_in_preferred_units(preferred_units, quantity.unit.left_hand(1.0))
        right = _quantity_in_preferred_units(preferred_units, quantity.unit.right_hand(1.0))
        return (left.unit / right.unit).new_quantity(quantity.number * left.number / right.number)
    if "pow_unit" in str(quantity.unit.__class__):
        local = _quantity_in_preferred_units(preferred_units, quantity.unit.local_unit(1.0))**quantity.unit.power
        return local.unit.new_quantity(quantity.number * local.number)
    if "named_unit" in str(quantity.unit.__class__):
        local = _quantity_in_preferred_units(preferred_units, quantity.unit.local_unit(1.0))
        return local.unit.new_quantity(quantity.number * local.number)
    # No preferred unit matches and the unit is not decomposable: pass through.
    return quantity
def has_nbody_unit(unit):
    """Return True when any base of *unit* is a generic (N-body) unit.

    *unit.base* is an iterable of ``(factor, base_unit)`` pairs; only the
    base units are inspected. Rewritten with ``any()`` for clarity.
    """
    return any(base.is_generic() for _, base in unit.base)
def is_not_nbody_unit(unit):
    """Return True when any base of *unit* is NOT a generic (N-body) unit.

    Note: this is not the logical negation of :func:`has_nbody_unit` --
    a mixed unit can make both return True. Rewritten with ``any()``.
    """
    return any(not base.is_generic() for _, base in unit.base)
def set_printing_strategy(strategy, **kwargs):
    """Install *strategy* as the module-wide printing strategy.

    *strategy* may be a registered name or a strategy class/factory;
    extra keyword arguments are forwarded to its constructor.
    """
    global current_printing_strategy
    factory = _get_printing_strategy_factory(strategy)
    current_printing_strategy = factory(**kwargs)
def get_current_printing_strategy():
    """Return the class (not the instance) of the active printing strategy."""
    return type(current_printing_strategy)
def set_preferred_units(*units):
    """Shorthand: install a 'custom' strategy preferring the given units."""
    set_printing_strategy("custom", preferred_units=units)
def _get_printing_strategy_factory(strategy):
if isinstance(strategy, str):
if not strategy in registered_printing_strategies:
raise UnsupportedPrintingStrategyException(strategy)
return registered_printing_strategies[strategy]
else:
return strategy
def add_printing_strategy(class_of_the_printing_strategy):
    """
    Register the specified class under each of its provided strategy
    names, so that it can be used by the :func:`set_printing_strategy`
    function.
    Do not call this method directly, instead use :func:`PrintingStrategy.register`
    """
    for strategy_name in class_of_the_printing_strategy.provided_strategy_names:
        registered_printing_strategies[strategy_name] = class_of_the_printing_strategy
class _Defaults(options.OptionalAttributes):
    # Reads the configured default printing strategy name from the
    # [output] section of the AMUSE options; falls back to 'default'.
    @options.option(sections=['output',])
    def printing_strategy(self):
        return 'default'
# Register every built-in strategy, then install the configured default.
for _strategy_class in (
    DefaultPrintingStrategy,
    NoUnitsPrintingStrategy,
    FormalPrintingStrategy,
    NBodyPrintingStrategy,
    AstroPrintingStrategy,
    SIPrintingStrategy,
    CGSPrintingStrategy,
    CustomPrintingStrategy,
    SimplePrintingStrategy,
):
    _strategy_class.register()
del _strategy_class
set_printing_strategy(_Defaults().printing_strategy)
| 11,337
| 32.643917
| 113
|
py
|
amuse
|
amuse-main/src/amuse/support/interface.py
|
import inspect
import itertools
from collections import defaultdict
import numpy
from amuse.units import nbody_system
from amuse.units import generic_unit_system
from amuse.units import quantities
from amuse.units.quantities import is_quantity
from amuse.units.core import unit
from amuse.units import core
from amuse.units import units
from amuse.support.options import OptionalAttributes, option
from amuse.support.methods import CodeMethodWrapper, CodeMethodWrapperDefinition, IncorrectWrappedMethodException
from amuse.support.methods import ProxyingMethodWrapper
from amuse.support.core import late
from amuse.support import exceptions
from amuse.support import state
from amuse import datamodel
from amuse.datamodel import base
from amuse.datamodel import parameters
from amuse.datamodel import incode_storage
class ConvertArgumentsException(core.IncompatibleUnitsException):
    # Raised when a method argument cannot be converted to its declared
    # unit; the formatstring passes the detailed message through as-is.
    formatstring = "{0}"
class OldObjectsBindingMixin(object):
    """Backwards-compatible helpers delegating to ``self.particles``."""

    def setup_particles(self, particles):
        """Add *particles* to this code's particle set."""
        self.particles.add_particles(particles)

    def update_particles(self, particles):
        """Copy all attribute values from the code back into *particles*."""
        self.particles.copy_values_of_all_attributes_to(particles)
class MethodArgumentOrResultType(object):
    """Base for argument/result converters; defaults are identity passes.

    ``_returns_result`` tells result-index bookkeeping whether this
    converter contributes a value to the method's result list.
    """

    _returns_result = True

    def append_result_value(self, method, definition, value, result):
        converted = self.convert_result_value(method, definition, value)
        result.append(converted)

    def convert_result_value(self, method, definition, value):
        return value

    def convert_argument_value(self, method, definition, value):
        return value
class NoUnitMethodArgumentOrResultType(MethodArgumentOrResultType):
    """Marker for unitless arguments/results."""

    def __reduce__(self):
        # Pickle as a call that resolves back to the shared singleton.
        return (_get_result_type, ("NO_UNIT",))
class UnitMethodArgumentOrResultType(MethodArgumentOrResultType):
    """Marker for arguments/results that are themselves units."""

    def __reduce__(self):
        # Pickle as a call that resolves back to the shared singleton.
        return (_get_result_type, ("UNIT",))
class ErrorCodeMethodArgumentOrResultType(MethodArgumentOrResultType):
    """Marker for integer error-code results.

    Error codes are checked (raising via the definition's
    ``handle_errorcode``) but never appended to the result list.
    """

    _returns_result = False

    def append_result_value(self, method, definition, value, result):
        # Check only; an error code contributes no result value.
        self.convert_result_value(method, definition, value)

    def convert_result_value(self, method, definition, errorcode):
        # Fast path: an all-zero array of codes means "no errors".
        if hasattr(errorcode, 'any') and not errorcode.any():
            return
        if hasattr(errorcode, '__iter__'):
            for code in errorcode:
                definition.handle_errorcode(code)
        else:
            definition.handle_errorcode(errorcode)

    def __reduce__(self):
        # Pickle as a call that resolves back to the shared singleton.
        return (_get_result_type, ("ERROR_CODE",))
class IndexMethodArgumentOrResultType(MethodArgumentOrResultType):
    """Marker for raw code indices; values pass through unchanged."""

    def convert_result_value(self, method, definition, value):
        return value

    def convert_argument_value(self, method, definition, value):
        return value

    def __reduce__(self):
        # Pickle as a call that resolves back to the shared singleton.
        return (_get_result_type, ("INDEX",))
def _get_result_type(name):
    """Map a pickle tag back to the shared marker singleton for that kind."""
    mapping = {
        "NO_UNIT": MethodWithUnitsDefinition.NO_UNIT,
        "UNIT": MethodWithUnitsDefinition.UNIT,
        "ERROR_CODE": MethodWithUnitsDefinition.ERROR_CODE,
        "INDEX": MethodWithUnitsDefinition.INDEX,
    }
    return mapping.get(name)
class LinkMethodArgumentOrResultType(MethodArgumentOrResultType):
    """Converts between code-side indices and particles of a linked set.

    The linked set is looked up by name on the definition's wrapped
    object; results map indices to particles, arguments map particles
    (or arrays of particles) to indices.
    """

    def __init__(self, linked_set_name):
        self.linked_set_name = linked_set_name

    def get_linked_set(self, method, definition):
        # method might provide a shorter path to the interface object
        # and is cleaner as definition might move to interface class later.
        # (Removed the leftover debug print/re-raise: a failed getattr now
        # propagates with its original, clean traceback.)
        return getattr(definition.wrapped_object, self.linked_set_name)

    def convert_result_value(self, method, definition, value):
        """Map code indices in *value* to particles.

        Negative indices (no particle) are left as None in the result.
        """
        linked_set = self.get_linked_set(method, definition)
        storage = linked_set._private.attribute_storage
        keys = storage._get_keys_for_indices_in_the_code(value)
        result = base.LinkedArray(numpy.empty(len(keys), dtype=object))
        for index, key in enumerate(keys):
            if key >= 0:
                result[index] = linked_set._get_particle(key)
        return result

    def convert_argument_value(self, method, definition, value):
        """Map a particle, or an array of particles, to code indices.

        Invalid (missing) particles map to index -1.
        """
        linked_set = self.get_linked_set(method, definition)
        storage = linked_set._private.attribute_storage
        if isinstance(value, datamodel.Particle):
            return storage.get_indices_of([value.key])
        value = base.LinkedArray(numpy.asanyarray(value)).as_set()
        valid = value.get_valid_particles_mask()
        all_keys = value.get_all_keys_in_store()
        valid_indices = numpy.asarray(storage.get_indices_of(all_keys[valid]))
        all_indices = -1 * numpy.ones(len(value), dtype=valid_indices.dtype)
        all_indices[valid] = valid_indices
        return all_indices
class CodeAttributeWrapper(object):
    """Placeholder base class for wrappers around code attributes."""

    def __init__(self):
        pass
class HandleCodeInterfaceAttributeAccess(object):
    """Base class for attribute-access handlers on code interfaces.

    The defaults handle nothing: subclasses override ``supports`` /
    ``get_attribute`` to intercept names, ``attribute_names`` to
    advertise them, ``setup`` to hook interface definition, and
    ``has_name`` to identify their handler category.
    """

    def supports(self, name, was_found):
        """Return True when this handler wants to provide *name*."""
        return False

    def get_attribute(self, name, result):
        """Return the (possibly wrapped) value for *name*; default passes through."""
        return result

    def attribute_names(self):
        """Return the set of names this handler contributes (none by default)."""
        return set()

    def setup(self, object):
        pass

    def has_name(self, name):
        """Return True when *name* identifies this handler's category."""
        return False
class LegacyInterfaceHandler(HandleCodeInterfaceAttributeAccess):
    """Exposes attributes of the low-level legacy interface.

    Callable attributes are wrapped in a ProxyingMethodWrapper and the
    wrappers are cached per name; plain attributes pass straight through.
    """

    def __init__(self, legacy_interface):
        self.legacy_interface = legacy_interface
        self.method_instances = {}

    def supports(self, name, was_found):
        if name in self.method_instances:
            return True
        return hasattr(self.legacy_interface, name)

    def get_attribute(self, name, result):
        cached = self.method_instances.get(name)
        if cached is not None:
            return cached
        attribute = getattr(self.legacy_interface, name)
        if not hasattr(attribute, '__call__'):
            # Non-callables are returned directly, never cached.
            return attribute
        wrapper = ProxyingMethodWrapper(self.legacy_interface, name)
        self.method_instances[name] = wrapper
        return wrapper

    def attribute_names(self):
        return set(dir(self.legacy_interface))

    def has_name(self, name):
        return name == 'LEGACY'
class HandleConvertUnits(HandleCodeInterfaceAttributeAccess, CodeMethodWrapperDefinition):
    # Converts quantities between the user's unit system ("source") and
    # the code's unit system ("target") on attribute access and method
    # calls. Disabled (supports() returns False) until set_converter()
    # or set_nbody_converter() is called.
    def __init__(self, handler):
        self.handler = handler
        self.converter = None
    def supports(self, name, was_found):
        # Only post-processes names another handler already resolved.
        return was_found and not self.converter is None
    def get_attribute(self, name, attribute):
        # Type-based dispatch; ORDER MATTERS (e.g. str/bytearray must be
        # caught before the generic '__iter__' fallback).
        if inspect.ismethod(attribute):
            result = attribute #UnitsConvertionMethod(attribute, self.converter)
        elif isinstance(attribute, datamodel.AbstractParticleSet):
            result = attribute #datamodel.ParticlesWithUnitsConverted(attribute, self.converter)
        elif isinstance(attribute, datamodel.AbstractGrid):
            result = attribute
        elif isinstance(attribute, quantities.Quantity):
            result = self.converter.from_target_to_source(attribute)
        elif isinstance(attribute, CodeMethodWrapper):
            # Wrap methods so arguments/results are converted per call.
            result = CodeMethodWrapper(attribute, self)
        elif isinstance(attribute, parameters.Parameters):
            result = parameters.new_parameters_with_units_converted_instance_with_docs(attribute, self.converter)
        elif isinstance(attribute, str):
            result = attribute
        elif isinstance(attribute, bytearray):
            result = attribute
        elif hasattr(attribute, '__iter__'):
            result = list(self.convert_and_iterate(attribute))
        else:
            result = attribute
        return result
    def convert_and_iterate(self, iterable):
        # Yield the elements, converting only the Quantity instances.
        for x in iterable:
            if isinstance(x, quantities.Quantity):
                yield self.converter.from_target_to_source(x)
            else:
                yield x
    def set_converter(self, converter):
        self.converter = converter
    def set_nbody_converter(self, nbody_converter):
        self.set_converter(nbody_converter.as_converter_from_si_to_generic())
    def has_name(self, name):
        return name == 'UNIT'
    def setup(self, object):
        object.define_converter(self)
    def convert_arguments(self, method, list_arguments, keyword_arguments):
        # Positional arguments are converted only where the method marks
        # them as n-body; keyword arguments are always converted.
        converted_list_arguments = []
        for x,is_nbody in zip(list_arguments, method.nbody_input_attributes):
            if is_nbody:
                converted_list_arguments.append(self.from_source_to_target(x))
            else:
                converted_list_arguments.append(x)
        converted_keyword_arguments = {}
        for key, value in keyword_arguments.items():
            converted_keyword_arguments[key] = self.from_source_to_target(value)
        return converted_list_arguments, converted_keyword_arguments
    def convert_result(self, method, result):
        return self.from_target_to_source(result)
    def from_source_to_target(self, x):
        # Non-quantities pass through unchanged.
        if isinstance(x, quantities.Quantity):
            return self.converter.from_source_to_target(x)
        else:
            return x
    def from_target_to_source(self, x):
        # Quantities with non-numeric units, strings and ndarrays pass
        # through; other containers get element-wise conversion.
        if isinstance(x, quantities.Quantity):
            if x.unit.is_non_numeric():
                return x
            else:
                return self.converter.from_target_to_source(x)
        elif isinstance(x, str):
            return x
        elif isinstance(x, numpy.ndarray):
            return x
        elif hasattr(x, '__len__'):
            return list(self.convert_and_iterate(x))
        elif hasattr(x, '__iter__'):
            return list(self.convert_and_iterate(x))
        else:
            return x
class StateMethodDefinition(CodeMethodWrapperDefinition):
    # Wraps a code method with the state transitions required to call it:
    # precall() drives the state machine into a valid 'from' state before
    # the wrapped method runs, postcall() records the resulting state.
    def __init__(self, state_machine, interface, from_state, to_state, function_name):
        self.state_machine = state_machine
        self.interface = interface
        self.transitions = []
        self.add_transition(from_state, to_state)
        self.function_name = function_name
        # Guards new_method() against infinite recursion when resolving
        # the underlying method through the interface triggers this
        # definition again.
        self.is_determining_method = False
    def add_transition(self, from_state, to_state):
        self.transitions.append((from_state, to_state))
    def remove_transition(self, from_name, to_name):
        # Removes the LAST matching transition (index keeps being
        # overwritten while scanning).
        index = -1
        for i, transition in enumerate(self.transitions):
            from_state, to_state = transition
            if from_name == from_state.name and to_name == to_state.name:
                index = i
        if index >= 0:
            del self.transitions[index]
    def new_method(self, method=None):
        # Resolve the underlying method lazily when not supplied.
        if method is None:
            if self.is_determining_method:
                raise Exception("A state is defined for a method with name '{0}', but the method is not implemented".format(self.function_name))
            self.is_determining_method = True
            try:
                method = getattr(self.interface, self.function_name)
            finally:
                self.is_determining_method = False
        return CodeMethodWrapper(method, self)
    def precall(self, method):
        # Returns the target state to be applied by postcall(). If the
        # current state already matches a transition's 'from' state (or
        # the transition accepts any state), no transition is executed.
        stored_transitions = []
        for from_state, to_state in self.transitions:
            if from_state is None:
                return to_state
            elif from_state.matches(self.state_machine._current_state):
                return to_state
            else:
                stored_transitions.append((from_state, to_state))
        # Otherwise, find all reachable 'from' states and execute the
        # SHORTEST transition path (note the early return in the loop).
        possible_paths = []
        for from_state, to_state in stored_transitions:
            try:
                transition_path = self.state_machine._get_state_transition_path_to(from_state)
                possible_paths.append([transition_path, to_state])
            except Exception as ex:
                pass
        if len(possible_paths) == 0:
            # do again to get an exception.
            message = "While calling {0} of {1}: ".format(self.function_name, type(self.interface))
            try:
                self.state_machine._get_state_transition_path_to(stored_transitions[0][0])
            except exceptions.AmuseException as ex:
                # Prefix the error with the calling context before re-raising.
                args = list(ex.arguments)
                args[0] = message+str(args[0])
                ex.arguments = tuple(args)
                raise ex
            except Exception as ex:
                args = list(ex.args)
                args[0] = message+str(args[0])
                ex.args = tuple(args)
                raise ex
        for path, to_state in sorted(possible_paths, key=lambda x: len(x[0])):
            for transition in path:
                transition.do()
            return to_state
    def postcall(self, method, to_state):
        # Record the new state; a None target or an already-matching
        # state leaves the machine untouched.
        if to_state is None:
            return
        elif to_state.matches(self.state_machine._current_state):
            return
        else:
            self.state_machine._current_state = to_state
    def __str__(self):
        return "<StateMethod {0}>".format(self.function_name)
class HandleState(HandleCodeInterfaceAttributeAccess):
    # Exposes the interface's state machine and wraps methods with the
    # state transitions declared for them (via StateMethodDefinition).
    def __init__(self, interface, **options):
        self._mapping_from_name_to_state_method = {}
        self.interface = interface
        self._state_machine = state.StateMachine(interface, **options)
    def supports(self, name, was_found):
        # Always provides 'state_machine'; other names only when the
        # machine is enabled and a state method was declared for them.
        if name == 'state_machine':
            return True
        else:
            return self._state_machine.is_enabled and (name in self._mapping_from_name_to_state_method)
    def get_attribute(self, name, value):
        if name == 'state_machine':
            return self._state_machine
        else:
            return self._mapping_from_name_to_state_method[name].new_method(value)
    def attribute_names(self):
        result = set(self._mapping_from_name_to_state_method.keys())
        result.add('state_machine')
        return result
    def define_state(self, name):
        self._state_machine.new_state(name)
    def _add_state_method(self, from_state, to_state, function_name):
        # Creates a new definition for function_name or extends the
        # existing one with an additional transition.
        if function_name not in self._mapping_from_name_to_state_method:
            state_method = StateMethodDefinition(
                self._state_machine,
                self.interface,
                from_state,
                to_state,
                function_name,
            )
            self._mapping_from_name_to_state_method[function_name] = state_method
        else:
            state_method = self._mapping_from_name_to_state_method[function_name]
            state_method.add_transition(from_state, to_state)
    def _remove_state_method(self, from_name, to_name, function_name):
        if function_name in self._mapping_from_name_to_state_method:
            state_method = self._mapping_from_name_to_state_method[function_name]
            state_method.remove_transition(from_name, to_name)
    def add_method(self, state_name, function_name):
        """
        Define a method that can run when the interface is in the
        provided state.
        """
        self._add_state_method(
            self._state_machine.new_state(state_name),
            None,
            function_name,
        )
    def add_transition(self, from_name, to_name, function_name, is_auto=True):
        # Declares that calling function_name moves the interface from
        # from_name to to_name; is_auto allows the machine to run this
        # transition automatically when searching a state path.
        transition = self._state_machine.new_transition(
            from_name, to_name, is_auto,
        )
        definition = StateMethodDefinition(
            self._state_machine,
            self.interface,
            transition.from_state,
            transition.to_state,
            function_name,
        )
        transition.method = definition
        self._add_state_method(
            transition.from_state,
            transition.to_state,
            function_name,
        )
    def remove_transition(self, from_name, to_name, function_name):
        self._state_machine.remove_transition(from_name, to_name)
        self._remove_state_method(from_name, to_name, function_name)
    def add_transition_to_method(self, state_name, function_name, is_auto=True):
        """
        Define a method that can run in any state and will transition the
        interface to the provided state.
        """
        transition = self._state_machine.new_transition(None, state_name, is_auto)
        definition = StateMethodDefinition(self._state_machine, self.interface, transition.from_state, transition.to_state, function_name)
        transition.method = definition
        self._add_state_method(None, transition.to_state, function_name)
    def do_automatic_state_transitions(self, boolean):
        self._state_machine._do_automatic_state_transitions = boolean
    def set_initial_state(self, name):
        self._state_machine.set_initial_state(name)
    def setup(self, object):
        object.define_state(self)
    def has_name(self, name):
        return name == 'STATE'
    def get_name_of_current_state(self):
        return self._state_machine.get_name_of_current_state()
class MethodWithUnits(CodeMethodWrapper):
    # Wraps a code method, delegating unit bookkeeping to its
    # MethodWithUnitsDefinition.
    def __init__(self, original_method, definition):
        CodeMethodWrapper.__init__(self, original_method, definition)
    # '@late' attributes mirror the definition's per-argument flags
    # (presumably computed once and cached -- see amuse.support.core.late).
    @late
    def index_input_attributes(self):
        return self.definition.index_input_attributes
    @late
    def nbody_input_attributes(self):
        return self.definition.nbody_input_attributes
    @late
    def index_output_attributes(self):
        return self.definition.index_output_attributes
class MethodWithUnitsDefinition(CodeMethodWrapperDefinition):
    # Describes the units of a code method's inputs and outputs and
    # performs the conversions: arguments are stripped to plain numbers
    # in the declared units before the call, results are re-attached to
    # units (or treated as error codes) afterwards.
    #
    # The class-level markers below are shared singletons used in the
    # per-argument 'units' lists.
    ERROR_CODE = ErrorCodeMethodArgumentOrResultType()
    NO_UNIT = NoUnitMethodArgumentOrResultType()
    UNIT = UnitMethodArgumentOrResultType()
    INDEX = IndexMethodArgumentOrResultType()
    LINK = LinkMethodArgumentOrResultType
    def __init__(self, wrapped_object, function_name, units, return_units, name):
        self.function_name = function_name
        # A single unit is normalized to a one-element tuple.
        if hasattr(units, '__iter__'):
            self.units = units
        else:
            self.units = (units,)
        self.return_units = return_units
        self.is_return_units_iterable = hasattr(self.return_units, '__iter__')
        self.wrapped_object = wrapped_object
        self.name = name
        # return_units is None => the raw return value is an error code.
        if self.return_units is None:
            self.handle_return_value = self.handle_as_errorcode
        else:
            self.handle_return_value = self.handle_as_unit
    def __getstate__(self):
        # handle_return_value is a bound method; drop it for pickling and
        # rebuild it in __setstate__.
        result = {}
        result.update(self.__dict__)
        del result['handle_return_value']
        return result
    def __setstate__(self, state):
        self.__dict__ = state
        if self.return_units is None:
            self.handle_return_value = self.handle_as_errorcode
        else:
            self.handle_return_value = self.handle_as_unit
    def check_wrapped_method(self, method):
        if method.method_is_legacy or method.method_is_code:
            self.check_outputs_of_method(method)
            self.check_inputs_of_method(method)
    def new_method(self, original_method):
        # When exposed under a different public name, look the real
        # method up by its original function_name.
        if self.has_same_name_as_original:
            return MethodWithUnits(original_method, self)
        else:
            return MethodWithUnits(
                getattr(self.wrapped_object, self.function_name), self
            )
    def handle_errorcode(self, errorcode):
        # Known codes raise with their registered message; any other
        # negative code raises generically; non-negative codes pass.
        if errorcode in self.wrapped_object.errorcodes:
            raise exceptions.AmuseException(
                f"Error when calling '{self.name}' of a "
                f"'{type(self.wrapped_object)}', errorcode is "
                f"{errorcode}, error is "
                f"'{self.wrapped_object.errorcodes[errorcode]}'",
                errorcode
            )
        elif errorcode < 0:
            raise exceptions.AmuseException(
                f"Error when calling '{self.name}' of a "
                f"'{type(self.wrapped_object)}', errorcode is "
                f"{errorcode}",
                errorcode
            )
        else:
            return errorcode
    def handle_as_errorcode(self, method, errorcode):
        # Fast path: an all-zero array of codes means no errors occurred.
        if hasattr(errorcode, 'any'):
            if not errorcode.any():
                return
        if hasattr(errorcode, '__iter__'):
            for x in errorcode:
                self.handle_errorcode(x)
        else:
            self.handle_errorcode(errorcode)
    def handle_as_unit(self, method, return_value):
        if not self.is_return_units_iterable:
            return self.return_units.convert_result_value(
                method, self, return_value)
        else:
            if not hasattr(return_value, '__iter__'):
                return_value = [return_value]
            result = []
            # Each return-unit marker decides whether/how its value is
            # appended (error codes check and append nothing).
            for value, unit in zip(return_value, self.return_units):
                unit.append_result_value(method, self, value, result)
            if len(result) == 1:
                return result[0]
            else:
                return result
    def convert_arguments(self, method, list_arguments, keyword_arguments):
        # Converts all arguments to plain numbers keyed by parameter
        # name; everything is returned as keyword arguments.
        result = {}
        input_parameters = method.method_input_argument_names
        for index, parameter in enumerate(input_parameters):
            if parameter in keyword_arguments:
                if self.units[index] == self.NO_UNIT:
                    arg = keyword_arguments[parameter]
                    if is_quantity(arg):
                        result[parameter] = arg.value_in(units.none)
                    else:
                        result[parameter] = arg
                elif self.units[index] == self.INDEX:
                    result[parameter] = keyword_arguments[parameter]
                elif self.units[index] == self.UNIT:
                    result[parameter] = keyword_arguments[parameter]
                else:
                    result[parameter] = quantities.value_in(
                        keyword_arguments[parameter],self.units[index])
        for index, argument in enumerate(list_arguments):
            parameter = input_parameters[index]
            if parameter in result:
                raise ConvertArgumentsException(
                    f"got multiple values for argument '{parameter}' "
                    f"of method {self.function_name}"
                )
            try:
                if self.units[index] == self.NO_UNIT:
                    if is_quantity(argument):
                        result[parameter] = argument.value_in(units.none)
                    else:
                        result[parameter] = argument
                elif self.units[index] == self.INDEX:
                    result[parameter] = argument
                elif self.units[index] == self.UNIT:
                    result[parameter] = argument
                elif type(self.units[index]) == self.LINK:
                    # LINK converters map particles to code indices.
                    result[parameter] = self.units[index].convert_argument_value(method, self, argument)
                else:
                    # A unit-less declared 'none' unit accepts bare numbers.
                    if self.units[index].is_none() and not hasattr(argument,'unit'):
                        result[parameter] = argument
                    else:
                        result[parameter] = quantities.value_in(argument,self.units[index])
            except core.IncompatibleUnitsException as ex:
                raise ConvertArgumentsException(
                    f"error while converting parameter '{parameter}', "
                    f"error: {ex}"
                )
            except Exception as ex:
                raise exceptions.AmuseException(
                    f"error while converting parameter '{parameter}', "
                    f"error: {ex}"
                )
        return (), result
    def convert_result(self, method, result):
        return self.handle_return_value(method, result)
    # this function tries to determine the size of results from definition
    # it's a bit ad-hoc.
    # in spite of what the name suggests it determines it from scratch
    # (and not converting the index from the wrapped function's result_index)
    def convert_result_index(self, method):
        if self.return_units is None:
            return None
        else:
            if not self.is_return_units_iterable:
                return None
            else:
                # Count return units that actually yield a result value
                # (error codes have _returns_result == False).
                nresult = 0
                for unit in self.return_units:
                    if not hasattr(unit, "_returns_result"):
                        nresult += 1
                    else:
                        if unit._returns_result:
                            nresult += 1
                return list(range(nresult))
    @late
    def has_same_name_as_original(self):
        return self.function_name == self.name
    @late
    def index_input_attributes(self):
        return [x == self.INDEX for x in self.units]
    @late
    def nbody_input_attributes(self):
        return [isinstance(x, UnitMethodArgumentOrResultType) or isinstance(x, unit) and generic_unit_system.is_generic_unit(x) for x in self.units]
    @late
    def index_output_attributes(self):
        if not hasattr(self.return_units, '__iter__'):
            return [self.return_units == self.INDEX]
        else:
            return [x == self.INDEX for x in self.return_units]
    def check_inputs_of_method(self, method):
        # Verifies the declared unit count matches the legacy
        # specification's input parameter count.
        specification = method.legacy_specification
        if specification is None:
            return
        number_expected_inputs = len(specification.input_parameters)
        if self.units:
            if hasattr(self.units, '__len__'):
                number_specified_inputs = len(self.units)
            else:
                number_specified_inputs = 1
        else:
            number_specified_inputs = 0
        if number_expected_inputs != number_specified_inputs:
            # NOTE(review): IncorrectMethodDefinition is defined
            # elsewhere in this module.
            raise IncorrectMethodDefinition(
                self.name,
                type(self.wrapped_object).__name__,
                number_expected_inputs,
                number_specified_inputs,
                'inputs'
            )
    def check_outputs_of_method(self, method):
        # Verifies the declared return-unit count matches the legacy
        # specification's outputs (+1 when a result type is declared).
        specification = method.legacy_specification
        if specification is None:
            return
        number_expected_outputs = len(specification.output_parameters)
        if specification.result_type is not None:
            number_expected_outputs += 1
        if self.return_units:
            if hasattr(self.return_units, '__len__'):
                number_specified_outputs = len(self.return_units)
            else:
                number_specified_outputs = 1
        else:
            number_specified_outputs = 0
        if number_expected_outputs == 1 and number_specified_outputs == 0:
            return # default error checks for one output
        if number_expected_outputs != number_specified_outputs:
            raise IncorrectMethodDefinition(
                self.name,
                type(self.wrapped_object).__name__,
                number_expected_outputs,
                number_specified_outputs,
                'outputs'
            )
class HandleMethodsWithUnits(object):
    # Collects MethodWithUnitsDefinition objects for an interface and
    # serves unit-aware wrapped methods on attribute access. Re-exports
    # the definition markers for convenience in define_methods() calls.
    ERROR_CODE = MethodWithUnitsDefinition.ERROR_CODE
    NO_UNIT = MethodWithUnitsDefinition.NO_UNIT
    INDEX = MethodWithUnitsDefinition.INDEX
    LINK = MethodWithUnitsDefinition.LINK
    UNIT = MethodWithUnitsDefinition.UNIT
    def __init__(self, interface):
        self.method_definitions = {}
        self.interface = interface
        self.setup_units_from_legacy_interface()
        self.method_instances = {}
    def setup_units_from_legacy_interface(self):
        # Builds a definition per legacy function, taking units from its
        # specification; missing units default to NO_UNIT and a typed
        # result without a unit is treated as an error code.
        for name, specification in self.interface_function_specifications():
            units = [x.unit for x in specification.input_parameters]
            return_units = [x.unit for x in specification.output_parameters]
            if not specification.result_type is None:
                if specification.result_unit is None:
                    return_units.append(MethodWithUnitsDefinition.ERROR_CODE)
                else:
                    return_units.append(specification.result_unit)
            default_to_nounit = lambda y: MethodWithUnitsDefinition.NO_UNIT if y is None else y
            return_units = [ default_to_nounit(x) for x in return_units]
            units = [default_to_nounit(x) for x in units]
            definition = MethodWithUnitsDefinition(
                self.interface,
                name,
                units,
                return_units,
                name
            )
            self.method_definitions[name] = definition
    def interface_function_specifications(self):
        # Collects (name, specification) pairs of all legacy functions,
        # sorted by specification id.
        interface_type = type(self.interface.legacy_interface)
        attribute_names = dir(interface_type)
        result = []
        for x in attribute_names:
            if x.startswith('__'):
                continue
            value = getattr(interface_type, x)
            if hasattr(value, 'specification') and hasattr(value.specification, 'input_parameters'):
                result.append( [x,value.specification])
        result.sort(key=lambda x: x[1].id)
        return result
    def supports(self, name, was_found):
        return name in self.method_definitions
    def get_attribute(self, name, value):
        # Wrapped methods are created lazily and cached per name.
        if not name in self.method_instances:
            self.method_instances[name] = self.method_definitions[name].new_method(value)
        return self.method_instances[name]
    def attribute_names(self):
        return set(self.method_definitions.keys())
    def add_method(self, original_name, units, return_unit=None, public_name=None):
        # Declares (or overrides) the units of a method, optionally
        # exposing it under a different public name.
        if public_name is None:
            public_name = original_name
        definition = MethodWithUnitsDefinition(
            self.interface,
            original_name,
            units,
            return_unit,
            public_name
        )
        self.method_definitions[public_name] = definition
    def has_name(self, name):
        return name == 'METHOD'
    def setup(self, object):
        object.define_methods(self)
class PropertyWithUnitsDefinition(object):
    # Exposes a code function (or plain attribute) as a read-only
    # property whose value is attached to a fixed unit.
    def __init__(self, handler, function_or_attribute_name, unit, public_name):
        self.function_or_attribute_name = function_or_attribute_name
        self.unit = unit
        self.public_name = public_name
        self.handler = handler
    def get_value(self, original):
        if self.has_same_name_as_original:
            function_or_attribute = original
        else:
            function_or_attribute = getattr(self.handler.interface, self.function_or_attribute_name)
        if hasattr(function_or_attribute, '__call__'):
            return_value = function_or_attribute()
            if hasattr(return_value, '__iter__'):
                # Iterable results are assumed to end with an error code:
                # (value..., errorcode). NOTE(review): a length-1 iterable
                # would fail the two-element unpack below -- confirm the
                # code functions always return >= 2 items here.
                if len(return_value) > 2:
                    return_value = list(return_value)
                    value, errorcode = return_value[:-1], return_value[-1]
                else:
                    value, errorcode = return_value
                if errorcode < 0:
                    raise exceptions.AmuseException("calling '{0}' to get the value for property '{1}' resulted in an error (errorcode {2})".format(self.function_or_attribute_name, self.public_name, errorcode))
                else:
                    return self.unit.new_quantity(value)
            else:
                return self.unit.new_quantity(return_value)
        else:
            # Plain attribute: just attach the unit.
            return self.unit.new_quantity(function_or_attribute)
    @late
    def has_same_name_as_original(self):
        return self.function_or_attribute_name == self.public_name
class HandlePropertiesWithUnits(HandleCodeInterfaceAttributeAccess):
    """Exposes code properties (read-only values, optionally with units)
    as attributes of the component."""

    def __init__(self, interface):
        self.property_definitions = {}
        self.interface = interface

    def supports(self, name, was_found):
        """True when *name* is a registered property."""
        return name in self.property_definitions

    def get_attribute(self, name, value):
        """Evaluate and return the property called *name*."""
        return self.property_definitions[name].get_value(value)

    def attribute_names(self):
        """All registered property names, as a set."""
        return {name for name in self.property_definitions}

    def add_property(self, function_name, unit=None, public_name=None):
        """Register *function_name* as a property.

        When *public_name* is omitted it is derived from the function name,
        stripping a leading 'get_' prefix if present. Without a *unit* the
        raw definition is used; otherwise the value is wrapped in *unit*.
        """
        if public_name is None:
            public_name = function_name[4:] if function_name.startswith('get_') else function_name
        if unit is None:
            definition = PropertyDefinition(self, function_name, public_name)
        else:
            definition = PropertyWithUnitsDefinition(
                self,
                function_name,
                unit,
                public_name,
            )
        self.property_definitions[public_name] = definition

    def has_name(self, name):
        """This handler answers to the 'PROPERTY' role."""
        return name == 'PROPERTY'

    def setup(self, object):
        """Let the interface declare its properties on this handler."""
        object.define_properties(self)
class HandleParameters(HandleCodeInterfaceAttributeAccess):
    """Builds and exposes named parameter sets from the definitions the
    interface registers via ``define_parameters``.

    The default set is called 'parameters'; additional sets are created
    by passing a different ``parameter_set`` name when adding definitions.
    """

    def __init__(self, interface):
        self.property_definitions = {}
        self.interface = interface
        # every parameter set maps to a list of definitions; the default
        # 'parameters' set always exists
        self.definitions = defaultdict(list, parameters=[])
        self.parameters = {}

    def supports(self, name, was_found):
        """True when *name* is a known parameter-set name."""
        return name in self.definitions.keys()

    def get_attribute(self, name, value):
        """Return the parameter set *name*, creating it lazily and
        refreshing it on subsequent accesses."""
        # note: parameters can be added after init, not yet removed
        name = name or 'parameters'
        if name not in self.parameters:
            d = self.definitions[name]
            self.parameters[name] = parameters.new_parameters_instance_with_docs(d, self.interface)
        else:
            self.parameters[name].update()
        result = self.parameters[name]
        return result

    def attribute_names(self):
        """All parameter-set names, as a set."""
        return set(self.definitions.keys())

    def add_method_parameter(self, get_method, set_method, name, description,
            default_value=None, must_set_before_get=False, is_vector=False,
            parameter_set='parameters'):
        """Register a parameter backed by getter/setter interface methods."""
        if is_vector:
            definition = parameters.ModuleVectorMethodParameterDefinition(
                get_method,
                set_method,
                name,
                description,
                default_value,
                must_set_before_get=must_set_before_get
            )
        else:
            definition = parameters.ModuleMethodParameterDefinition(
                get_method,
                set_method,
                name,
                description,
                default_value,
                must_set_before_get=must_set_before_get
            )
        self.definitions[parameter_set].append(definition)

    def add_alias_parameter(
        self,
        name,
        aliased_name,
        description,
        parameter_set='parameters',
        alias_set=None,
    ):
        """Register *name* as an alias for the existing parameter *aliased_name*."""
        definition = parameters.AliasParameterDefinition(
            name,
            aliased_name,
            description,
            alias_set=alias_set
        )
        self.definitions[parameter_set].append(definition)

    def add_caching_parameter(
        self,
        function_name,
        parameter_name,
        name,
        description,
        default_value=None,
        parameter_set='parameters',
    ):
        """Register a parameter whose value is cached and flushed to the code
        when *function_name* is invoked."""
        definition = parameters.ModuleCachingParameterDefinition(
            function_name,
            parameter_name,
            name,
            description,
            default_value
        )
        self.definitions[parameter_set].append(definition)

    def add_boolean_parameter(
        self,
        get_method,
        set_method,
        name,
        description,
        default_value=None,
        parameter_set='parameters',
    ):
        """Register a boolean parameter backed by getter/setter methods."""
        definition = parameters.ModuleBooleanParameterDefinition(
            get_method,
            set_method,
            name,
            description,
            default_value
        )
        self.definitions[parameter_set].append(definition)

    def add_default_form_parameter(
        self, name, description, default, parameter_set='parameters'
    ):
        """Register a parameter using the conventional get_<name>/set_<name>
        accessors, choosing boolean vs. plain form from the type of *default*.

        Fix: *parameter_set* is now forwarded to the underlying call;
        previously it was ignored and the default set was always used.
        """
        if isinstance(default, bool):
            self.add_boolean_parameter(
                "get_" + name,
                "set_" + name,
                name,
                description,
                default,
                parameter_set=parameter_set
            )
        else:
            self.add_method_parameter(
                "get_" + name,
                "set_" + name,
                name,
                description,
                default,
                parameter_set=parameter_set,
            )

    def add_array_parameter(
        self,
        get_method,
        set_method,
        range_method,
        name,
        description,
        parameter_set='parameters',
    ):
        """Register an array parameter; *range_method* supplies the valid indices."""
        definition = parameters.ModuleArrayParameterDefinition(
            get_method,
            set_method,
            range_method,
            name,
            description
        )
        self.definitions[parameter_set].append(definition)

    def has_name(self, name):
        """This handler answers to the 'PARAMETER' role."""
        return name == 'PARAMETER'

    def setup(self, object):
        """Let the interface declare its parameters on this handler."""
        object.define_parameters(self)

    def add_vector_parameter(
        self,
        name,
        description,
        parameter_names,
        parameter_set='parameters',
    ):
        """Register a vector parameter grouping previously added scalar ones.

        The default value is assembled from the defaults of the named scalar
        parameters: a plain list, or an AdaptingVectorQuantity when the
        defaults carry units.
        """
        default_value = None
        for parameter_name in parameter_names:
            for defined_parameter in self.definitions[parameter_set]:
                if defined_parameter.name == parameter_name:
                    if default_value is None:
                        if not is_quantity(defined_parameter.default_value):
                            default_value = []
                        else:
                            default_value = quantities.AdaptingVectorQuantity()
                    default_value.append(defined_parameter.default_value)
        definition = parameters.VectorParameterDefinition(
            name,
            description,
            parameter_names,
            default_value
        )
        self.definitions[parameter_set].append(definition)

    def add_interface_parameter(
        self,
        name,
        description,
        default_value,
        state_guard=None,
        parameter_set='parameters',
    ):
        """Register a parameter stored on the interface itself (no accessors)."""
        definition = parameters.InterfaceParameterDefinition(
            name,
            description,
            default_value,
            state_guard=state_guard,
        )
        self.definitions[parameter_set].append(definition)
class HandleErrorCodes(HandleCodeInterfaceAttributeAccess):
    """Collects the error codes declared by the interface and exposes the
    number-to-message mapping under the 'errorcodes' attribute."""

    def __init__(self, interface):
        self.error_codes = {}
        self.interface = interface

    def supports(self, name, was_found):
        """Only the literal attribute name 'errorcodes' is handled here."""
        return name == 'errorcodes'

    def get_attribute(self, name, value):
        """Return the mapping from error number to message string."""
        return self.error_codes

    def attribute_names(self):
        """The single exposed attribute name."""
        return {'errorcodes'}

    def add_errorcode(self, number, string):
        """Register the message *string* for error *number*."""
        self.error_codes[number] = string

    def has_name(self, name):
        """This handler answers to the 'ERRORCODE' role."""
        return name == 'ERRORCODE'

    def setup(self, object):
        """Let the interface declare its error codes on this handler."""
        object.define_errorcodes(self)
class AbstractParticleSetDefinition(object):
    """Collects the declarative pieces of a particle-set or grid definition:
    constructors, attribute accessors, queries and selections.

    Subclasses initialise the list attributes these recorders append to.
    """

    def set_new(self, name_of_new_particle_method, names=None):
        """Record the method that creates new particles."""
        self.new_particle_method = (name_of_new_particle_method, names)

    def set_grid_range(self, name_of_the_get_range_method):
        """Record the method returning the index ranges of a grid."""
        self.name_of_the_get_range_method = name_of_the_get_range_method

    def set_delete(self, name_of_delete_particle_method):
        """Record the method that removes particles."""
        self.name_of_delete_particle_method = name_of_delete_particle_method

    def add_getter(self, name_of_the_getter, names=None):
        """Add an attribute-getter method."""
        self.getters.append((name_of_the_getter, names))

    def add_setter(self, name_of_the_setter, names=None):
        """Add an attribute-setter method."""
        self.setters.append((name_of_the_setter, names))

    def add_gridded_getter(self, name_of_the_getter, name_of_the_range_method, names=None):
        """Add a getter that works per grid point, with its range method."""
        self.gridded_getters.append((name_of_the_getter, name_of_the_range_method, names))

    def add_gridded_setter(self, name_of_the_setter, name_of_the_range_method, names=None):
        """Add a setter that works per grid point, with its range method."""
        self.gridded_setters.append((name_of_the_setter, name_of_the_range_method, names))

    def add_attribute(self, name_of_the_attribute, name_of_the_method, names=None):
        """Add a calculated attribute backed by an interface method."""
        self.attributes.append((name_of_the_attribute, name_of_the_method, names))

    def add_query(self, name_of_the_query, names=(), public_name=None):
        """Add a query method; the public name defaults to the method name."""
        self.queries.append((name_of_the_query, names, public_name or name_of_the_query))

    def add_method(self, name_of_the_method, public_name=None):
        """Add a set-level method; the public name defaults to the method name."""
        self.methods.append((name_of_the_method, public_name or name_of_the_method))

    def add_select_from_particle(self, name, names=(), public_name=None):
        """Add a per-particle selection method."""
        self.selects_form_particle.append((name, names, public_name or name))

    def define_extra_keywords(self, dictionary):
        """Record extra keyword arguments passed to all getters/setters."""
        self.extra_keyword_arguments_for_getters_and_setters = dictionary

    def add_subselect_in_set(self, name, set_query_arguments_name=None, get_number_of_particles_name=None, public_name=None):
        """Add a subset selection applied to the whole set."""
        self.subselects_in_set.append((name, set_query_arguments_name, get_number_of_particles_name, public_name or name))

    def add_subselect_from_particle(self, name, get_number_of_particles_name=None, public_name=None):
        """Add a subset selection applied from a single particle."""
        self.subselects_from_particle.append((name, get_number_of_particles_name, public_name or name))
class ParticleSetDefinition(AbstractParticleSetDefinition):
    """Complete definition of an in-code particle set: how to create, delete,
    access and query particles through methods of the code interface."""
    def __init__(self, handler):
        self.handler = handler
        self.name_of_indexing_attribute = 'index_of_the_particle'
        # fix: this default must be a (name, names) pair — new_storage unpacks
        # it into two values; the previous 3-tuple default raised ValueError
        # whenever set_new() had not been called first
        self.new_particle_method = ('new_particle', ())
        self.name_of_delete_particle_method = 'delete_particle'
        self.name_of_number_of_particles_method = 'get_number_of_particles'
        self.setters = []
        self.getters = []
        self.gridded_getters = []
        self.gridded_setters = []
        self.queries = []
        self.attributes = []
        self.selects_form_particle = []
        self.subselects_in_set = []
        self.subselects_from_particle = []
        self.methods = []
        self.is_superset = False
        self.is_inmemory = False
        self.particles_factory = datamodel.Particles
    def new_storage(self, interface):
        """Build the attribute storage backing this set (in-memory or in-code)."""
        if self.is_inmemory:
            return datamodel.get_in_memory_attribute_storage_factory()()
        setters = []
        for name, names in self.setters:
            x = incode_storage.ParticleSetAttributesMethod(getattr(interface, name), names)
            setters.append(x)
        getters = []
        for name, names in self.getters:
            x = incode_storage.ParticleGetAttributesMethod(getattr(interface, name), names)
            getters.append(x)
        for name, range_method_name, names in self.gridded_getters:
            x = incode_storage.ParticleGetGriddedAttributesMethod(
                getattr(interface, name),
                getattr(interface, range_method_name),
                names
            )
            getters.append(x)
        for name, range_method_name, names in self.gridded_setters:
            x = incode_storage.ParticleSetGriddedAttributesMethod(
                getattr(interface, name),
                getattr(interface, range_method_name),
                names
            )
            setters.append(x)
        name, names = self.new_particle_method
        new_particle_method = incode_storage.NewParticleMethod(getattr(interface, name), names)
        delete_particle_method = getattr(interface, self.name_of_delete_particle_method)
        number_of_particles_method = None  # getattr(interface, self.name_of_number_of_particles_method)
        return incode_storage.InCodeAttributeStorage(
            interface,
            new_particle_method,
            delete_particle_method,
            number_of_particles_method,
            setters,
            getters,
            self.name_of_indexing_attribute
        )
    def new_set_instance(self, handler):
        """Create the particle set and attach all queries, selections,
        methods and calculated attributes defined on it."""
        storage = self.new_storage(handler.interface)
        if self.is_inmemory:
            result = self.particles_factory(handler.interface, storage=storage)
        else:
            result = self.particles_factory(storage=storage)
        queries = self.new_queries(handler.interface)
        for x in queries:
            result.add_function_attribute(x.public_name, x.apply)
        selects = self.new_selects_from_particle(handler.interface)
        for x in selects:
            result.add_function_attribute(x.public_name, x.apply_on_all)
            result.add_particle_function_attribute(x.public_name, x.apply_on_one)
        selects = self.new_subselects_from_particle(handler.interface)
        for x in selects:
            # result.add_function_attribute(x.public_name, x.apply_on_all)
            result.add_particle_function_attribute(x.public_name, x.apply_on_one)
        selects = self.new_subselects_in_set(handler.interface)
        for x in selects:
            result.add_function_attribute(x.public_name, x.apply_on_all)
        selects = self.new_particle_methods(handler.interface)
        for x in selects:
            result.add_function_attribute(x.public_name, x.apply_on_all, x.apply_on_one)
        attributes = self.attributes
        for name_of_the_attribute, name_of_the_method, names in attributes:
            result.add_calculated_attribute(name_of_the_attribute, getattr(handler.interface, name_of_the_method), names)
        return result
    def new_queries(self, interface):
        """Build query helpers from the recorded query definitions."""
        queries = []
        for name, names, public_name in self.queries:
            x = incode_storage.ParticleQueryMethod(getattr(interface, name), names, public_name)
            queries.append(x)
        return queries
    def new_selects_from_particle(self, interface):
        """Build per-particle selection helpers."""
        results = []
        for name, names, public_name in self.selects_form_particle:
            x = incode_storage.ParticleSpecificSelectMethod(getattr(interface, name), names, public_name)
            results.append(x)
        return results
    def new_particle_methods(self, interface):
        """Build set-level method helpers."""
        results = []
        for name, public_name in self.methods:
            x = incode_storage.ParticleMethod(getattr(interface, name), public_name)
            results.append(x)
        return results
    def new_subselects_in_set(self, interface):
        """Build subset-selection helpers that operate on the whole set."""
        results = []
        for name, set_query_arguments_name, number_of_particles_name, public_name in self.subselects_in_set:
            number_of_particles_method = None if number_of_particles_name is None else getattr(interface, number_of_particles_name)
            set_query_arguments_method = None if set_query_arguments_name is None else getattr(interface, set_query_arguments_name)
            x = incode_storage.ParticleSetSelectSubsetMethod(getattr(interface, name), set_query_arguments_method, number_of_particles_method, public_name)
            results.append(x)
        return results
    def new_subselects_from_particle(self, interface):
        """Build subset-selection helpers that start from one particle."""
        results = []
        for name, get_number_of_particles_name, public_name in self.subselects_from_particle:
            number_of_particles_method = None if get_number_of_particles_name is None else getattr(interface, get_number_of_particles_name)
            x = incode_storage.ParticleSpecificSelectSubsetMethod(getattr(interface, name), number_of_particles_method, public_name)
            results.append(x)
        return results
class ParticleSupersetDefinition(AbstractParticleSetDefinition):
    """Definition of a particle superset composed of previously defined,
    named subsets."""

    def __init__(self, handler, particle_subset_names, index_to_default_set=None):
        self.handler = handler
        self.particle_subset_names = particle_subset_names
        self.index_to_default_set = index_to_default_set
        self.is_superset = True
        self.particles_factory = datamodel.ParticlesSuperset
        self.queries = []

    def new_set_instance(self, handler):
        """Build the superset from its named subsets and attach its queries."""
        subsets = []
        for subset_name in self.particle_subset_names:
            subsets.append(handler.get_attribute(subset_name, None))
        result = self.particles_factory(
            subsets,
            index_to_default_set=self.index_to_default_set
        )
        for query in self.new_queries(handler.interface):
            result.add_function_attribute(query.public_name, query.apply)
        return result

    def new_queries(self, interface):
        """Build query helpers; superset queries search across all subsets."""
        return [
            incode_storage.ParticleQueryMethod(
                getattr(interface, name), names, public_name, query_superset=True
            )
            for name, names, public_name in self.queries
        ]
class GridDefinition(AbstractParticleSetDefinition):
    """Definition of an in-code grid: attribute accessors are indexed by grid
    position, with the valid index ranges supplied by a range method."""
    def __init__(self, handler, grid_class=datamodel.Grid):
        self.handler = handler
        self.axes_names = None
        self.name_of_the_get_range_method = 'get_range'
        self.setters = []
        self.getters = []
        self.gridded_setters = []
        self.gridded_getters = []
        self.particles_factory = grid_class
        self.extra_keyword_arguments_for_getters_and_setters = {}
    def new_storage(self, interface):
        """Build the in-code attribute storage for this grid.

        Wraps every recorded getter/setter (plain and gridded) in its
        incode_storage adapter and combines them with the range method.
        """
        setters = []
        for name, names in self.setters:
            x = incode_storage.ParticleSetAttributesMethod(getattr(interface, name), names)
            setters.append(x)
        getters = []
        for name, names in self.getters:
            x = incode_storage.ParticleGetAttributesMethod(getattr(interface, name), names)
            getters.append(x)
        for name, range_method_name, names in self.gridded_getters:
            x = incode_storage.ParticleGetGriddedAttributesMethod(
                getattr(interface, name),
                getattr(interface, range_method_name),
                names
            )
            getters.append(x)
        for name, range_method_name, names in self.gridded_setters:
            x = incode_storage.ParticleSetGriddedAttributesMethod(
                getattr(interface, name),
                getattr(interface, range_method_name),
                names
            )
            setters.append(x)
        range_method = getattr(interface, self.name_of_the_get_range_method)
        return incode_storage.InCodeGridAttributeStorage(
            interface,
            range_method,
            setters,
            getters,
            self.extra_keyword_arguments_for_getters_and_setters
        )
    def new_set_instance(self, handler):
        """Create the Grid object; optionally expose a 'position' vector
        attribute built from the configured axes names."""
        storage = self.new_storage(handler.interface)
        result = self.particles_factory(storage=storage)
        if self.axes_names is not None:
            result.add_vector_attribute("position",self.axes_names)
        return result
class CodeInMemoryParticles(datamodel.Particles):
    """Particle set stored in AMUSE memory but associated with a code interface."""
    def __init__(self, code_interface=None, storage=None):
        datamodel.Particles.__init__(self, storage=storage)
        # keep a reference to the owning code so set-level operations can reach it
        self._private.code_interface = code_interface
class HandleParticles(HandleCodeInterfaceAttributeAccess):
    """Exposes the named particle sets, supersets and grids defined by the
    interface; set instances are created lazily and cached per name."""
    def __init__(self, interface):
        self.interface = interface
        self.mapping_from_name_to_set_definition = {}
        self.mapping_from_name_to_set_instance = {}
    def supports(self, name, was_found):
        """True when *name* is a defined set."""
        return name in self.mapping_from_name_to_set_definition
    def get_attribute(self, name, value):
        """Return the cached set instance for *name*, creating it on first
        access (running its state guard first, when configured)."""
        if name in self.mapping_from_name_to_set_instance:
            return self.mapping_from_name_to_set_instance[name]
        else:
            set_definition = self.mapping_from_name_to_set_definition[name]
            if set_definition.state_guard:
                getattr(self.interface, set_definition.state_guard)()
            result = set_definition.new_set_instance(self)
            self.mapping_from_name_to_set_instance[name] = result
            return result
    def attribute_names(self):
        """All defined set names, as a set."""
        return set(self.mapping_from_name_to_set_definition.keys())
    def has_name(self, name):
        """This handler answers to the PARTICLES, DATASETS and GRIDS roles."""
        return name == 'PARTICLES' or name == 'DATASETS' or name == 'GRIDS'
    def setup(self, object):
        """Let the interface declare its particle sets, data sets and grids."""
        object.define_particle_sets(self)
        object.define_data_sets(self)
        object.define_grids(self)
    def define_set(self, name, name_of_indexing_attribute='index_of_the_particle', state_guard=None):
        """Define a plain in-code particle set."""
        definition = ParticleSetDefinition(self)
        definition.name_of_indexing_attribute = name_of_indexing_attribute
        definition.state_guard = state_guard
        self.mapping_from_name_to_set_definition[name] = definition
    def define_super_set(self, name, particle_subsets, index_to_default_set=None, state_guard=None):
        """Define a superset built from previously defined subsets."""
        definition = ParticleSupersetDefinition(self, particle_subsets, index_to_default_set)
        definition.state_guard = state_guard
        self.mapping_from_name_to_set_definition[name] = definition
    def define_inmemory_set(self, name, particles_factory=CodeInMemoryParticles, state_guard=None):
        """Define a set stored in AMUSE memory instead of inside the code."""
        definition = ParticleSetDefinition(self)
        definition.is_inmemory = True
        definition.particles_factory = particles_factory
        definition.state_guard = state_guard
        self.mapping_from_name_to_set_definition[name] = definition
    def define_grid(self, name, name_of_indexing_attribute='index_of_the_particle', axes_names=None, grid_class=datamodel.Grid, state_guard=None):
        """Define an in-code grid."""
        definition = GridDefinition(self, grid_class=grid_class)
        definition.name_of_indexing_attribute = name_of_indexing_attribute
        definition.axes_names = axes_names
        definition.state_guard = state_guard
        self.mapping_from_name_to_set_definition[name] = definition
    # the following methods forward declarative calls to the named set's definition
    def set_new(self, name_of_the_set, name_of_new_particle_method, names=None):
        self.mapping_from_name_to_set_definition[name_of_the_set].set_new(name_of_new_particle_method, names=names)
    def set_grid_range(self, name_of_the_set, name_of_the_get_range_method):
        self.mapping_from_name_to_set_definition[name_of_the_set].set_grid_range(name_of_the_get_range_method)
    def set_delete(self, name_of_the_set, name_of_delete_particle_method):
        self.mapping_from_name_to_set_definition[name_of_the_set].set_delete(name_of_delete_particle_method)
    def add_getter(self, name_of_the_set, name_of_the_getter, names=None):
        self.mapping_from_name_to_set_definition[name_of_the_set].add_getter(name_of_the_getter, names=names)
    def add_setter(self, name_of_the_set, name_of_the_setter, names=None):
        self.mapping_from_name_to_set_definition[name_of_the_set].add_setter(name_of_the_setter, names=names)
    def add_gridded_getter(self, name_of_the_set, name_of_the_getter, name_of_the_range_method, names=None):
        self.mapping_from_name_to_set_definition[name_of_the_set].add_gridded_getter(name_of_the_getter, name_of_the_range_method, names=names)
    def add_gridded_setter(self, name_of_the_set, name_of_the_setter, name_of_the_range_method, names=None):
        self.mapping_from_name_to_set_definition[name_of_the_set].add_gridded_setter(name_of_the_setter, name_of_the_range_method, names=names)
    def add_attribute(self, name_of_the_set, name_of_the_attribute, name_of_the_method, names=None):
        self.mapping_from_name_to_set_definition[name_of_the_set].add_attribute(name_of_the_attribute, name_of_the_method, names=names)
    def add_query(self, name_of_the_set, name_of_the_query, names=(), public_name=None):
        self.mapping_from_name_to_set_definition[name_of_the_set].add_query(name_of_the_query, names=names, public_name=public_name)
    def add_method(self, name_of_the_set, name_of_the_method, public_name=None):
        self.mapping_from_name_to_set_definition[name_of_the_set].add_method(name_of_the_method, public_name=public_name)
    def add_select_from_particle(self, name_of_the_set, name, names=(), public_name=None):
        self.mapping_from_name_to_set_definition[name_of_the_set].add_select_from_particle(name, names=names, public_name=public_name)
    def define_extra_keywords(self, name_of_the_set, dictionary):
        self.mapping_from_name_to_set_definition[name_of_the_set].define_extra_keywords(dictionary)
    def add_subselect_in_set(self, name_of_the_set, name, set_query_arguments_name=None, get_number_of_particles_name=None, public_name=None):
        self.mapping_from_name_to_set_definition[name_of_the_set].add_subselect_in_set(
            name,
            set_query_arguments_name=set_query_arguments_name,
            get_number_of_particles_name=get_number_of_particles_name,
            public_name=public_name
        )
    def add_subselect_from_particle(self, name_of_the_set, name, get_number_of_particles_name=None, public_name=None):
        self.mapping_from_name_to_set_definition[name_of_the_set].add_subselect_from_particle(
            name,
            get_number_of_particles_name=get_number_of_particles_name,
            public_name=public_name
        )
    def _cleanup_instances(self):
        """Drop all cached set instances (definitions are kept)."""
        self.mapping_from_name_to_set_instance = {}
class OverriddenCodeInterface(object):
    """Proxy returned by ``overridden()``; forwards attribute lookups to the
    wrapped component."""
    def __init__(self, code_interface):
        self.code_interface = code_interface
    def __getattr__(self, name):
        # calls __getattr__ directly (not getattr()) so the lookup always goes
        # through the component's handler chain, even for names that also
        # exist as ordinary attributes — NOTE(review): presumably intentional;
        # confirm before changing
        return self.code_interface.__getattr__(name)
class InCodeComponentImplementation(OldObjectsBindingMixin, OptionalAttributes):
    """High-level wrapper around a low-level (legacy) code interface.

    Attribute access is routed through an ordered chain of handlers
    (legacy interface, unit-aware methods, properties, parameters,
    particle sets, optional state machine, unit conversion, error codes);
    each handler that supports a name may transform the result produced
    by the handlers before it.
    """
    def __init__(self, legacy_interface, **options):
        OptionalAttributes.__init__(self, **options)
        self.legacy_interface = legacy_interface
        self._options = options
        self._handlers = []
        self.__init_handlers__(legacy_interface, options)
    def __init_handlers__(self, legacy_interface, options):
        # note: the order of the handlers matters (see __getattr__)
        self._handlers.append(LegacyInterfaceHandler(legacy_interface))
        self._handlers.append(HandleMethodsWithUnits(self))
        self._handlers.append(HandlePropertiesWithUnits(self))
        self._handlers.append(HandleParameters(self))
        self._handlers.append(HandleParticles(self))
        if self.must_handle_state:
            self._handlers.append(HandleState(self, **options))
        self._handlers.append(HandleConvertUnits(self))
        self._handlers.append(HandleErrorCodes(self))
        self.setup()
    @option(type='boolean', sections=("code", "state",))
    def must_handle_state(self):
        # option: disable to skip state-machine handling entirely
        return True
    def setup(self):
        """Give every handler the chance to collect definitions from self."""
        for x in self._handlers:
            x.setup(self)
    # the define_* methods below are hooks; subclasses override the ones
    # relevant to their code (called by the corresponding handler's setup)
    def define_state(self, handler):
        pass
    def define_methods(self, handler):
        pass
    def define_properties(self, handler):
        pass
    def define_converter(self, handler):
        pass
    def define_parameters(self, handler):
        pass
    def define_particle_sets(self, handler):
        pass
    def define_data_sets(self, handler):
        pass
    def define_grids(self, handler):
        pass
    def define_errorcodes(self, handler):
        pass
    def get_handler(self, name):
        """Return the handler answering to role *name* ('METHOD', 'PARAMETER', ...)."""
        for x in self._handlers:
            if x.has_name(name):
                return x
        return None
    def __getattr__(self, name):
        """Resolve *name* through the handler chain.

        Every supporting handler is given the previous handler's result,
        so e.g. unit conversion can wrap a method found earlier.
        """
        result = None
        found = False
        for handler in self._handlers:
            if handler.supports(name, found):
                result = handler.get_attribute(name, result)
                found = True
        if not found:
            raise AttributeError(name)
        return result
    def __dir__(self):
        """Include the names exposed by all handlers."""
        result = set(dir(type(self)))
        result |= set(self.__dict__.keys())
        for handler in self._handlers:
            result |= handler.attribute_names()
        return list(result)
    def overridden(self):
        """Return a proxy that resolves attributes via the handler chain."""
        return OverriddenCodeInterface(self)
    def get_name_of_current_state(self):
        return self.state_machine.get_name_of_current_state()
    def _create_new_grid(self, builder_function, **extra_arguments):
        """Build an ad-hoc grid from a definition filled in by *builder_function*."""
        handler = self.get_handler('PARTICLES')
        definition = GridDefinition(handler, grid_class=extra_arguments.get("grid_class", datamodel.Grid))
        builder_function(definition, **extra_arguments)
        return definition.new_set_instance(handler)
    def __setstate__(self, state):
        self.__dict__ = state
    def data_store_names(self):
        """Names of all defined particle sets / grids."""
        self.before_get_data_store_names()
        return list(self.get_handler('PARTICLES').mapping_from_name_to_set_definition.keys())
    def parameter_set_names(self):
        """Names of all defined parameter sets."""
        #~ self.before_get_data_store_names()
        return list(self.get_handler('PARAMETER').definitions.keys())
    @property
    def model_name(self):
        return type(self).__name__
class IncorrectMethodDefinition(IncorrectWrappedMethodException):
    # raised when a METHOD definition's number of units does not match the
    # wrapped function's parameters or results
    formatstring = "Incorrect definition of method '{0}' of class '{1}', the number of {4} do not match, expected {2}, actual {3}."
class PropertyDefinition(object):
    """Definition of a property (without an explicit unit) backed by an
    interface function.

    The value is obtained by calling
    ``handler.interface.<functionname>(**keyword_arguments)``.
    """
    def __init__(self, handler, functionname, publicname, keyword_arguments={}):
        self.functionname = functionname
        self.publicname = publicname
        self.handler = handler
        # fix: store the given keyword arguments (previously the parameter
        # was dropped and an empty dict was always used); copy defensively
        # so the mutable default argument is never shared between instances
        self.keyword_arguments = dict(keyword_arguments)
        self._method = None  # resolved lazily on first access
    def get_value(self, original):
        """Evaluate the property and return its value.

        Iterable results are combined into a single VectorQuantity.
        """
        if self._method is None:
            self._method = getattr(self.handler.interface, self.functionname)
        result = self._method(**self.keyword_arguments)
        if hasattr(result, "__iter__"):
            return quantities.VectorQuantity.new_from_scalar_quantities(*result)
        else:
            return result
| 61,635
| 35.514218
| 210
|
py
|
amuse
|
amuse-main/src/amuse/support/parameter_tools.py
|
import numpy
from configparser import ConfigParser
from collections import defaultdict, OrderedDict
from io import StringIO
try:
    import f90nml
    HAS_F90NML = True
except ImportError:
    # f90nml is an optional dependency; namelist support is disabled
    # without it (a bare `except:` previously hid unrelated errors too)
    HAS_F90NML = False
from amuse.units.quantities import new_quantity, to_quantity, is_quantity
# parameters can be supplied as:
#
# parameters=(
#   dict(name="name", group_name="name", short="codename", dtype="int32", default=64, description="description", ptype="nml" [, set_name="name"]), ...
# )

# canonical dtype labels for python types, used when auto-adding parameters
dtype_str = {
    str: "str",
    bool: "bool",
    int: "int32",
    float: "float64",
    complex: "complex",
    list: "list",
    dict: "dict",
    OrderedDict: "OrderedDict",
}
def parameter_list_py_code(parameters, label="parameters"):
    """Render *parameters* as python source defining a tuple of dicts.

    Entries are grouped by group_name; groups are separated by a '#' line.
    *parameters* maps (short, group) keys to parameter description dicts.
    """
    header = """from omuse.units import units
from collections import OrderedDict
{label} = (
""".format(label=label)
    template = '    dict(group_name={group}, name={name}, short={short}, dtype={dtype}, default={default}, description={description}, ptype={ptype}),\n'
    footer = """)
"""
    by_group = defaultdict(dict)
    for short, group in parameters:
        by_group[group][short] = parameters[(short, group)]
    chunks = []
    for group, members in by_group.items():
        lines = ""
        for short, p in members.items():
            lines += template.format(
                name=repr(p["name"]),
                group=repr(p["group_name"]),
                short=repr(p["short"]),
                dtype=repr(p["dtype"]),
                default=repr(p["default"]),
                description=repr(p["description"]),
                ptype=repr(p["ptype"]),
            )
        chunks.append(lines)
    return header + "#\n".join(chunks) + footer
class _CodeWithFileParameters(object):
_ptypes=None
def _write_file(self, inputfile, **kwargs):
raise Exception("not implemented")
def _read_file(self, inputfile, rawvals, **kwargs):
raise Exception("not implemented")
def define_parameters(self, handler):
_tmp=dict()
for p in self._parameters.values():
if p["ptype"] not in self._ptypes:
continue
parameter_set_name=p.get("set_name", None) or self._prefix+p["group_name"].replace(" ","_")
if parameter_set_name not in _tmp:
_tmp[parameter_set_name]=[ x.name for x in handler.definitions[parameter_set_name] ]
if not p["name"] in _tmp[parameter_set_name]:
handler.add_interface_parameter( p["name"], p["description"], p["default"],
"before_set_interface_parameter", parameter_set=parameter_set_name)
self.set_parameters()
def set_parameters(self):
for p in self._parameters.values():
if p["ptype"] not in self._ptypes:
continue
parameter_set_name=p.get("set_name", None) or self._prefix+p["group_name"].replace(" ","_")
parameter_set=getattr(self, parameter_set_name)
name=p["name"]
value=p.get("value", p["default"])
setattr(parameter_set, name, value)
def interpret_value(self,value, dtype=None):
raise Exception("not implemented")
def read_parameters(self, inputfile, add_missing_parameters=False):
self._file=inputfile
_nml_params = f90nml.read(inputfile)
rawvals, comments = self._read_file(inputfile)
for key, rawval in rawvals.items():
if key in self._parameters:
group_name=self._parameters[key]["group_name"]
name=self._parameters[key]["name"]
dtype=self._parameters[key]["dtype"]
val=self.interpret_value( rawval, dtype=dtype)
if is_quantity(self._parameters[key]["default"]):
self._parameters[key]["value"]=new_quantity(val, to_quantity(self._parameters[key]["default"]).unit)
else:
self._parameters[key]["value"]=val
else:
if not add_missing_parameters:
print("'{0}' of group '{1}' not in the parameters list".format(*key))
else:
value=rawval
description=comments.get(key, "unknown parameter read from {0}".format(inputfile))
self._parameters[key]=dict(
group_name=key[1],
name=key[0],
short_name=key[0],
default=value,
value=value,
short=key[0],
ptype=self._ptypes[0],
dtype=dtype_str[type(value)],
description=description
)
def write_parameters(self, outputfile, **options):
rawvals=dict()
for key, p in self._parameters.items():
name=p["name"]
group_name=p["group_name"]
short=p["short"]
parameter_set_name=p.get("set_name", None) or self._prefix+p["group_name"].replace(" ","_")
parameter_set=getattr(self, parameter_set_name)
if is_quantity(p["default"]):
value=to_quantity(getattr(parameter_set, name)).value_in(p["default"].unit)
else:
value=getattr(parameter_set, name)
rawvals[key]=self.output_format_value(value)
self._write_file(outputfile, rawvals, **options)
class CodeWithNamelistParameters(_CodeWithFileParameters):
    """
    Mix-in class to 1) namelist file support to code interfaces and 2) automatically generate
    parameter sets from descriptions or namelist files.

    This class takes a list of parameter descriptions (optional) and has functions to
    read and write namelist files. Every namelist section corresponds to a different
    parameter set.
    """
    _ptypes = ["nml", "nml+normal"]

    def __init__(self, _parameters, prefix="parameters_"):
        if not HAS_F90NML:
            # ImportError (a subclass of Exception, so existing handlers still
            # work) makes the missing optional dependency explicit
            raise ImportError("f90nml package not available")
        # keys are normalised (short lower-case) to match _read_file
        self._parameters = dict([((x["short"].lower(), x["group_name"]), x) for x in _parameters])
        self._prefix = prefix
        self._file = None

    def _read_file(self, inputfile):
        """Read *inputfile* as a namelist.

        Returns ({(short, GROUP): value}, {}) — namelists carry no usable
        comments, so the comments dict is empty.
        """
        _nml_params = f90nml.read(inputfile).todict()
        rawvals = OrderedDict()
        for group, d in _nml_params.items():
            for short, val in d.items():
                # normalise: option lower-case, group upper-case
                rawvals[(short.lower(), group.upper())] = val
        return rawvals, dict()

    def _write_file(self, outputfile, rawvals, do_patch=False, nml_file=None):
        """Write *rawvals* as a namelist; optionally patch an existing file."""
        patch = OrderedDict()
        for key, rawval in rawvals.items():
            if rawval is None:  # omit if value is None
                continue
            if isinstance(rawval, numpy.ndarray):
                rawval = list(rawval)  # necessary until f90nml supports numpy arrays
            if key[1] not in patch:
                patch[key[1]] = OrderedDict()
            patch[key[1]][key[0]] = rawval
        if do_patch:
            # fix: fall back to self._file (set by read_parameters); the code
            # previously referenced self._nml_file, which is never assigned,
            # so do_patch=True always raised AttributeError
            _tmp = f90nml.read(nml_file or self._file)
            _tmp.update(patch)
            f90nml.write(_tmp, outputfile, force=True)
            # workaround because f90nml.patch can produce errors (f90nml 1.1.2):
            #~ f90nml.patch(nml_file or self._file,f90nml.Namelist(patch),outputfile)
        else:
            f90nml.write(patch, outputfile, force=True)

    def write_namelist_parameters(self, outputfile, do_patch=False, nml_file=None):
        """Public alias for write_parameters with namelist-specific options."""
        return self.write_parameters(outputfile, do_patch=do_patch, nml_file=nml_file)

    def read_namelist_parameters(self, inputfile, add_missing_parameters=False):
        """Public alias for read_parameters."""
        return self.read_parameters(inputfile, add_missing_parameters)

    def output_format_value(self, value):
        """Values are handed to f90nml unchanged."""
        return value

    def interpret_value(self, value, dtype=None):
        return value  # dtype, arrays should be handled by f90nml
class CodeWithIniFileParameters(_CodeWithFileParameters):
    """
    Mix-in class to 1) INI-like file support to code interfaces and 2) automatically generate
    parameter sets from descriptions or Ini files.

    This class takes a list of parameter descriptions (optional) and has functions to
    read and write INI files. Every section corresponds to a different parameter set.
    """
    _ptypes = ["ini", "ini+normal"]

    def __init__(self, _parameters=None, prefix="ini_"):
        if _parameters is None:
            _parameters = dict()
        self._parameters = dict([((x["name"], x["group_name"]), x) for x in _parameters])
        self._optionxform = str  # keep option names case-sensitive
        self._prefix = prefix
        self._file = None

    def _read_file(self, inputfile):
        """Split *inputfile* into raw values and trailing '#' comments.

        Returns two dicts, both keyed on (option, section).
        """
        values = StringIO()
        comments = StringIO()
        # fix: close the file deterministically (it was left open before)
        with open(inputfile, "r") as f:
            for line in f:
                if "=" in line:
                    key, val = line.split("=", 1)
                    if "#" in val:
                        val, comment = val.split("#", 1)
                        comments.write("=".join([key, comment]) + "\n")
                    values.write("=".join([key, val]) + "\n")
                else:
                    # fix: *line* still carries its newline — the old code
                    # appended another one, producing spurious blank lines
                    values.write(line)
                    comments.write(line)
        values.seek(0)
        comments.seek(0)
        return self.parse_fp(values), self.parse_fp(comments)

    def parse_fp(self, fp):
        """Parse an INI stream into a {(option, section): raw string} dict."""
        parser = ConfigParser()
        parser.optionxform = self._optionxform
        # fix: read_file replaces readfp, which was removed in Python 3.12
        parser.read_file(fp)
        rawvals = dict()
        for section in parser.sections():
            for option in parser.options(section):
                rawvals[(option, section)] = parser.get(section, option)
        return rawvals

    def _convert(self, value, dtype):
        """Convert the raw string *value* according to *dtype*."""
        if dtype == "bool":
            # anything not explicitly false-like counts as True
            if value.lower() in ["0", "false", "off", "no"]:
                return False
            else:
                return True
        if dtype in ["str", None]:
            return value
        # numeric dtypes: numpy parses the token (returns a float64 scalar);
        # note numpy.fromstring(text) is deprecated — kept for identical output
        return numpy.fromstring(value, sep=",")[0]

    def interpret_value(self, value, dtype=None):
        """Interpret a raw string; comma-separated values become a list."""
        if value == "":
            return value
        if value.find(',') >= 0:
            return [self._convert(x, dtype) for x in value.split(",")]
        return self._convert(value, dtype)

    def _write_file(self, outputfile, rawvals):
        """Write *rawvals* to *outputfile* in INI format."""
        parser = ConfigParser()
        parser.optionxform = self._optionxform
        for (short, section), rawval in rawvals.items():
            if not parser.has_section(section):
                parser.add_section(section)
            if isinstance(rawval, list):
                rawval = ','.join(rawval)
            parser.set(section, short, self.output_format_value(rawval))
        with open(outputfile, "w") as f:
            parser.write(f)

    def output_format_value(self, value):
        """Format a value for writing; lists become comma-separated strings."""
        if isinstance(value, list):
            return ','.join([str(v) for v in value])
        else:
            return str(value)

    def write_inifile_parameters(self, outputfile):
        """Public alias for write_parameters."""
        return self.write_parameters(outputfile)

    def read_inifile_parameters(self, inputfile, add_missing_parameters=False):
        """Public alias for read_parameters."""
        return self.read_parameters(inputfile, add_missing_parameters)
| 11,751
| 35.496894
| 150
|
py
|
amuse
|
amuse-main/src/amuse/support/code.py
|
from amuse.support import interface
from amuse.support.options import OptionalAttributes
from amuse.support.core import late
from amuse import datamodel
import types
import inspect
import re
def _getdoc(object):
"""Get the doc string or comments for an object."""
result = inspect.getdoc(object) or inspect.getcomments(object)
return result and re.sub('^ *\n', '', result.rstrip()) or ''
class StateMethodDescriptor(object):
    """Descriptor restricting a method to run only in the given handler states.

    On instance access the wrapped function is bound, routed through the
    instance's state handler, and cached on the instance under its own name.
    """

    def __init__(self, states, function):
        self.states = states
        self.function = function
        state_doc = "STATE: can run in states {0}".format(self.states)
        doc_string = _getdoc(function)
        # BUGFIX: removed stray debug print of the docstring.
        if doc_string:
            doc_string += '\n'
        doc_string += state_doc
        self.function.__doc__ = doc_string

    def __get__(self, instance, owner):
        if instance is None:
            # Class-level access yields the raw function.
            return self.function
        else:
            method = types.MethodType(self.function, instance)
            name = self.get_name()
            function_with_state = instance.state_handler.get_attribute(name, method)
            # Cache on the instance so later lookups bypass this descriptor.
            setattr(instance, name, function_with_state)
            return function_with_state

    def get_name(self):
        """Name of the innermost wrapped function (descriptors may nest)."""
        if isinstance(self.function, StateMethodDescriptor):
            return self.function.get_name()
        else:
            return self.function.__name__

    def define_on(self, handler):
        """Register this method with *handler* for each allowed state."""
        for state in self.states:
            handler.add_method(state, self.function.__name__)
class StateTransitionDescriptor(object):
    """Descriptor marking a method as a transition between two handler states."""

    def __init__(self, from_state, to_state, function):
        self.from_state = from_state
        self.to_state = to_state
        self.function = function

    def __get__(self, instance, owner):
        # Class access returns the raw function; instance access returns the
        # state-checked bound callable, cached on the instance.
        plain = self.get_function()
        if instance is None:
            return plain
        bound = types.MethodType(plain, instance)
        attr_name = self.get_name()
        wrapped = instance.state_handler.get_attribute(attr_name, bound)
        setattr(instance, attr_name, wrapped)
        return wrapped

    def get_name(self):
        """Name of the innermost wrapped function (descriptors may nest)."""
        inner = self.function
        if isinstance(inner, StateTransitionDescriptor):
            return inner.get_name()
        return inner.__name__

    def get_function(self):
        """The innermost wrapped function."""
        inner = self.function
        if isinstance(inner, StateTransitionDescriptor):
            return inner.get_function()
        return inner

    def define_on(self, handler):
        """Register this transition (and any nested ones) with *handler*."""
        handler.add_transition(self.from_state, self.to_state, self.get_name())
        if isinstance(self.function, StateTransitionDescriptor):
            self.function.define_on(handler)
def state_method(*arguments):
    """Decorator factory: restrict a method to the given handler states."""
    def decorator(func):
        return StateMethodDescriptor(arguments, func)
    return decorator
def state_transition(from_state, to_state):
    """Decorator factory: mark a method as a from_state -> to_state transition."""
    def decorator(func):
        return StateTransitionDescriptor(from_state, to_state, func)
    return decorator
class MetaObjectWithState(type):
    """Metaclass that gathers state method/transition descriptors (including
    those inherited from bases) into a ``__state_definitions__`` list."""

    def __new__(mcs, name, bases, dict):
        collected = []
        for base in bases:
            collected.extend(getattr(base, '__state_definitions__', []))
        for value in dict.values():
            if isinstance(value, (StateTransitionDescriptor, StateMethodDescriptor)):
                collected.append(value)
        dict['__state_definitions__'] = collected
        return type.__new__(mcs, name, bases, dict)
class ObjectWithState(object, metaclass=MetaObjectWithState):
    # State handlers start here unless a subclass overrides it.
    INITIAL_STATE = 'START'

    def __init__(self):
        """Create the state handler and register all collected definitions."""
        self.state_handler = interface.HandleState(self)
        # __state_definitions__ is assembled by MetaObjectWithState.
        for x in self.__state_definitions__:
            x.define_on(self.state_handler)
        self.state_handler.set_initial_state(self.INITIAL_STATE)
class StoppingCondition(object):
    """In-memory bookkeeping for a single named stopping condition.

    Tracks the enabled/fired flags and, per condition slot, the particle
    sets involved when the condition fired.
    """

    # Particle buffers pre-allocated on construction and on unset().
    # BUGFIX: __init__ previously allocated 256 while unset() reset to only
    # 10 -- the two are now consistent (particles()/set() already guard
    # out-of-range indices, so this stays externally compatible).
    NUMBER_OF_BUFFERS = 256

    def __init__(self, name):
        self.name = name
        self._is_enabled = False
        self._is_set = False
        self._particles = self._new_buffers()

    def _new_buffers(self):
        # One empty particle set per condition slot.
        return [datamodel.Particles() for _ in range(self.NUMBER_OF_BUFFERS)]

    def is_supported(self):
        """This in-memory implementation always supports the condition."""
        return True

    def is_enabled(self):
        return self._is_enabled

    def is_set(self):
        """True only when enabled AND the condition has fired."""
        return self._is_enabled and self._is_set

    def enable(self):
        self._is_enabled = True

    def disable(self):
        self._is_enabled = False

    def set(self, *particles):
        """Mark the condition as fired and accumulate the given particle sets."""
        self._is_set = True
        for i, particle_set in enumerate(particles):
            if len(self._particles) == i:
                # Grow on demand when more sets arrive than buffers exist.
                self._particles.append(datamodel.Particles())
            self._particles[i].add_particles(particle_set)

    def unset(self):
        """Clear the fired flag and reset all stored particle sets."""
        if self._is_set:
            self._is_set = False
            self._particles = self._new_buffers()

    def particles(self, index):
        """Particle set stored at *index*; an empty set when out of range."""
        if index >= len(self._particles):
            return datamodel.Particles()
        return self._particles[index]
| 5,372
| 30.421053
| 85
|
py
|
amuse
|
amuse-main/src/amuse/support/literature.py
|
import sys
try:
from docutils import core
except ValueError:
import os
import locale
os.environ['LC_CTYPE'] = 'C'
os.environ['LANG'] = 'C'
from docutils import core
import atexit
import shutil
import traceback
import importlib
from os.path import exists
from collections import namedtuple
from docutils import nodes
from amuse.support import exceptions
try:
from amuse.version import version as amuse_version
except ImportError:
amuse_version = "unknown version"
import amuse
# A registered class plus the LiteratureReference footnotes found in its docstring.
ClassWithLiteratureReferences = namedtuple(
    "ClassWithLiteratureReferences",
    "name_of_class_with_refs literature_references_of_class"
)
# A single docstring footnote: its docutils id and the raw footnote text.
LiteratureReference = namedtuple(
    "LiteratureReference",
    "id footnote"
)
class TrackLiteratureReferences:
    """
    .. [#] DOI:10.5281/zenodo.1435860
    .. [#] ADS:2018araa.book.....P (Portegies Zwart, S. & McMillan, S.L.W., 2018)
    .. [#] ADS:2013CoPhC.183..456P ** (Portegies Zwart, S. et al., 2013)
    .. [#] ADS:2013A&A...557A..84P ** (Pelupessy, F. I. et al., 2013)
    .. [#] ADS:2009NewA...14..369P (Portegies Zwart, S. et al., 2009)
    """
    # NOTE: the docstring above is *data*, not documentation -- this class
    # parses its own docstring to produce the framework's citations.

    INSTANCE = None

    def __init__(self):
        self.registered_classes = set([])
        self.must_show_literature_references_atexit = True
        self.original_excepthook = None

    @classmethod
    def default(cls):
        """Return the singleton tracker, creating and registering it on first use."""
        if cls.INSTANCE is None:
            cls.INSTANCE = cls()
            cls.INSTANCE.register()
        return cls.INSTANCE

    def register(self):
        """Install the excepthook wrapper and the appropriate atexit reporter."""
        self.original_excepthook = sys.excepthook
        sys.excepthook = self.exception_hook
        if "--bibtex" in sys.argv:
            atexit.register(self.atexit_bibtex_hook)
        else:
            atexit.register(self.atexit_hook)

    @classmethod
    def suppress_output(cls):
        """Disable the atexit citation report for this process."""
        cls.default().must_show_literature_references_atexit = False

    def register_class(self, cls):
        """Record *cls* so its docstring citations are reported at exit."""
        self.registered_classes.add(cls)

    def exception_hook(self, *arguments):
        """On an uncaught exception, suppress the exit report and delegate to
        the original excepthook."""
        self.must_show_literature_references_atexit = False
        self.original_excepthook(*arguments)

    def atexit_bibtex_hook(self):
        """Exit handler for --bibtex: write .tex/.bib citation files and print
        the citation report."""
        if self.original_excepthook is not None:
            sys.excepthook = self.original_excepthook
            self.original_excepthook = None
        if (
            self.must_show_literature_references_atexit
            and "--no-report-references" not in sys.argv
        ):
            texstring = self.all_literature_references_texstring()
            if texstring:
                tex_filename = f"bib-{sys.argv[0]}.tex"
                bib_filename = f"bib-{sys.argv[0]}.bib"
                filenumber = 0
                # Never clobber existing files: bump a suffix until free.
                while exists(tex_filename) or exists(bib_filename):
                    filenumber += 1
                    tex_filename = f"bib-{sys.argv[0]}-{filenumber}.tex"
                    bib_filename = f"bib-{sys.argv[0]}-{filenumber}.bib"
                terminal_message = f"""
In this session you have used the modules below.
Please use the {tex_filename} and {bib_filename} files to include the relevant
citations.
"""
                with open(tex_filename, 'w') as tex_out:
                    tex_out.write(
                        f"{texstring}"
                    )
                shutil.copyfile(amuse.get_data('AMUSE.bib'), bib_filename)
                print(terminal_message)
                print(self.all_literature_references_string())

    def atexit_hook(self):
        """Default exit handler: print the citation report for used modules."""
        if self.original_excepthook is not None:
            sys.excepthook = self.original_excepthook
            self.original_excepthook = None
        if (
            self.must_show_literature_references_atexit
            and "--no-report-references" not in sys.argv
        ):
            string = self.all_literature_references_string()
            if string:
                prefix = """
In this session you have used the AMUSE modules below.
Please cite any relevant articles:
"""
                print(prefix + string)

    def get_literature_dict_of_class(self, cls):
        """
        get the name and bibkeys from the class, as a dict.
        """
        result = {}
        for current_class in cls.__mro__:
            docstring_in = current_class.__doc__
            if docstring_in:
                if hasattr(current_class, "version"):
                    version = current_class.version()
                else:
                    version = amuse_version
                name = current_class.__name__
                if name.endswith("Interface"):
                    name = "AMUSE-" + name[:-9]
                objectname = f"{name} ({version})"
                doctree = core.publish_doctree(source=docstring_in)
                ref_keys = list(doctree.ids.keys())
                natsort(ref_keys)
                ref_values = [doctree.ids[key] for key in ref_keys]
                for ival in ref_values:
                    if isinstance(ival, nodes.footnote):
                        line = ival.rawsource.split()[0]
                        if (
                            line.startswith('ADS:')
                            or line.startswith('DOI:')
                        ):
                            # setdefault replaces the former `in result.keys()`
                            # check + `+= [..]` append; same behaviour.
                            result.setdefault(objectname, []).append(line[4:])
        return result

    def get_literature_list_of_class(self, cls):
        """
        filter the refs from the docstring, if there are no refs nothing is
        appended
        """
        result = []
        for current_class in cls.__mro__:
            docstring_in = current_class.__doc__
            if docstring_in:
                if hasattr(current_class, "version"):
                    version = current_class.version()
                else:
                    version = amuse_version
                name = current_class.__name__
                if name.endswith("Interface"):
                    name = "AMUSE-" + name[:-9]
                objectname = f"{name} ({version})"
                doctree = core.publish_doctree(source=docstring_in)
                ref_keys = list(doctree.ids.keys())
                natsort(ref_keys)
                ref_values = [doctree.ids[key] for key in ref_keys]
                literature_references_of_class = []
                for ikey, ival in zip(ref_keys, ref_values):
                    if isinstance(ival, nodes.footnote):
                        literature_references_of_class.append(
                            LiteratureReference(ikey, ival.rawsource)
                        )
                filled = len(literature_references_of_class) > 0
                if filled:
                    result.append(
                        ClassWithLiteratureReferences(
                            objectname,
                            literature_references_of_class
                        )
                    )
        return result

    def get_literature_dict(self):
        """Merge the per-class citation dicts of every registered class."""
        result = {}
        for x in self.registered_classes:
            # dict.update is equivalent to the former version-gated `|=` /
            # `{**a, **b}` merge and works on every supported Python version.
            result.update(self.get_literature_dict_of_class(x))
        return result

    def get_literature_list(self):
        """Concatenate the per-class citation lists of every registered class."""
        result = []
        for x in self.registered_classes:
            result.extend(self.get_literature_list_of_class(x))
        return result

    def all_literature_references_string(self):
        """Human-readable citation report, including AMUSE's own references."""
        lines = []
        for s in self.get_literature_list():
            lines.append(
                f'\n\t"{s.name_of_class_with_refs}"'
            )
            for literature_reference_of_class_item in s.literature_references_of_class:
                lines.append(
                    f'\t\t{literature_reference_of_class_item.footnote}'
                )
        lines.append(f'\n\t"AMUSE ({amuse_version})"')
        amuse_list = self.get_literature_list_of_class(type(self))
        for x in amuse_list:
            for literature_reference_of_class_item in x.literature_references_of_class:
                lines.append(
                    f'\t\t{literature_reference_of_class_item.footnote}'
                )
        return "\n".join(lines)

    def all_literature_references_texstring(self):
        r"""LaTeX snippet citing AMUSE itself plus every registered module."""
        result = 'In this article, we used the following AMUSE modules: '
        result += f'AMUSE-framework {amuse_version} \\citep{{'
        amuse_lib = self.get_literature_dict_of_class(type(self))
        for name in amuse_lib.keys():
            # join reproduces the former index-checked comma loop exactly.
            result += ', '.join(amuse_lib[name])
        result += '}'
        lib = self.get_literature_dict()
        for name in lib.keys():
            result += (
                f', {name} \\citep{{'
            )
            result += ', '.join(lib[name])
            result += '}'
        result += '.\n'
        return result

    def names_of_classes_with_references(self):
        """Names (with versions) of all registered classes that carry citations."""
        return [x.name_of_class_with_refs for x in self.get_literature_list()]
def literature_references():
    """Convenience wrapper: the full citation report as one string."""
    tracker = TrackLiteratureReferences.default()
    return tracker.all_literature_references_string()
class LiteratureReferencesMixIn(object):
    """Mix-in that registers the class with the literature tracker on
    construction, so its docstring citations are reported at exit."""

    def __init__(self):
        self.register_use()

    @classmethod
    def version(cls):
        """Version of the package the class lives in, with fallbacks."""
        try:
            return importlib.import_module(
                '..version',
                cls.__module__
            ).version
        except (ImportError, ValueError):
            pass
        try:
            from amuse.version import version
        except ImportError:
            version = "unknown"
        return version

    @classmethod
    def print_literature_references(cls):
        """Print the citation report for all codes used so far."""
        print("You are currently using the following codes, which contain literature references")
        print(TrackLiteratureReferences.default().all_literature_references_string())

    @classmethod
    def export2html(cls):
        # Placeholder: HTML export is not implemented.
        pass

    @classmethod
    def export2bibtex(cls):
        # Placeholder: BibTeX export is not implemented.
        pass

    @classmethod
    def names_of_classes_with_references(cls):
        """Names of all registered classes that carry citations."""
        return TrackLiteratureReferences.default().names_of_classes_with_references()

    @classmethod
    def all_literature_references_string(cls):
        """The full citation report as one string."""
        return TrackLiteratureReferences.default().all_literature_references_string()

    @classmethod
    def register_use(cls):
        """Record this class with the singleton literature tracker."""
        TrackLiteratureReferences.default().register_class(cls)
# ------------------------------------------------------------------------------
# from natsort.py: Natural string sorting by Seo Sanghyeon and Connelly Barnes.
# ------------------------------------------------------------------------------
def try_int(s):
    """Convert to integer if possible, otherwise return unchanged."""
    try:
        return int(s)
    except ValueError:
        return s


def natsort_key(s):
    """Sort key: s split into alternating text and integer chunks."""
    import re
    return [try_int(chunk) for chunk in re.findall(r'(\d+|\D+)', s)]


def natsort(seq):
    """In-place natural string sort."""
    seq.sort(key=natsort_key)
| 11,438
| 32.545455
| 97
|
py
|
amuse
|
amuse-main/src/amuse/support/thirdparty/texttable.py
|
#!/usr/bin/env python
#
# texttable - module for creating simple ASCII tables
# Copyright (C) 2003-2015 Gerome Fournier <jef(at)foutaise.org>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""module for creating simple ASCII tables
Example:
table = Texttable()
table.set_cols_align(["l", "r", "c"])
table.set_cols_valign(["t", "m", "b"])
table.add_rows([["Name", "Age", "Nickname"],
["Mr\\nXavier\\nHuon", 32, "Xav'"],
["Mr\\nBaptiste\\nClement", 1, "Baby"]])
print table.draw() + "\\n"
table = Texttable()
table.set_deco(Texttable.HEADER)
table.set_cols_dtype(['t', # text
'f', # float (decimal)
'e', # float (exponent)
'i', # integer
'a']) # automatic
table.set_cols_align(["l", "r", "r", "r", "l"])
table.add_rows([["text", "float", "exp", "int", "auto"],
["abcd", "67", 654, 89, 128.001],
["efghijk", 67.5434, .654, 89.6, 12800000000000000000000.00023],
["lmn", 5e-78, 5e-78, 89.4, .000000000000128],
["opqrstu", .023, 5e+78, 92., 12800000000000000000000]])
print table.draw()
Result:
+----------+-----+----------+
| Name | Age | Nickname |
+==========+=====+==========+
| Mr | | |
| Xavier | 32 | |
| Huon | | Xav' |
+----------+-----+----------+
| Mr | | |
| Baptiste | 1 | |
| Clement | | Baby |
+----------+-----+----------+
text float exp int auto
===========================================
abcd 67.000 6.540e+02 89 128.001
efgh 67.543 6.540e-01 90 1.280e+22
ijkl 0.000 5.000e-78 89 0.000
mnop 0.023 5.000e+78 92 1.280e+22
"""
__all__ = ["Texttable", "ArraySizeError"]
__author__ = 'Gerome Fournier <jef(at)foutaise.org>'
__license__ = 'LGPL'
__version__ = '0.8.3'
__credits__ = """\
Jeff Kowalczyk:
- textwrap improved import
- comment concerning header output
Anonymous:
- add_rows method, for adding rows in one go
Sergey Simonenko:
- redefined len() function to deal with non-ASCII characters
Roger Lew:
- columns datatype specifications
Brian Peterson:
- better handling of unicode errors
Frank Sachsenheim:
- add Python 2/3-compatibility
Maximilian Hils:
- fix minor bug for Python 3 compatibility
"""
import sys
import string
try:
if sys.version >= '2.3':
import textwrap
elif sys.version >= '2.2':
from optparse import textwrap
else:
from optik import textwrap
except ImportError:
sys.stderr.write("Can't import textwrap module!\n")
raise
if sys.version >= '2.7':
from functools import reduce
def len(iterable):
    """Redefining len here so it will be able to work with non-ASCII characters.

    NOTE: intentionally shadows the builtin ``len`` within this module
    (historical texttable design).
    """
    if not isinstance(iterable, str):
        return iterable.__len__()
    try:
        if sys.version >= '3.0':
            # BUGFIX: this previously read ``return len(str)``, which recursed
            # with the str *type* and only produced a result via the bare
            # except fallback below. On Python 3, str length is just __len__.
            return iterable.__len__()
        else:
            return len(str(iterable, 'utf'))
    except Exception:
        return iterable.__len__()
class ArraySizeError(Exception):
    """Raised when a supplied row does not match the required column count."""

    def __init__(self, msg):
        self.msg = msg
        super().__init__(msg, '')

    def __str__(self):
        return self.msg
class Texttable:
    """Simple ASCII table renderer; see the module docstring for examples."""

    # Decoration bit flags; combine with | and pass to set_deco().
    BORDER = 1
    HEADER = 1 << 1
    HLINES = 1 << 2
    VLINES = 1 << 3

    def __init__(self, max_width=80):
        """Constructor

        - max_width is an integer, specifying the maximum width of the table
        - if set to 0, size is unlimited, therefore cells won't be wrapped
        """
        if max_width <= 0:
            max_width = False
        self._max_width = max_width
        self._precision = 3
        # All four decorations are enabled by default.
        self._deco = Texttable.VLINES | Texttable.HLINES | Texttable.BORDER | \
            Texttable.HEADER
        self.set_chars(['-', '|', '+', '='])
        self.reset()

    def reset(self):
        """Reset the instance

        - reset rows and header
        """
        self._hline_string = None
        self._row_size = None
        self._header = []
        self._rows = []

    def set_chars(self, array):
        """Set the characters used to draw lines between rows and columns

        - the array should contain 4 fields:
          [horizontal, vertical, corner, header]
        - default is set to: ['-', '|', '+', '=']
        """
        if len(array) != 4:
            raise ArraySizeError("array should contain 4 characters")
        # Keep only the first character of each entry.
        array = [ x[:1] for x in [ str(s) for s in array ] ]
        (self._char_horiz, self._char_vert,
            self._char_corner, self._char_header) = array

    def set_deco(self, deco):
        """Set the table decoration

        - 'deco' can be a combinaison of:
            Texttable.BORDER: Border around the table
            Texttable.HEADER: Horizontal line below the header
            Texttable.HLINES: Horizontal lines between rows
            Texttable.VLINES: Vertical lines between columns
          All of them are enabled by default
        - example: Texttable.BORDER | Texttable.HEADER
        """
        self._deco = deco

    def set_cols_align(self, array):
        """Set the desired columns alignment

        - the elements of the array should be either "l", "c" or "r":
            * "l": column flushed left
            * "c": column centered
            * "r": column flushed right
        """
        self._check_row_size(array)
        self._align = array

    def set_cols_valign(self, array):
        """Set the desired columns vertical alignment

        - the elements of the array should be either "t", "m" or "b":
            * "t": column aligned on the top of the cell
            * "m": column aligned on the middle of the cell
            * "b": column aligned on the bottom of the cell
        """
        self._check_row_size(array)
        self._valign = array

    def set_cols_dtype(self, array):
        """Set the desired columns datatype for the cols.

        - the elements of the array should be either "a", "t", "f", "e" or "i":
            * "a": automatic (try to use the most appropriate datatype)
            * "t": treat as text
            * "f": treat as float in decimal format
            * "e": treat as float in exponential format
            * "i": treat as int
        - by default, automatic datatyping is used for each column
        """
        self._check_row_size(array)
        self._dtype = array

    def set_cols_width(self, array):
        """Set the desired columns width

        - the elements of the array should be integers, specifying the
          width of each column. For example: [10, 20, 5]
        """
        self._check_row_size(array)
        try:
            array = list(map(int, array))
            if reduce(min, array) <= 0:
                raise ValueError
        except ValueError:
            sys.stderr.write("Wrong argument in column width specification\n")
            raise
        self._width = array

    def set_precision(self, width):
        """Set the desired precision for float/exponential formats

        - width must be an integer >= 0
        - default value is set to 3
        """
        if not type(width) is int or width < 0:
            raise ValueError('width must be an integer greater then 0')
        self._precision = width

    def header(self, array):
        """Specify the header of the table
        """
        self._check_row_size(array)
        self._header = list(map(str, array))

    def add_row(self, array):
        """Add a row in the rows stack

        - cells can contain newlines and tabs
        """
        self._check_row_size(array)
        if not hasattr(self, "_dtype"):
            self._dtype = ["a"] * self._row_size
        cells = []
        for i, x in enumerate(array):
            cells.append(self._str(i, x))
        self._rows.append(cells)

    def add_rows(self, rows, header=True):
        """Add several rows in the rows stack

        - The 'rows' argument can be either an iterator returning arrays,
          or a by-dimensional array
        - 'header' specifies if the first row should be used as the header
          of the table
        """
        # nb: don't use 'iter' on by-dimensional arrays, to get a
        # usable code for python 2.1
        if header:
            if hasattr(rows, '__iter__') and hasattr(rows, 'next'):
                self.header(next(rows))
            else:
                self.header(rows[0])
                rows = rows[1:]
        for row in rows:
            self.add_row(row)

    def draw(self):
        """Draw the table

        - the table is returned as a whole string
        """
        if not self._header and not self._rows:
            return
        self._compute_cols_width()
        self._check_align()
        out = ""
        if self._has_border():
            out += self._hline()
        if self._header:
            out += self._draw_line(self._header, isheader=True)
            if self._has_header():
                out += self._hline_header()
        length = 0
        for row in self._rows:
            length += 1
            out += self._draw_line(row)
            if self._has_hlines() and length < len(self._rows):
                out += self._hline()
        if self._has_border():
            out += self._hline()
        # Drop the trailing newline added by the last separator/line.
        return out[:-1]

    def _str(self, i, x):
        """Handles string formatting of cell data

            i - index of the cell datatype in self._dtype
            x - cell data to format
        """
        try:
            f = float(x)
        except:
            return str(x)
        n = self._precision
        dtype = self._dtype[i]
        if dtype == 'i':
            return str(int(round(f)))
        elif dtype == 'f':
            return '%.*f' % (n, f)
        elif dtype == 'e':
            return '%.*e' % (n, f)
        elif dtype == 't':
            return str(x)
        else:
            # 'a' (automatic): integers print plain, large/precise floats
            # switch to exponent or fixed-point form.
            if f - round(f) == 0:
                if abs(f) > 1e8:
                    return '%.*e' % (n, f)
                else:
                    return str(int(round(f)))
            else:
                if abs(f) > 1e8:
                    return '%.*e' % (n, f)
                else:
                    return '%.*f' % (n, f)

    def _check_row_size(self, array):
        """Check that the specified array fits the previous rows size
        """
        if not self._row_size:
            self._row_size = len(array)
        elif self._row_size != len(array):
            raise ArraySizeError("array should contain %d elements" \
                % self._row_size)

    def _has_vlines(self):
        """Return a boolean, if vlines are required or not
        """
        return self._deco & Texttable.VLINES > 0

    def _has_hlines(self):
        """Return a boolean, if hlines are required or not
        """
        return self._deco & Texttable.HLINES > 0

    def _has_border(self):
        """Return a boolean, if border is required or not
        """
        return self._deco & Texttable.BORDER > 0

    def _has_header(self):
        """Return a boolean, if header line is required or not
        """
        return self._deco & Texttable.HEADER > 0

    def _hline_header(self):
        """Print header's horizontal line
        """
        return self._build_hline(True)

    def _hline(self):
        """Print an horizontal line
        """
        # Cached: the same separator string is reused between all rows.
        if not self._hline_string:
            self._hline_string = self._build_hline()
        return self._hline_string

    def _build_hline(self, is_header=False):
        """Return a string used to separated rows or separate header from
        rows
        """
        horiz = self._char_horiz
        if (is_header):
            horiz = self._char_header
        # compute cell separator
        s = "%s%s%s" % (horiz, [horiz, self._char_corner][self._has_vlines()],
            horiz)
        # build the line
        l = s.join([horiz * n for n in self._width])
        # add border if needed
        if self._has_border():
            l = "%s%s%s%s%s\n" % (self._char_corner, horiz, l, horiz,
                self._char_corner)
        else:
            l += "\n"
        return l

    def _len_cell(self, cell):
        """Return the width of the cell

        Special characters are taken into account to return the width of the
        cell, such like newlines and tabs
        """
        cell_lines = cell.split('\n')
        maxi = 0
        for line in cell_lines:
            length = 0
            parts = line.split('\t')
            for part, i in zip(parts, list(range(1, len(parts) + 1))):
                length = length + len(part)
                if i < len(parts):
                    # Tab stops every 8 columns.
                    length = (length//8 + 1) * 8
            maxi = max(maxi, length)
        return maxi

    def _compute_cols_width(self):
        """Return an array with the width of each column

        If a specific width has been specified, exit. If the total of the
        columns width exceed the table desired width, another width will be
        computed to fit, and cells will be wrapped.
        """
        if hasattr(self, "_width"):
            return
        maxi = []
        if self._header:
            maxi = [ self._len_cell(x) for x in self._header ]
        for row in self._rows:
            for cell,i in zip(row, list(range(len(row)))):
                try:
                    maxi[i] = max(maxi[i], self._len_cell(cell))
                except (TypeError, IndexError):
                    maxi.append(self._len_cell(cell))
        items = len(maxi)
        length = reduce(lambda x, y: x+y, maxi)
        if self._max_width and length + items * 3 + 1 > self._max_width:
            # Too wide: divide the available width evenly over all columns.
            maxi = [(self._max_width - items * 3 -1) // items \
                for n in range(items)]
        self._width = maxi

    def _check_align(self):
        """Check if alignment has been specified, set default one if not
        """
        if not hasattr(self, "_align"):
            self._align = ["l"] * self._row_size
        if not hasattr(self, "_valign"):
            self._valign = ["t"] * self._row_size

    def _draw_line(self, line, isheader=False):
        """Draw a line

        Loop over a single cell length, over all the cells
        """
        line = self._splitit(line, isheader)
        space = " "
        out = ""
        for i in range(len(line[0])):
            if self._has_border():
                out += "%s " % self._char_vert
            length = 0
            for cell, width, align in zip(line, self._width, self._align):
                length += 1
                cell_line = cell[i]
                fill = width - len(cell_line)
                if isheader:
                    # Header cells are always centered.
                    align = "c"
                if align == "r":
                    out += "%s " % (fill * space + cell_line)
                elif align == "c":
                    out += "%s " % (int(fill/2) * space + cell_line \
                        + int(fill/2 + fill%2) * space)
                else:
                    out += "%s " % (cell_line + fill * space)
                if length < len(line):
                    out += "%s " % [space, self._char_vert][self._has_vlines()]
            out += "%s\n" % ['', self._char_vert][self._has_border()]
        return out

    def _splitit(self, line, isheader):
        """Split each element of line to fit the column width

        Each element is turned into a list, result of the wrapping of the
        string to the desired width
        """
        line_wrapped = []
        for cell, width in zip(line, self._width):
            array = []
            for c in cell.split('\n'):
                try:
                    if sys.version >= '3.0':
                        c = str(c)
                    else:
                        c = str(c, 'utf')
                except UnicodeDecodeError as strerror:
                    sys.stderr.write("UnicodeDecodeError exception for string '%s': %s\n" % (c, strerror))
                    if sys.version >= '3.0':
                        c = str(c, 'utf', 'replace')
                    else:
                        c = str(c, 'utf', 'replace')
                array.extend(textwrap.wrap(c, width))
            line_wrapped.append(array)
        max_cell_lines = reduce(max, list(map(len, line_wrapped)))
        # Pad every cell to the same number of lines per its vertical align.
        for cell, valign in zip(line_wrapped, self._valign):
            if isheader:
                valign = "t"
            if valign == "m":
                missing = max_cell_lines - len(cell)
                cell[:0] = [""] * int(missing / 2)
                cell.extend([""] * int(missing / 2 + missing % 2))
            elif valign == "b":
                cell[:0] = [""] * (max_cell_lines - len(cell))
            else:
                cell.extend([""] * (max_cell_lines - len(cell)))
        return line_wrapped
# Self-test / demo: renders the two example tables from the module docstring.
if __name__ == '__main__':
    table = Texttable()
    table.set_cols_align(["l", "r", "c"])
    table.set_cols_valign(["t", "m", "b"])
    table.add_rows([["Name", "Age", "Nickname"],
                    ["Mr\nXavier\nHuon", 32, "Xav'"],
                    ["Mr\nBaptiste\nClement", 1, "Baby"]])
    print((table.draw() + "\n"))
    table = Texttable()
    table.set_deco(Texttable.HEADER)
    table.set_cols_dtype(['t',  # text
                          'f',  # float (decimal)
                          'e',  # float (exponent)
                          'i',  # integer
                          'a'])  # automatic
    table.set_cols_align(["l", "r", "r", "r", "l"])
    table.add_rows([["text", "float", "exp", "int", "auto"],
                    ["abcd", "67", 654, 89, 128.001],
                    ["efghijk", 67.5434, .654, 89.6, 12800000000000000000000.00023],
                    ["lmn", 5e-78, 5e-78, 89.4, .000000000000128],
                    ["opqrstu", .023, 5e+78, 92., 12800000000000000000000]])
    print((table.draw()))
| 18,956
| 30.179276
| 106
|
py
|
amuse
|
amuse-main/src/amuse/support/thirdparty/__init__.py
| 0
| 0
| 0
|
py
|
|
amuse
|
amuse-main/src/amuse/rfi/import_module.py
|
import sys
import ctypes
import tempfile
import shutil
import os
import atexit
class _ModuleRegister(object):
    """Module-level bookkeeping for import_unique()."""
    # Whether the atexit cleanup hook has been installed yet.
    is_cleanup_registered = False
    # Temporary .so copies to delete at interpreter exit.
    files_to_cleanup = []
def find_shared_object_file(dirpath, base_libname):
    """Search sys.path for dirpath/base_libname and return the first match.

    Falls back to the bare base_libname when nothing is found.
    """
    for entry in sys.path:
        candidate_dir = os.path.join(entry, dirpath)
        if not (os.path.exists(candidate_dir) and os.path.isdir(candidate_dir)):
            continue
        candidate = os.path.join(candidate_dir, base_libname)
        if os.path.exists(candidate):
            return candidate
    return base_libname
def find_module(modulename):
    """Resolve a dotted module name to (base name, path of its .so file).

    Raises when the shared object cannot be located on sys.path.
    """
    *package_parts, base_name = modulename.split('.')
    base_libname = base_name + '.so'
    dirpath = os.path.join(*package_parts) if package_parts else ''
    libname = find_shared_object_file(dirpath, base_libname)
    if not os.path.exists(libname):
        raise Exception("cannot find the shared object file of the module '{0}'".format(base_name))
    return base_name, libname
def import_unique(modulename):
    """Import a private copy of an extension module.

    The module's shared object is copied into ``__modules__/`` and loaded via
    ctypes, so repeated calls yield independent module instances. Any
    pre-existing entry in ``sys.modules`` is restored afterwards.
    """
    modulename, libname = find_module(modulename)
    prevmodule = sys.modules.get(modulename)
    if not _ModuleRegister.is_cleanup_registered:
        _ModuleRegister.is_cleanup_registered = True
        atexit.register(cleanup)
    if not os.path.exists('__modules__'):
        os.mkdir('__modules__')
    try:
        with tempfile.NamedTemporaryFile(suffix=".so", dir='__modules__', delete=False) as target:
            with open(libname, "rb") as source:
                shutil.copyfileobj(source, target)
            target.flush()
        _ModuleRegister.files_to_cleanup.append(target.name)
        lib = ctypes.pydll.LoadLibrary(target.name)
        # NOTE(review): "init<name>" is the Python 2 extension entry point;
        # Python 3 modules export "PyInit_<name>" -- confirm against the
        # extensions this is used with.
        initfunc = getattr(lib, "init" + modulename)
        initfunc()
        result = sys.modules[modulename]
        result.__ctypeslib__ = lib
        result.__ctypesfilename__ = target.name
        result.__cleanup__ = cleanup_module
        return sys.modules[modulename]
    finally:
        # Restore sys.modules. BUGFIX: previously, when there was no previous
        # module and the name was also absent from sys.modules, the else
        # branch stored ``None`` under the module name.
        if prevmodule is None:
            sys.modules.pop(modulename, None)
        else:
            sys.modules[modulename] = prevmodule
def cleanup():
    """Best-effort removal of every copied module file recorded for cleanup."""
    for filename in _ModuleRegister.files_to_cleanup:
        if not os.path.exists(filename):
            continue
        try:
            os.remove(filename)
        except Exception as ex:
            print("Could not delete file:", filename, ", exception:", ex)
# This struct is only ever passed as a pointer, so we don't have to declare
# the full glibc layout -- just enough fields to reach dlpi_name.
class dl_phdr_info(ctypes.Structure):
    """Partial mirror of glibc's ``struct dl_phdr_info`` (first two fields)."""
    _fields_ = [
        ('padding0', ctypes.c_void_p), # ignore it
        ('dlpi_name', ctypes.c_char_p),
        # ignore the rest of the real struct
    ]
# Callback signature for dl_iterate_phdr; the data argument was changed from
# c_void_p to c_char_p by the original author.
callback_t = ctypes.CFUNCTYPE(ctypes.c_int,
                              ctypes.POINTER(dl_phdr_info),
                              ctypes.POINTER(ctypes.c_size_t), ctypes.c_char_p)
# NOTE(review): 'libc.so.6' is glibc/Linux-specific; this module will not
# load on macOS or musl systems.
dl_iterate_phdr = ctypes.CDLL('libc.so.6').dl_iterate_phdr
# data argument changed from c_void_p to c_char_p to match callback_t above.
dl_iterate_phdr.argtypes = [callback_t, ctypes.c_char_p]
dl_iterate_phdr.restype = ctypes.c_int
# Shared mutable counter used by the debugging callback below.
count = [0]
def callback(info, size, data):
    """dl_iterate_phdr visitor: print each loaded object's name and count it."""
    # simple search
    print("CLEANUP:", info.contents.dlpi_name)
    count[0] += 1
    # Returning 0 tells dl_iterate_phdr to continue iterating.
    return 0
def cleanup_module(mod):
    """Unload a module created by import_unique(): dlclose its shared library
    and delete the temporary .so copy, then null the bookkeeping attributes."""
    #print "CLEANUP!!"
    #sys.stdout.flush()
    #print "CLEANUP:", mod, len(list(os.listdir('/proc/self/fd')))
    #count[0] = 0
    #dl_iterate_phdr(callback_t(callback), "")
    #print "CLEANUP:", count[0]
    sys.stdout.flush()
    if hasattr(mod, '__ctypeslib__') and not mod.__ctypeslib__ is None:
        lib = mod.__ctypeslib__
        # NOTE(review): the unversioned 'libdl.so' may be absent on modern
        # glibc (use libdl.so.2) -- confirm on the target platforms.
        dlclose = ctypes.cdll.LoadLibrary('libdl.so').dlclose
        dlclose.argtypes = [ctypes.c_void_p]
        dlclose.restype = ctypes.c_int
        # Return code of dlclose is currently ignored.
        errorcode = dlclose(lib._handle)
        mod.__ctypeslib__ = None
        filename = mod.__ctypesfilename__
        if os.path.exists(filename):
            try:
                os.remove(filename)
            except Exception as ex:
                print("CLEANUP Could not delete file:",filename,", exception:",ex)
        mod.__ctypesfilename__ = None
| 4,329
| 31.80303
| 100
|
py
|
amuse
|
amuse-main/src/amuse/rfi/async_request.py
|
import select
import operator
from . import channel
class AbstractASyncRequest(object):
    """Common base class for asynchronous (non-blocking) call requests.

    Subclasses are expected to provide the ``_is_finished`` /
    ``_is_result_set`` attributes and a ``result_handlers`` list, and to
    implement ``wait``, ``is_result_available`` and ``result``.

    Requests support arithmetic operators; each operator returns a derived
    request whose result is computed from the operands' results once they
    are available.
    """

    def __bool__(self):
        # a request is truthy while it has not finished yet
        return not self.is_finished

    def waitone(self):
        return self.wait()

    def waitall(self):
        while not self.is_finished:
            self.wait()

    def wait(self):
        raise Exception("not implemented")

    @property
    def is_finished(self):
        return self._is_finished

    @property
    def is_result_set(self):
        return self._is_result_set

    def is_result_available(self):
        raise Exception("not implemented")

    def is_failed(self):
        # a finished request without a stored result means the call failed
        if not self.is_finished:
            return False
        return not self.is_result_set

    def result(self):
        raise Exception("not implemented")

    @property
    def results(self):
        return [self.result()]

    def add_result_handler(self, function, args = ()):
        self.result_handlers.append([function,args])

    def is_mpi_request(self):
        return False

    def is_socket_request(self):
        return False

    def is_other(self):
        # neither MPI- nor socket-backed (e.g. dependent or fake requests)
        return not self.is_mpi_request() and not self.is_socket_request()

    def get_mpi_request(self):
        raise Exception("not implemented")

    def get_socket(self):
        raise Exception("not implemented")

    def join(self, other):
        """Combine this request with *other* into an AsyncRequestsPool."""
        if other is None:
            return self
        elif isinstance(other, AbstractASyncRequest):
            pool = AsyncRequestsPool()
            pool.add_request(self, lambda x: x.result())
            pool.add_request(other, lambda x: x.result())
        elif isinstance(other, AsyncRequestsPool):
            return other.join(self)
        else:
            raise Exception("error: join only possible with ASyncRequest or Pool")
        return pool

    def waits_for(self):
        # the request that must actually be waited on (None when done)
        if self.is_finished:
            return None
        return self

    def __getitem__(self, index):
        return IndexedASyncRequest(self,index)

    def __add__(self, other):
        return baseOperatorASyncRequest(self,other, operator.add)
    def __radd__(self, other):
        return baseOperatorASyncRequest(self,other, lambda x,y: operator.add(y,x))
    def __sub__(self, other):
        return baseOperatorASyncRequest(self,other, operator.sub)
    def __rsub__(self, other):
        return baseOperatorASyncRequest(self,other, lambda x,y: operator.sub(y,x))
    def __mul__(self, other):
        return baseOperatorASyncRequest(self,other, operator.__mul__)
    def __rmul__(self, other):
        return baseOperatorASyncRequest(self,other, lambda x,y: operator.mul(y,x))
    def __truediv__(self, other):
        return baseOperatorASyncRequest(self,other, operator.truediv)
    def __rtruediv__(self, other):
        return baseOperatorASyncRequest(self,other, lambda x,y: operator.truediv(y,x))
    def __floordiv__(self, other):
        return baseOperatorASyncRequest(self,other, operator.floordiv)
    def __rfloordiv__(self, other):
        return baseOperatorASyncRequest(self,other, lambda x,y: operator.floordiv(y,x))
    def __div__(self, other):
        # BUGFIX: ``operator.div`` does not exist in Python 3; ``__div__`` is
        # the legacy Python 2 name for ``/``, so map it to true division.
        return baseOperatorASyncRequest(self,other, operator.truediv)
    def __rdiv__(self, other):
        return baseOperatorASyncRequest(self,other, lambda x,y: operator.truediv(y,x))
    def __pow__(self, other):
        return baseOperatorASyncRequest(self,other, operator.pow)
    def __rpow__(self, other):
        return baseOperatorASyncRequest(self,other, lambda x,y: operator.pow(y,x))
    def __mod__(self, other):
        return baseOperatorASyncRequest(self,other, operator.mod)
    def __rmod__(self, other):
        return baseOperatorASyncRequest(self,other, lambda x,y: operator.mod(y,x))
    def __neg__(self):
        return baseOperatorASyncRequest(self, None, operator.neg)

    def __iter__(self):
        # iterate over named sub-results when an index is known, else yield self
        if self._result_index:
            for i in self._result_index:
                yield self[i]
        else:
            yield self
class DependentASyncRequest(AbstractASyncRequest):
    """A request that only starts once a parent request has finished.

    ``request_factory`` is invoked (from a result handler installed on the
    parent) to create the actual request when the parent completes.  Until
    then ``self.request`` is None.
    """
    def __init__(self, parent, request_factory):
        self._result_index=None
        self.request=None

        self.parent=parent
        if isinstance(parent, AsyncRequestsPool):
            self.parent=PoolDependentASyncRequest(parent)

        def handler(arg):
            # parent finished: create the dependent request and re-attach any
            # handlers that were registered before it existed
            result=arg()
            self.request=request_factory()
            for h in self.result_handlers:
                self.request.add_result_handler(*h)
            return result

        self.parent.add_result_handler(handler)

        self.result_handlers = []

    @property
    def is_result_set(self):
        if self.request is None:
            return False
        return self.request.is_result_set

    @property
    def is_finished(self):
        # before the factory ran, completion is governed by the parent
        if self.request is None:
            if self.parent.is_finished:
                return True
            else:
                return False
        return self.request.is_finished

    def wait(self):
        # wait for the parent first; prefix (once) so nested dependent calls
        # do not stack the same prefix repeatedly
        try:
            self.parent.waitall()
        except Exception as ex:
            message=str(ex)
            if not message.startswith("Error in dependent call: "):
                message="Error in dependent call: "+str(ex)
            raise type(ex)(message)

        if self.request is None:
            raise Exception("something went wrong (exception of parent?)")

        self.request.wait()

    def is_result_available(self):
        if self.is_finished:
            if self.request is None:
                return False
        #~ if not self.parent.is_finished:
            #~ return False
        if self.request is None:
            return False
            #~ raise Exception("something went wrong (exception of parent?)")

        return self.request.is_result_available()

    def result(self):
        self.wait()

        if not self.request.is_result_set:
            raise Exception("result unexpectedly not available")

        return self.request.result()

    @property
    def results(self):
        # results of the whole chain: parent results plus our own
        return self.parent.results+[self.result()]

    def add_result_handler(self, function, args = ()):
        # buffer handlers until the real request exists
        if self.request is None:
            self.result_handlers.append([function,args])
        else:
            self.request.add_result_handler(function,args)

    def is_mpi_request(self):
        if self.request is None:
            return self.parent.is_mpi_request()
        else:
            return self.request.is_mpi_request()

    def is_socket_request(self):
        if self.request is None:
            return self.parent.is_socket_request()
        else:
            return self.request.is_socket_request()

    def waits_for(self):
        if self.is_finished:
            return None
        if self.request is not None:
            return self.request
        else:
            return self.parent.waits_for()
class PoolDependentASyncRequest(DependentASyncRequest):
    """Dependent request whose parent is a whole AsyncRequestsPool."""

    def __init__(self, parent):
        # Deliberately does not call DependentASyncRequest.__init__: a pool
        # parent needs no request_factory and the inner request is a no-op.
        self.parent = parent
        self.request = FakeASyncRequest()
        self.result_handlers = []
        # BUGFIX: sibling request classes set ``_result_index``; it is read by
        # AbstractASyncRequest.__iter__, so leaving it unset could raise
        # AttributeError when such a request is iterated.
        self._result_index = None
class IndexedASyncRequest(DependentASyncRequest):
    """Request for one indexed element of a parent request's result."""

    def __init__(self, parent, index):
        self.parent = parent
        self.index = index
        self.request = FakeASyncRequest()
        self.result_handlers = []
        # BUGFIX: narrowed the original bare ``except:``, which also swallowed
        # SystemExit/KeyboardInterrupt; any ordinary lookup failure still
        # falls back to "no named sub-results".
        try:
            self._result_index = parent._result_index[index]
        except Exception:
            self._result_index = None

    def result(self):
        self.wait()
        # index into the parent's (already available) result
        return self.parent.result().__getitem__(self.index)
class baseOperatorASyncRequest(DependentASyncRequest):
    """Request whose result applies *operator* to the operands' results.

    ``second`` may be another request (both are joined through a pool),
    a plain value, or None for unary operators.
    """

    def __init__(self, first, second, operator):
        self._first = first
        self._second = second
        self._operator = operator
        # BUGFIX: sibling request classes set ``_result_index``; it is read by
        # AbstractASyncRequest.__iter__, so leaving it unset could raise
        # AttributeError when the derived request is iterated.
        self._result_index = None
        if isinstance(second, AbstractASyncRequest):
            pool = AsyncRequestsPool(first, second)
            self.parent = PoolDependentASyncRequest(pool)
        else:
            self.parent = first
        self.request = FakeASyncRequest()
        self.result_handlers = []

    def result(self):
        self.wait()
        first = self._first.result()
        second = self._second.result() if isinstance(self._second, AbstractASyncRequest) else self._second
        if second is None:
            # unary operator (e.g. negation)
            return self._operator(first)
        return self._operator(first, second)
class ASyncRequest(AbstractASyncRequest):
    """Asynchronous request backed by a nonblocking MPI receive."""

    def __init__(self, request, message, comm, header):
        self.request = request      # the underlying MPI request object
        self.message = message      # message object filled on completion
        self.header = header
        self.comm = comm
        self._is_finished = False
        self._is_result_set = False
        self._called_set_result = False
        self._result = None
        self.result_handlers = []
        self._result_index=None

    def wait(self):
        if self.is_finished:
            return
        self._is_finished = True

        # block until the MPI transfer completes, then decode the reply
        self.request.Wait()
        self._set_result()

    def is_result_available(self):
        if self.is_finished:
            return self._is_result_set

        # non-blocking completion test on the MPI request
        return self.request.Test()

    def get_message(self):
        return self.message

    def _set_result(self):
        # Runs at most once.  Handlers wrap each other: every handler receives
        # a callable producing the inner value, with the raw message innermost.
        if self._called_set_result:
            return
        self._called_set_result=True

        class CallingChain(object):
            def __init__(self, outer, args, inner):
                self.outer = outer
                self.inner = inner
                self.args = args
            def __call__(self):
                return self.outer(self.inner, *self.args)

        self.message.receive_content(self.comm, self.header)

        current = self.get_message
        for x, args in self.result_handlers:
            current = CallingChain(x, args, current)

        self._result = current()

        self._is_result_set = True

    def result(self):
        self.wait()

        if not self._is_result_set:
            raise Exception("result unexpectedly not available")

        return self._result

    def is_mpi_request(self):
        # once finished it no longer needs MPI-based waiting
        if self._is_finished:
            return False
        return True
class ASyncSocketRequest(AbstractASyncRequest):
    """Asynchronous request backed by a socket; completion detected with select()."""

    def __init__(self, message, socket):
        self.message = message      # message object filled on completion
        self.socket = socket

        self._is_finished = False
        self._is_result_set = False
        self._called_set_result = False
        self._result = None
        self.result_handlers = []
        self._result_index=None

    def wait(self):
        if self.is_finished:
            return
        self._is_finished = True

        # block until the socket becomes readable
        while True:
            readables, _r, _x = select.select([self.socket], [], [])
            if len(readables) == 1:
                break

        self._set_result()

    def is_result_available(self):
        if self.is_finished:
            return self._is_result_set

        # poll with a short timeout instead of blocking
        readables, _r, _x = select.select([self.socket], [], [], 0.001)

        return len(readables) == 1

    def get_message(self):
        return self.message

    def _set_result(self):
        # Runs at most once: read the reply, then apply the handler chain
        # (handlers wrap each other, raw message innermost).
        if self._called_set_result:
            return
        self._called_set_result=True

        class CallingChain(object):
            def __init__(self, outer, args, inner):
                self.outer = outer
                self.inner = inner
                self.args=args
            def __call__(self):
                return self.outer(self.inner, *self.args)

        self.message.receive(self.socket)

        current = self.get_message
        for x,args in self.result_handlers:
            current = CallingChain(x, args, current)

        self._result = current()

        self._is_result_set = True

    def result(self):
        self.wait()

        if not self._is_result_set:
            raise Exception("result unexpectedly not available")

        return self._result

    def is_socket_request(self):
        # once finished it no longer needs socket-based waiting
        if self._is_finished:
            return False
        return True
class FakeASyncRequest(AbstractASyncRequest):
    """An immediately-satisfiable request wrapping a precomputed value."""

    def __init__(self, result=None):
        self._is_finished = False
        self._is_result_set = False
        self._called_set_result = False
        self._result = None
        self.__result = result
        self.result_handlers = []
        self._result_index = None

    def wait(self):
        if self.is_finished:
            return
        self._is_finished = True
        self._set_result()

    def is_result_available(self):
        # the value is known from construction, so it is always available
        return True

    def _set_result(self):
        """Apply the handler chain to the wrapped value (at most once)."""
        if self._called_set_result:
            return
        self._called_set_result = True

        def make_link(handler, handler_args, inner):
            # each handler receives a callable producing the inner value
            def link():
                return handler(inner, *handler_args)
            return link

        produce = lambda: self.__result
        for handler, handler_args in self.result_handlers:
            produce = make_link(handler, handler_args, produce)

        self._result = produce()
        self._is_result_set = True

    def result(self):
        self.wait()
        if not self._is_result_set:
            raise Exception("result unexpectedly not available")
        return self._result
class ASyncRequestSequence(AbstractASyncRequest):
    """Chains requests created one after another by a factory.

    ``create_next_request(index, *args)`` must return the next request in
    the sequence, or None when the sequence is exhausted.  The combined
    result is the list of all individual results.
    """

    def __init__(self, create_next_request, args = ()):
        self.create_next_request = create_next_request
        self.args = args
        self.index = 0
        self.current_async_request = self.create_next_request(self.index, *self.args)
        self._is_finished = False
        self._is_result_set = False
        self._called_set_result = False
        self._result = None
        self.result_handlers = []
        self._results = []
        self._result_index=None

    @property
    def is_finished(self):
        # advancing here means polling is_finished also drives the sequence
        self._next_request()
        return self.current_async_request is None

    def wait(self):
        if self.is_finished:
            return
        self._is_finished=True

        while self.current_async_request is not None:
            self.current_async_request.wait()
            self._next_request()

        self._set_result()

    def waitone(self):
        # advance only one request of the sequence
        if self.is_finished:
            return

        self.current_async_request.wait()
        self._next_request()

        if self.current_async_request is None:
            self._is_finished=True
            self._set_result()

    def _next_request(self):
        # if the current request completed, collect its result and start the next
        if self.current_async_request is not None and \
           self.current_async_request.is_result_available():
            self._results.append(self.current_async_request.result())
            self.index += 1
            self.current_async_request = self.create_next_request(self.index, *self.args)
            if self.current_async_request is None:
                self._set_result()

    @property
    def results(self):
        return self._results

    def is_result_available(self):
        if self.is_finished:
            return True
        self._next_request()
        return self.current_async_request is None

    def add_result_handler(self, function, args = ()):
        self.result_handlers.append([function,args])

    def get_message(self):
        # the raw "message" of a sequence is the list of collected results
        return self._results

    def _set_result(self):
        # Runs at most once; handlers wrap each other, raw results innermost.
        if self._called_set_result:
            return
        self._called_set_result=True

        class CallingChain(object):
            def __init__(self, outer, args, inner):
                self.outer = outer
                self.inner = inner
                self.args = args
            def __call__(self):
                return self.outer(self.inner, *self.args)

        current = self.get_message
        for x, args in self.result_handlers:
            current = CallingChain(x, args, current)

        self._result = current()

        self._is_result_set = True

    def result(self):
        self.wait()

        if not self._is_result_set:
            raise Exception("result unexpectedly not available")

        return self._result

    def is_mpi_request(self):
        return self.current_async_request.is_mpi_request()

    def is_socket_request(self):
        return self.current_async_request.is_socket_request()

    def waits_for(self):
        return self.current_async_request
class AsyncRequestWithHandler(object):
    """Pairs an async request with the handler to run on its completion."""

    def __init__(self, pool, async_request, result_handler, args=(), kwargs={}):
        self.async_request = async_request
        if result_handler is None:
            # default handler: simply fetch the request's result
            def _default(request):
                return request.result()
            result_handler = _default
        self.result_handler = result_handler
        self.args = args
        self.kwargs = kwargs
        self.pool = pool

    def run(self):
        """Invoke the handler with the request and the stored extra arguments."""
        self.result_handler(self.async_request, *self.args, **self.kwargs)
class AsyncRequestsPool(object):
    """Collects several async requests so they can be waited on together."""

    def __init__(self, *requests):
        self.requests_and_handlers = []
        self.registered_requests = set([])
        for x in requests:
            self.add_request(x)

    def add_request(self, async_request, result_handler = None, args=(), kwargs={}):
        # None and duplicates are silently ignored
        if async_request is None:
            return
        if async_request in self.registered_requests:
            return
            #~ raise Exception("Request is already registered, cannot register a request more than once")

        self.registered_requests.add(async_request)

        self.requests_and_handlers.append(
            AsyncRequestWithHandler(
                self,
                async_request,
                result_handler,
                args,
                kwargs
            )
        )

    def waitall(self):
        while len(self) > 0:
            self.wait()

    def waitone(self):
        return self.wait()

    def wait(self):
        # TODO need to cleanup this code
        #
        # Handles (at most) one batch of completed requests per call, in
        # three phases: "other" requests first, then MPI-backed requests,
        # then socket-backed requests.
        while len(self.requests_and_handlers) > 0:

            # phase 1: requests that are neither MPI nor socket based
            requests = [x.async_request.waits_for() for x in self.requests_and_handlers if x.async_request.is_other()]
            indices = [i for i, x in enumerate(self.requests_and_handlers) if x.async_request.is_other()]

            if len(requests) > 0:
                for index, x in zip(indices, requests):
                    if x is not None:
                        x.waits_for().waitone()
                    request_and_handler = self.requests_and_handlers[index]
                    if request_and_handler.async_request.is_result_available():
                        self.registered_requests.remove(request_and_handler.async_request)
                        self.requests_and_handlers.pop(index)
                        request_and_handler.run()
                break

            # phase 2: MPI requests; deduplicate the underlying MPI request
            # objects before handing them to Waitany
            requests_ = [x.async_request.waits_for().request for x in self.requests_and_handlers if x.async_request.is_mpi_request()]
            indices_ = [i for i, x in enumerate(self.requests_and_handlers) if x.async_request.is_mpi_request()]
            requests=[]
            indices=[]
            for r,i in zip(requests_, indices_):
                if r not in requests:
                    requests.append(r)
                    indices.append(i)

            if len(requests) > 0:
                index = channel.MPI.Request.Waitany(requests)
                index = indices[index]
                request_and_handler = self.requests_and_handlers[index]
                request_and_handler.async_request.waits_for().waitone() # will set the finished flag
                if request_and_handler.async_request.is_result_available():
                    self.registered_requests.remove(request_and_handler.async_request)
                    self.requests_and_handlers.pop(index)
                    request_and_handler.run()
                    break

            # phase 3: socket requests; select() on the distinct sockets
            sockets_ = [x.async_request.waits_for().socket for x in self.requests_and_handlers if x.async_request.is_socket_request()]
            indices_ = [i for i, x in enumerate(self.requests_and_handlers) if x.async_request.is_socket_request()]
            sockets=[]
            indices=[]
            for r,i in zip(sockets_, indices_):
                if r not in sockets:
                    sockets.append(r)
                    indices.append(i)

            if len(sockets) > 0:
                readable, _, _ = select.select(sockets, [], [])
                indices_to_delete = []
                for read_socket in readable:
                    index = sockets.index(read_socket)
                    index = indices[index]
                    request_and_handler = self.requests_and_handlers[index]
                    request_and_handler.async_request.waits_for().waitone() # will set the finished flag
                    if request_and_handler.async_request.is_result_available():
                        self.registered_requests.remove(request_and_handler.async_request)
                        indices_to_delete.append(index)
                        request_and_handler.run()

                # pop from the back so remaining indexes stay valid
                for x in reversed(list(sorted(indices_to_delete))):
                    self.requests_and_handlers.pop(x)

                if len(indices_to_delete) > 0:
                    break

    def join(self, other):
        """Merge *other* (a request or another pool) into this pool."""
        if other is None:
            return self
        elif isinstance(other, AbstractASyncRequest):
            self.add_request(other, lambda x: x.result())
        elif isinstance(other, AsyncRequestsPool):
            for x in other.requests_and_handlers:
                self.add_request(
                    x.async_request,
                    x.result_handler,
                    args = x.args,
                    kwargs = x.kwargs
                )
        else:
            raise Exception("can only join request or pool")
        return self

    def __len__(self):
        return len(self.requests_and_handlers)

    def __bool__(self):
        # NOTE(review): an *empty* pool is truthy here, the opposite of
        # AbstractASyncRequest.__bool__ (truthy while pending) — confirm this
        # inversion is intentional before relying on bool(pool).
        return len(self)==0

    def waits_for(self):
        raise Exception("pool has no waits for, should never be called")
| 23,061
| 30.039031
| 134
|
py
|
amuse
|
amuse-main/src/amuse/rfi/core.py
|
import weakref
import atexit
import errno
import os
import sys
import logging
import pydoc
import traceback
import random
import sys
import warnings
import inspect
import functools
# from collections import OrderedDict
from subprocess import Popen, PIPE
from amuse.support import exceptions
from amuse.support.core import late
from amuse.support.core import print_out
from amuse.support.core import OrderedDictionary
from amuse.support.options import OptionalAttributes, option
from amuse.rfi.tools.create_definition import CodeDocStringProperty
from amuse.rfi.channel import MpiChannel
from amuse.rfi.channel import MultiprocessingMPIChannel
from amuse.rfi.channel import DistributedChannel
from amuse.rfi.channel import SocketChannel
from amuse.rfi.channel import is_mpd_running
from amuse.rfi.async_request import DependentASyncRequest
# Fall back to a stub when the generated ``amuse.config`` module is absent
# (e.g. in a source tree that has not been configured/built yet).
try:
    from amuse import config
except ImportError as ex:
    class config(object):
        is_mpi_enabled = False

CODE_LOG = logging.getLogger("code")
# default to WARN unless the application configured the logger itself
if CODE_LOG.level == logging.NOTSET:
    CODE_LOG.setLevel(logging.WARN)
"""
This module implements the code to the define interfaces between python
code and C++ or Fortran codes. It provides the abstract base
class for all community codes.
"""
import numpy
from amuse.rfi.channel import LocalChannel
def ensure_mpd_is_running():
    """Start an ``mpd`` daemon if none is running (MPICH2 vendor only)."""
    from mpi4py import MPI
    if not is_mpd_running():
        name_of_the_vendor, version = MPI.get_vendor()
        if name_of_the_vendor == 'MPICH2':
            # nohup keeps the daemon alive after this process exits
            process = Popen(['nohup','mpd'])
def _typecode_to_datatype(typecode):
if typecode is None:
return None
mapping = {
'd':'float64',
'i':'int32',
'f':'float32',
's':'string',
'b':'bool',
'l':'int64',
}
if typecode in mapping:
return mapping[typecode]
values = mapping.values()
if typecode in values:
return typecode
raise exceptions.AmuseException("{0} is not a valid typecode".format(typecode))
class CodeFunction(object):
    # Callable bound at attribute access time to one remote legacy function
    # of a community code; created by the ``legacy_function`` descriptor.

    __doc__ = CodeDocStringProperty()

    def __init__(self, interface, owner, specification):
        """
        Implementation of the runtime call to the remote process.

        Performs the encoding of python arguments into lists
        of values, sends a message over an MPI channel and
        waits for a result message, decodes this message and
        returns.
        """
        self.interface = interface
        self.owner = owner
        self.specification = specification

    def __call__(self, *arguments_list, **keyword_arguments):
        # finish any outstanding asynchronous call first so calls stay ordered
        if self.interface.async_request:
            try:
                self.interface.async_request.wait()
            except Exception as ex:
                warnings.warn("Ignored exception in async call: " + str(ex))
        dtype_to_values = self.converted_keyword_and_list_arguments( arguments_list, keyword_arguments)

        handle_as_array = self.must_handle_as_array(dtype_to_values)

        if not self.owner is None:
            CODE_LOG.info("start call '%s.%s'",self.owner.__name__, self.specification.name)

        # id to match the reply with this call
        call_id = random.randint(0, 1000)

        try:
            self.interface.channel.send_message(call_id, self.specification.id, dtype_to_arguments = dtype_to_values)
            dtype_to_result = self.interface.channel.recv_message(call_id, self.specification.id, handle_as_array)
        except Exception as ex:
            CODE_LOG.info("Exception when calling function '{0}', of code '{1}', exception was '{2}'".format(self.specification.name, type(self.interface).__name__, ex))
            raise exceptions.CodeException("Exception when calling function '{0}', of code '{1}', exception was '{2}'".format(self.specification.name, type(self.interface).__name__, ex))

        result = self.converted_results(dtype_to_result, handle_as_array)

        if not self.owner is None:
            CODE_LOG.info("end call '%s.%s'",self.owner.__name__, self.specification.name)

        return result

    def _async_request(self, *arguments_list, **keyword_arguments):
        # send the call now; return a request that decodes the reply lazily
        dtype_to_values = self.converted_keyword_and_list_arguments( arguments_list, keyword_arguments)

        handle_as_array = self.must_handle_as_array(dtype_to_values)

        call_id = random.randint(0, 1000)

        self.interface.channel.send_message(call_id, self.specification.id, dtype_to_arguments = dtype_to_values)

        request = self.interface.channel.nonblocking_recv_message(call_id, self.specification.id, handle_as_array)

        def handle_result(function):
            try:
                dtype_to_result = function()
            except Exception as ex:
                raise exceptions.CodeException("Exception when calling legacy code '{0}', exception was '{1}'".format(self.specification.name, ex))
            result=self.converted_results(dtype_to_result, handle_as_array)
            return result

        request.add_result_handler(handle_result)

        return request

    def asynchronous(self, *arguments_list, **keyword_arguments):
        # chain after a pending request (if any) so calls execute in order
        if self.interface.async_request is not None:
            def factory():
                return self._async_request(*arguments_list, **keyword_arguments)

            request=DependentASyncRequest( self.interface.async_request, factory)
        else:
            request=self._async_request(*arguments_list, **keyword_arguments)

        request._result_index=self.result_index()

        def handle_result(function):
            result=function()
            # clear the interface's pending slot once this request resolved
            if self.interface.async_request==request:
                self.interface.async_request=None
            return result

        request.add_result_handler(handle_result)

        self.interface.async_request=request

        return request

    def must_handle_as_array(self, keyword_arguments):
        # True when any argument value is a (non-string) sequence, i.e. the
        # call is vectorized over multiple elements
        for argument_type, argument_values in keyword_arguments.items():
            if argument_values:
                count = 0
                for argument_value in argument_values:
                    try:
                        if not isinstance(argument_value, str):
                            count = max(count, len(argument_value))
                    except:
                        count = max(count, 0)
                if count > 0:
                    return True
        return False

    """
    Get list of result keys
    """
    def result_index(self):
        # names of all outputs, with "__result" for the return value
        index=[]
        for parameter in self.specification.output_parameters:
            index.append(parameter.name)
        if not self.specification.result_type is None:
            index.append("__result")
        return index

    """
    Convert results from an MPI message to a return value.
    """
    def converted_results(self, dtype_to_result, must_handle_as_array):
        number_of_outputs = len(self.specification.output_parameters)
        result_type = self.specification.result_type

        if number_of_outputs == 0:
            if result_type is None:
                return None
            return dtype_to_result[result_type][0]

        # single output and no return value: unwrap it
        if number_of_outputs == 1 \
            and result_type is None:
            for value in dtype_to_result.values():
                if len(value) == 1:
                    if must_handle_as_array:
                        return value
                    else:
                        return value[0]

        # multiple outputs: build an ordered name -> value mapping; values
        # are popped per-dtype in declaration order (hence the reversal)
        result = OrderedDictionary()
        dtype_to_array = {}

        for key, value in dtype_to_result.items():
            dtype_to_array[key] = list(reversed(value))

        if not result_type is None:
            return_value = dtype_to_array[result_type].pop()

        for parameter in self.specification.output_parameters:
            result[parameter.name] = dtype_to_array[parameter.datatype].pop()

        if not result_type is None:
            result["__result"] = return_value

        return result

    """
    Convert keyword arguments and list arguments to an MPI message
    """
    def converted_keyword_and_list_arguments(self, arguments_list, keyword_arguments):
        dtype_to_values = self.specification.new_dtype_to_values()

        # track which inputs still need a value; positional args first,
        # then keywords, then declared defaults
        input_parameters_seen = set([x.name for x in self.specification.input_parameters])
        names_in_argument_list = set([])
        for index, argument in enumerate(arguments_list):
            parameter = self.specification.input_parameters[index]
            names_in_argument_list.add(parameter.name)

            values = dtype_to_values[parameter.datatype]
            values[parameter.input_index] = argument
            input_parameters_seen.remove(parameter.name)

        for index, parameter in enumerate(self.specification.input_parameters):
            if parameter.name in keyword_arguments:
                values = dtype_to_values[parameter.datatype]
                values[parameter.input_index] = keyword_arguments[parameter.name]
                input_parameters_seen.remove(parameter.name)

        for parameter in self.specification.input_parameters:
            if (parameter.name in input_parameters_seen) and parameter.has_default_value():
                values = dtype_to_values[parameter.datatype]
                values[parameter.input_index] = parameter.default
                input_parameters_seen.remove(parameter.name)

        if input_parameters_seen:
            raise exceptions.CodeException("Not enough parameters in call, missing " + str(sorted(input_parameters_seen)))

        return dtype_to_values

    def __str__(self):
        return str(self.specification)
class legacy_function(object):
    # Descriptor that turns a specification-returning function into a
    # remote-callable CodeFunction on instance-attribute access.

    __doc__ = CodeDocStringProperty()

    def __init__(self, specification_function):
        """Decorator for legacy functions.

        The decorated function cannot have any arguments. This
        means the decorated function must not have a ``self``
        argument.

        The decorated function must return
        a LegacyFunctionSpecification.

        >>> class LegacyExample(object):
        ...     @legacy_function
        ...     def evolve():
        ...          specification = LegacyFunctionSpecification()
        ...          return specification
        ...
        >>> x = LegacyExample()
        >>> x.evolve.specification #doctest: +ELLIPSIS
        <amuse.rfi.core.LegacyFunctionSpecification object at 0x...>
        >>> LegacyExample.evolve #doctest: +ELLIPSIS
        <amuse.rfi.core.legacy_function object at 0x...>
        >>> x.evolve #doctest: +ELLIPSIS
        <amuse.rfi.core.CodeFunction object at 0x...>

        :argument specification_function: The function to be decorated
        """
        self.specification_function = specification_function

    def __get__(self, instance, owner):
        # class access returns the descriptor itself; instance access
        # returns a callable bound to that code instance
        if instance is None:
            return self
        if self.specification.has_units:
            return CodeFunctionWithUnits(instance, owner, self.specification)
        else:
            return CodeFunction(instance, owner, self.specification)

    def __set__(self, instance, value):
        # assignments to the attribute are silently ignored
        return

    @late
    def specification(self):
        """
        Returns the specification for the call.
        """
        result = self.specification_function()
        if result.name is None:
            result.name = self.specification_function.__name__
        if result.id is None:
            # stable numeric id derived from the function name
            result.id = abs(self.crc32(result.name))
        if result.description is None:
            result.description = pydoc.getdoc(self.specification_function)
        return result

    def is_compiled_file_up_to_date(self, time_of_the_compiled_file):
        # compare mtime of the defining python file with the compiled worker
        name_of_defining_file = self.specification_function.__code__.co_filename
        if os.path.exists(name_of_defining_file):
            time_of_defining_file = os.stat(name_of_defining_file).st_mtime
            return time_of_defining_file <= time_of_the_compiled_file
        return True

    @late
    def crc32(self):
        # Pick a crc32 implementation whose output matches the C signed-int
        # convention (value folded into the signed 32-bit range), verified
        # against a known checksum of the string 'amuse'.
        try:
            from zlib import crc32
            # python 3, crc32 needs bytes...
            def python3_crc32(x):
                x = crc32(bytes(x, 'ascii'))
                return x - ((x & 0x80000000) << 1)
            if python3_crc32('amuse') & 0xffffffff == 0xc0cc9367:
                return python3_crc32
        except Exception:
            pass
        try:
            from binascii import crc32
            # python 3, crc32 needs bytes...
            def python3_crc32(x):
                x = crc32(bytes(x, 'ascii'))
                return x - ((x & 0x80000000) << 1)
            if python3_crc32('amuse') & 0xffffffff == 0xc0cc9367:
                return python3_crc32
        except Exception:
            pass
        raise Exception("No working crc32 implementation found!")
def derive_dtype_unit_and_default(value):
    """Derive the ``(dtype, unit, default)`` triple describing *value*.

    *value* may be: None; a quantity-like object with ``unit`` and
    ``number`` attributes; an object carrying a ``dtype`` (e.g. a numpy
    scalar); a dtype code or name such as ``'d'`` or ``'float64'`` (then no
    default); a bare type such as ``int`` (then no default); or a plain
    bool/int/float/str used as the default value.

    Returns a tuple where any element may be None.  Raises Exception for
    values whose type cannot be determined.
    """
    if value is None:
        return None, None, None
    # split a quantity into unit and bare number; plain values have no unit
    # (BUGFIX: narrowed from bare ``except:`` which also caught SystemExit
    # and KeyboardInterrupt)
    try:
        unit = value.unit
        number = value.number
    except Exception:
        unit = None
        number = value
    try:
        # numpy-like values carry their dtype directly
        dtype = number.dtype.__str__()
        default = number
    except Exception:
        if number in ['d', 'float64', 'i', 'int32', 'f', 'float32',
                      's', 'string', 'b', 'bool', 'l', 'int64']:
            # the value *is* a dtype code/name, not a default value
            dtype = number
            default = None
        else:
            if isinstance(number, type):
                # a bare type: instantiate to classify it, but keep no default
                number = number()
                default = None
            else:
                default = number
            # bool must be tested before int: bool is a subclass of int
            if isinstance(number, bool):
                dtype = "b"
            elif isinstance(number, int):
                dtype = "i"
            elif isinstance(number, float):
                dtype = "d"
            elif isinstance(number, str):
                dtype = "s"
            else:
                raise Exception("undetectable type")
    return dtype, unit, default
def get_function_specification(name, in_arg, out_arg, must_handle_array=False,
                               can_handle_array=False, length_arguments=None):
    """Build a LegacyFunctionSpecification from argument mappings.

    ``in_arg``/``out_arg`` map parameter names to example values or dtype
    codes (see ``derive_dtype_unit_and_default``).  A name present in both
    mappings becomes a single INOUT parameter.  The special key
    ``__result`` in ``out_arg`` gives the return type; otherwise the
    return type defaults to 'i'.

    Note: ``in_arg`` and ``out_arg`` are consumed (entries are popped).
    """
    function = LegacyFunctionSpecification()
    function.name = name
    function.must_handle_array = must_handle_array
    function.can_handle_array = can_handle_array
    if "__result" in out_arg:
        result = out_arg.pop("__result")
        dtype, unit, dummy = derive_dtype_unit_and_default(result)
        function.result_type = dtype
        function.result_unit = unit
    else:
        function.result_type = 'i'
        function.result_unit = None
    inout_arg = dict()
    # BUGFIX: iterate over a snapshot of the keys; popping from a mapping
    # while iterating its live key view raises RuntimeError on Python 3 dicts.
    for arg in list(in_arg.keys()):
        if arg in out_arg:
            inout_arg[arg] = in_arg.pop(arg)
            out_arg.pop(arg)
    for arg, value in in_arg.items():
        dtype, unit, default = derive_dtype_unit_and_default(value)
        function.addParameter(arg, dtype=dtype, direction=function.IN, unit=unit, default=default)
    for arg, value in inout_arg.items():
        dtype, unit, default = derive_dtype_unit_and_default(value)
        function.addParameter(arg, dtype=dtype, direction=function.INOUT, unit=unit, default=default)
    for arg, value in out_arg.items():
        dtype, unit, default = derive_dtype_unit_and_default(value)
        function.addParameter(arg, dtype=dtype, direction=function.OUT, unit=unit, default=default)
    if function.must_handle_array:
        # vectorized functions need an explicit length parameter
        if length_arguments:
            name = length_arguments[0]
        else:
            name = "N"
        function.addParameter(name, dtype='i', direction=function.LENGTH)
    return function
def simplified_function_specification(must_handle_array=False,can_handle_array=False):
    """Decorator factory: turn a simple python function into a
    specification-returning function usable with ``legacy_function``.

    The decorated function declares inputs as keyword arguments (value =
    example/default) and outputs by calling ``returns(...)`` in its body;
    the body is executed once, only to collect this information.
    """
    def wrapper(f):
        argspec=inspect.getfullargspec(f)
        nkw=len(argspec.defaults) if argspec.defaults else 0
        defaults=argspec.defaults if argspec.defaults else []
        # defaultless (positional) arguments name the LENGTH parameter(s)
        length_arguments=argspec.args[0:-nkw]
        kwargs=argspec.args[-nkw:]
        in_arg=OrderedDictionary()
        for x,y in zip(kwargs,defaults):
            in_arg[x]=y
        out_arg=[]
        # the flattened source text is scanned so that the outputs keep the
        # order in which they appear inside the ``returns(...)`` call
        flatsrc=inspect.getsource(f).replace("\n","").replace(" ","")
        def returns(**kwargs):
            start=flatsrc.find("returns(")
            order=lambda k: flatsrc.find(k[0]+"=",start)
            out_arg.extend(sorted(kwargs.items(),key=order))
        # inject ``returns`` into the function's globals, then run the body
        f.__globals__['returns']=returns
        f(*argspec.args)
        out_arg_mapping=OrderedDictionary()
        for x in out_arg:
            out_arg_mapping[x[0]] = x[1]
        function=get_function_specification(f.__name__,in_arg,out_arg_mapping,
                                            must_handle_array,can_handle_array,length_arguments)
        def g():
            return function
        return g
    return wrapper
def remote_function(f=None, must_handle_array=False, can_handle_array=False):
    """Decorator turning a simple specification function into a legacy_function.

    May be used bare (``@remote_function``) or with options
    (``@remote_function(can_handle_array=True)``).
    """
    if f is None:
        # Called with optional arguments only: return a decorator with the
        # options bound; it will receive the function on the next call.
        return functools.partial(
            remote_function,
            must_handle_array=must_handle_array,
            can_handle_array=can_handle_array,
        )
    decorate = simplified_function_specification(
        must_handle_array=must_handle_array,
        can_handle_array=can_handle_array,
    )
    return legacy_function(decorate(f))
class ParameterSpecification(object):
    """Specification of one parameter of a legacy function."""

    def __init__(self, name, dtype, direction, description, default=None, unit=None):
        self.name = name
        self.direction = direction
        # indexes are filled in later, when the parameter is registered
        self.input_index = -1
        self.output_index = -1
        self.description = description
        self.datatype = _typecode_to_datatype(dtype)
        self.default = default
        self.unit = unit

    def is_input(self):
        """True when a value for this parameter is sent to the code."""
        return self.direction in (
            LegacyFunctionSpecification.IN,
            LegacyFunctionSpecification.INOUT,
        )

    def is_output(self):
        """True when a value for this parameter is received from the code."""
        return self.direction in (
            LegacyFunctionSpecification.OUT,
            LegacyFunctionSpecification.INOUT,
        )

    def has_default_value(self):
        return self.default is not None
class LegacyFunctionSpecification(object):
"""
Specification of a legacy function.
Describes the name, result type and parameters of a
legacy function.
The legacy functions are implemented by legacy codes.
The implementation of legacy functions is in C/C++ or Fortran.
To interact with these functions a specification of the
legacy function is needed.
This specification is used to determine how to encode
and decode the parameters and results of the function.
Objects of this class describe the specification of one
function.
>>> specification = LegacyFunctionSpecification()
>>> specification.name = "test"
>>> specification.addParameter("one", dtype="int32", direction = specification.IN)
>>> specification.addParameter("two", dtype="float64", direction = specification.OUT)
>>> specification.result_type = "int32"
>>> print(specification)
function: int test(int one)
output: double two, int __result
"""
IN = object()
"""Used to specify that a parameter is used as an input parameter, passed by value"""
OUT = object()
"""Used to specify that a parameter is used as an output parameter, passed by reference"""
INOUT = object()
"""Used to specify that a parameter is used as an input and an outpur parameter, passed by reference"""
LENGTH = object()
"""Used to specify that a parameter is used as the length parameter for the other parameters"""
def __init__(self, counter=[0]): # counter serves to be able to put specs in anything resembling sane order (=input order)
counter[0]+=1
self.nspec=counter[0]
self.parameters = []
self.name = None
self.id = None
self.result_type = None
self.result_unit = None
self.description = None
self.input_parameters = []
self.output_parameters = []
self.dtype_to_input_parameters = {}
self.dtype_to_output_parameters = {}
self.can_handle_array = False
self.must_handle_array = False
self.has_units = False
self.result_doc = ''
def set_name(self, name):
self.name = name
def addParameter(self, name, dtype = 'i', direction = IN, description = "", default = None, unit = None):
"""
Extend the specification with a new parameter.
The sequence of calls to addParameter is important. The first
call will be interpreted as the first argument, the second
call as the second argument etc.
:argument name: Name of the parameter, used in documentation and function generation
:argument dtype: Datatype specification string
:argument direction: Direction of the argument, can be IN, OUT or INOUT
:argument description: Description of the argument, for documenting purposes
:argument default: An optional default value for the parameter
"""
parameter = ParameterSpecification(name, dtype, direction, description, default, unit)
self.parameters.append(parameter)
if parameter.is_input():
self.add_input_parameter(parameter)
if parameter.is_output():
self.add_output_parameter(parameter)
def add_input_parameter(self, parameter):
has_default_parameters = any([x.has_default_value() for x in self.input_parameters])
if has_default_parameters and not parameter.has_default_value():
raise exceptions.AmuseException("non default argument '{0}' follows default argument".format(parameter.name))
self.input_parameters.append(parameter)
parameter.index_in_input = len(self.input_parameters) - 1
parameters = self.dtype_to_input_parameters.get(parameter.datatype, [])
parameters.append(parameter)
parameter.input_index = len(parameters) - 1
self.dtype_to_input_parameters[parameter.datatype] = parameters
def add_output_parameter(self, parameter):
self.output_parameters.append(parameter)
parameter.index_in_output = len(self.output_parameters) - 1
parameters = self.dtype_to_output_parameters.get(parameter.datatype, [])
parameters.append(parameter)
parameter.output_index = len(parameters) - 1
self.dtype_to_output_parameters[parameter.datatype] = parameters
def new_dtype_to_values(self):
result = {}
for dtype, parameters in self.dtype_to_input_parameters.items():
result[dtype] = [None] * len(parameters)
return result
def prepare_output_parameters(self):
for dtype, parameters in self.dtype_to_output_parameters.items():
if dtype == self.result_type:
offset = 1
else:
offset = 0
for index, parameter in enumerate(parameters):
parameter.output_index = offset + index
def __str__(self):
typecode_to_name = {'int32':'int', 'float64':'double', 'float32':'float', 'string':'string', 'int64':'long', 'bool':'bool' }
p = print_out()
p + 'function: '
if self.result_type is None:
p + 'void'
else:
p + typecode_to_name[self.result_type]
p + ' '
p + self.name
p + '('
first = True
for x in self.input_parameters:
if first:
first = False
else:
p + ', '
p + typecode_to_name[x.datatype]
p + ' '
p + x.name
p + ')'
if self.output_parameters:
p + '\n'
p + 'output: '
first = True
for x in self.output_parameters:
if first:
first = False
else:
p + ', '
p + typecode_to_name[x.datatype]
p + ' '
p + x.name
if not self.result_type is None:
p + ', '
p + typecode_to_name[self.result_type]
p + ' '
p + '__result'
return p.string
def _get_result_type(self):
return self._result_type
def _set_result_type(self, value):
self._result_type = _typecode_to_datatype(value)
def iter_optional_input_parameters(self):
for x in self.input_parameters:
if x.has_default_value():
yield x
result_type = property(_get_result_type, _set_result_type)
def stop_interfaces(exceptions = []):
    """
    Stop the workers of all instantiated interfaces.

    All instantiated interfaces will become unstable
    after this call!

    :argument exceptions: class names whose instances should be left running
        (the mutable default is only ever read, never mutated, so it is
        harmless here)
    """
    # newest-first, so codes created later (which may depend on earlier
    # ones) are stopped before the codes they use
    for reference in reversed(CodeInterface.instances):
        x = reference()  # weakref: resolves to None if already collected
        if not x is None and x.__class__.__name__ not in exceptions:
            try:
                x._stop()
            except:
                # best-effort shutdown (typically at interpreter exit);
                # a failing worker must not prevent stopping the others
                pass
    # also shut down channels that were parked for reuse
    for x in CodeInterface.classes:
        x.stop_reusable_channels()
class CodeInterface(OptionalAttributes):
    """
    Abstract base class for all interfaces to legacy codes.

    When a subclass is instantiated, a number of subprocesses
    will be started. These subprocesses are called workers
    as they implement the interface and do the actual work
    of the instantiated object.
    """
    # weak references to every live interface, consumed by stop_interfaces()
    instances = []
    # classes that have parked reusable channels (see store_reusable_channel)
    classes = set([])
    is_stop_interfaces_registered = False

    def __init__(self, name_of_the_worker = 'worker_code', **options):
        """
        Instantiates an object, starting the worker.

        Deleting the instance, with ``del``, will stop
        the worker.

        The worker can be started with a gdb session. The code
        will start gdb in a new xterm window. To enable this
        debugging support, the ``DISPLAY`` environment variable must be
        set and the X display must be accessible, ``xhost +``.

        :argument name_of_the_worker: The filename of the application to start
        :argument number_of_workers: Number of applications to start. The application must have parallel MPI support if this is more than 1.
        :argument debug_with_gdb: Start the worker(s) in a gdb session in a separate xterm
        :argument hostname: Start the worker on the node with this name
        """
        OptionalAttributes.__init__(self, **options)
        self.async_request=None
        self.instances.append(weakref.ref(self))
        #
        #ave: no more redirection in the code
        #1) does not seem to work in fortran correctly
        #2) seems to break on hydra
        #
        #if not 'debugger' in options:
        #    self._redirect_outputs(*self.redirection_filenames)
        #
        if self.must_start_worker:
            self._start(name_of_the_worker = name_of_the_worker, **options)

    def __del__(self):
        self._stop()

    def _check_if_worker_is_up_to_date(self):
        self.channel.check_if_worker_is_up_to_date(self)

    def _start(self, name_of_the_worker = 'worker_code', interpreter_executable = None, **options):
        # a previously parked channel can be adopted instead of spawning a
        # new worker process
        if self.reuse_worker:
            channel = self.retrieve_reusable_channel()
            if channel is not None:
                self.channel = channel
                return
        if interpreter_executable is None and self.use_interpreter:
            interpreter_executable = self.interpreter
        self.channel = self.channel_factory(name_of_the_worker, type(self), interpreter_executable = interpreter_executable, **options)
        self._check_if_worker_is_up_to_date()
        self.channel.redirect_stdout_file = self.redirection_filenames[0]
        self.channel.redirect_stderr_file = self.redirection_filenames[1]
        self.channel.polling_interval_in_milliseconds = self.polling_interval_in_milliseconds
        #~ self.channel.initialize_mpi = self.initialize_mpi
        self.channel.start()
        # change to the working directory
        if self.working_directory:
            result=self.set_working_directory(self.working_directory)
            if result!=0:
                raise Exception(f"Changing to working directory {self.working_directory} failed")
        # must register stop interfaces after channel start
        # if done before, the mpi atexit will be registered
        # incorrectly
        self.ensure_stop_interface_at_exit()
        if self.channel.is_polling_supported():
            if self.polling_interval_in_milliseconds > 0:
                # channel call expects microseconds, option is milliseconds
                self.internal__set_message_polling_interval(int(self.polling_interval_in_milliseconds * 1000))

    def wait(self):
        # block until the last asynchronous call (if any) has completed
        if self.async_request is not None:
            self.async_request.wait()

    @option(type="int", sections=("channel",))
    def polling_interval_in_milliseconds(self):
        return 0

    @classmethod
    def ensure_stop_interface_at_exit(cls):
        # register the atexit hook exactly once, for all interface classes
        if not cls.is_stop_interfaces_registered:
            atexit.register(stop_interfaces)
            cls.is_stop_interfaces_registered = True

    @classmethod
    def retrieve_reusable_channel(cls):
        """Pop a previously stored channel for this class, or None."""
        if not 'REUSE_INSTANCE' in cls.__dict__:
            cls.REUSE_INSTANCE = set([])
        s = cls.REUSE_INSTANCE
        if len(s) > 0:
            return s.pop()
        else:
            return None

    @classmethod
    def store_reusable_channel(cls, instance):
        """Park a channel so a later instance of this class can adopt it."""
        if not 'REUSE_INSTANCE' in cls.__dict__:
            cls.REUSE_INSTANCE = set([])
        s = cls.REUSE_INSTANCE
        s.add(instance)
        cls.classes.add(cls)

    @classmethod
    def stop_reusable_channels(cls):
        """Send a stop message to, and stop, every parked channel of this class."""
        if not 'REUSE_INSTANCE' in cls.__dict__:
            cls.REUSE_INSTANCE = set([])
        s = cls.REUSE_INSTANCE
        while len(s) > 0:
            x = s.pop()
            call_id = random.randint(0, 1000)
            # do the _stop_worker call with low level send
            # (id == 0, no arguments)
            x.send_message(call_id, 0 , dtype_to_arguments = {})
            dtype_to_result = x.recv_message(call_id, 0, False)
            x.stop()

    def _stop(self):
        # hasattr guard: __del__ may run on a half-constructed instance
        if hasattr(self, 'channel'):
            if not self.channel is None and self.channel.is_active():
                if self.reuse_worker:
                    # keep the worker alive for a future instance
                    self.store_reusable_channel(self.channel)
                    self.channel = None
                else:
                    self._stop_worker()
                    self.channel.stop()
                    self.channel = None
            del self.channel

    @legacy_function
    def _stop_worker():
        # function id 0 is the reserved "stop" message
        function = LegacyFunctionSpecification()
        function.id = 0
        return function

    @legacy_function
    def internal__get_message_polling_interval():
        """Gets the message polling interval for MPI
        header messages, in microseconds"""
        function = LegacyFunctionSpecification()
        function.addParameter('polling_interval', dtype='int32', direction=function.OUT)
        function.result_type = 'int32'
        function.internal_provided=True
        return function

    @legacy_function
    def internal__set_message_polling_interval():
        """Sets the message polling interval for MPI header messages, in microseconds."""
        function = LegacyFunctionSpecification()
        function.addParameter('polling_interval', dtype='int32', direction=function.IN)
        function.result_type = 'int32'
        function.internal_provided=True
        return function

    @legacy_function
    def internal__open_port():
        """Opens an MPI port on the worker and returns its identifier."""
        function = LegacyFunctionSpecification()
        function.addParameter('port_identifier', dtype='string', direction=function.OUT)
        function.result_type = 'int32'
        function.internal_provided=True
        return function

    @legacy_function
    def internal__accept_on_port():
        """Accepts a connection on a previously opened port, yielding a communicator id."""
        function = LegacyFunctionSpecification()
        function.addParameter('port_identifier', dtype='string', direction=function.IN)
        function.addParameter('comm_identifier', dtype='int32', direction=function.OUT)
        function.result_type = 'int32'
        function.internal_provided=True
        return function

    @legacy_function
    def internal__connect_to_port():
        """Connects to a port opened by another code, yielding a communicator id."""
        function = LegacyFunctionSpecification()
        function.addParameter('port_identifier', dtype='string', direction=function.IN)
        function.addParameter('comm_identifier', dtype='int32', direction=function.OUT)
        function.result_type = 'int32'
        function.internal_provided=True
        return function

    @legacy_function
    def internal__activate_communicator():
        """Makes the given communicator the active one on the worker."""
        function = LegacyFunctionSpecification()
        function.addParameter('comm_identifier', dtype='int32', direction=function.IN)
        function.result_type = 'int32'
        function.internal_provided=True
        return function

    def stop(self):
        self._stop()

    @option(choices=['mpi','remote','distributed', 'sockets', 'local'], sections=("channel",))
    def channel_type(self):
        return 'mpi'

    @option(type="boolean", sections=("channel",))
    def initialize_mpi(self):
        """Is MPI initialized in the code or not. Defaults to True if MPI is available"""
        return config.mpi.is_enabled

    @option(choices=("none","null","file"), sections=("channel",))
    def redirection(self):
        """Redirect the output of the code to null, standard streams or file"""
        return "null"

    @late
    def redirection_filenames(self):
        # maps the redirection option to (stdout, stderr) filenames
        return {
            "none":("none", "none"),
            "null":("/dev/null", "/dev/null"),
            "file":(self.redirect_stdout_file, self.redirect_stderr_file),
        }[self.redirection]

    @option(sections=("channel",))
    def redirect_stdout_file(self):
        return self.redirect_file

    @option(sections=("channel",))
    def redirect_stderr_file(self):
        return self.redirect_file

    @option(sections=("channel",))
    def redirect_file(self):
        return "code.out"

    @option(type='boolean', sections=("channel",))
    def must_start_worker(self):
        return True

    @late
    def channel_factory(self):
        # resolve the channel_type option to a channel class; mpi silently
        # falls back to sockets when MPI support is unavailable
        if self.channel_type == 'mpi':
            if MpiChannel.is_supported():
                return MpiChannel
            else:
                return SocketChannel
        elif self.channel_type == 'remote':
            return MultiprocessingMPIChannel
        elif self.channel_type == 'distributed':
            return DistributedChannel
        elif self.channel_type == 'sockets':
            return SocketChannel
        elif self.channel_type == 'local':
            return LocalChannel
        else:
            raise exceptions.AmuseException("Cannot create a channel with type {0!r}, type is not supported".format(self.channel_type))

    @option(type="boolean", sections=("channel",))
    def reuse_worker(self):
        """Do not stop a worker, re-use an existing one"""
        return False

    def before_get_parameter(self):
        """
        Called everytime just before a parameter is retrieved in using::
            instance.parameter.name
        """
        pass

    def before_set_parameter(self):
        """
        Called everytime just before a parameter is updated in using::
            instance.parameter.name = newvalue
        """
        pass

    def before_set_interface_parameter(self):
        """
        Called everytime just before a interface parameter is updated in using::
            instance.parameter.name = newvalue
        """
        pass

    def before_new_set_instance(self):
        """
        (Can be) called everytime just before a new set is created
        """
        pass

    def before_get_data_store_names(self):
        """
        called before getting data store names (for state model) - should eventually
        not be necessary
        """
        pass

    @option(type='string', sections=("channel",))
    def interpreter(self):
        return sys.executable

    @option(type='boolean', sections=("channel",))
    def use_interpreter(self):
        return False

    @legacy_function
    def internal__become_code():
        function = LegacyFunctionSpecification()
        function.addParameter('number_of_workers', dtype='int32', direction=function.IN)
        function.addParameter('modulename', dtype='string', direction=function.IN)
        function.addParameter('classname', dtype='string', direction=function.IN)
        function.result_type = 'int32'
        function.internal_provided=True
        return function

    def get_code_module_directory(self):
        """Directory of the python module this interface class is defined in."""
        return os.path.dirname(inspect.getmodule(self).__file__)

    @option(sections=("channel",))
    def working_directory(self):
        return None

    @legacy_function
    def set_working_directory():
        function = LegacyFunctionSpecification()
        function.addParameter('working_directory', dtype='string', direction=function.IN)
        function.result_type = 'int32'
        function.internal_provided=True
        return function

    @legacy_function
    def get_working_directory():
        function = LegacyFunctionSpecification()
        function.addParameter('working_directory', dtype='string', direction=function.OUT)
        function.result_type = 'int32'
        function.internal_provided=True
        return function
class CodeWithDataDirectories(object):
    """
    Mixin that gives a code interface standard input/output data
    directories, derived from configurable root directories.
    """

    def __init__(self):
        # the distributed channel manages (remote) directories itself
        if not self.channel_type == 'distributed':
            self.ensure_data_directory_exists(self.get_output_directory())

    def ensure_data_directory_exists(self, directory):
        """Create *directory* (and parents) if needed; '~' and $VARS are expanded.

        Idempotent: an existing directory is fine. A pre-existing
        non-directory at the path still raises, as before.
        """
        directory = os.path.expanduser(directory)
        directory = os.path.expandvars(directory)
        # exist_ok replaces the old try/except-EEXIST dance with the same
        # semantics: no error when the directory already exists
        os.makedirs(directory, exist_ok=True)

    @property
    def module_name(self):
        # e.g. 'amuse.community.<code>.interface' -> '<code>'
        return self.__module__.split('.')[-2]

    @property
    def data_directory(self):
        return self.get_data_directory()

    @property
    def output_directory(self):
        return self.get_output_directory()

    def get_data_directory(self):
        """
        Returns the root name of the directory for the
        application data files.
        """
        if self.input_data_root_directory:
            return os.path.join(self.input_data_root_directory, self.module_name, 'input')
        else:
            return os.path.join(self.get_code_module_directory(),"data", "input")

    def get_output_directory(self):
        """
        Returns the root name of the directory to use by the
        application to store it's output / temporary files in.
        """
        if self.output_data_root_directory:
            return os.path.join(self.output_data_root_directory, self.module_name, 'output')
        else:
            working_directory=self.get_working_directory()['working_directory']
            return os.path.join(working_directory,"__amuse_code_output", self.module_name) # note problem for multiple codes

    @option(type="string", sections=('data',))
    def amuse_root_directory(self):
        """
        The root directory of AMUSE, used as default root for all data directories
        """
        return self.channel.get_amuse_root_directory()

    @option(type="string", sections=('data',))
    def input_data_root_directory(self):
        """
        The root directory of the input data, read only directories
        """
        return None

    @option(type="string", sections=('data',))
    def output_data_root_directory(self):
        """
        The root directory of the output data,
        read - write directory
        """
        return None
class PythonCodeInterface(CodeInterface):
    """
    Base class for codes having a python implementation

    :argument implementation_factory: Class of the python implementation
    """

    def __init__(self, implementation_factory = None, name_of_the_worker = None, **options):
        if self.channel_type == 'distributed':
            print("Warning! Distributed channel not fully supported by PythonCodeInterface yet")
        self.implementation_factory = implementation_factory
        # optional directory in which the generated worker script is placed
        self.worker_dir=options.get("worker_dir",None)
        CodeInterface.__init__(self, name_of_the_worker, **options)

    def _start(self, name_of_the_worker = 'worker_code', **options):
        # without an explicit worker script, generate one from the
        # implementation factory class
        if name_of_the_worker is None:
            if self.implementation_factory is None:
                raise exceptions.CodeException("Must provide the name of a worker script or the implementation_factory class")
            name_of_the_worker = self.make_executable_script_for(self.implementation_factory)
            if not options.setdefault("dynamic_python_code",True):
                raise exceptions.CodeException("dynamic code set to false, but python code generated")
        if self.use_python_interpreter:
            CodeInterface._start(self, name_of_the_worker = name_of_the_worker, interpreter_executable = self.python_interpreter, **options)
        else:
            CodeInterface._start(self, name_of_the_worker = name_of_the_worker, **options)

    def _check_if_worker_is_up_to_date(self):
        # generated workers are always current; skip the timestamp check
        pass

    def make_executable_script_for(self, implementation_factory):
        """Generate (on the root rank) a worker script wrapping *implementation_factory*."""
        from amuse.rfi.tools.create_python_worker import CreateAPythonWorker
        x = CreateAPythonWorker()
        if self.worker_dir:
            x.worker_dir=self.worker_dir
        x.channel_type = self.channel_type
        x.interface_class = type(self)
        x.implementation_factory = implementation_factory
        # only the root process writes the script, all ranks use its name
        if self.channel_factory.is_root():
            x.start()
        return x.worker_name

    @classmethod
    def new_executable_script_string_for(cls, implementation_factory, channel_type = 'mpi'):
        # deliberately disabled; raises to surface any remaining callers
        raise Exception("tracing use")

    @option(type='boolean', sections=("channel",))
    def use_python_interpreter(self):
        return False

    @option(type='string', sections=("channel",))
    def python_interpreter(self):
        return sys.executable
class CodeFunctionWithUnits(CodeFunction):
    """Runtime call to a remote legacy function that also transports units.

    Units are encoded as fixed-size blocks of 9 floats per parameter
    (factor, unit-system index, and one exponent per base unit) and sent
    alongside the regular argument values.
    """

    def __init__(self, interface, owner, specification):
        """
        Implementation of the runtime call to the remote process.

        Performs the encoding of python arguments into lists
        of values, sends a message over an MPI channel and
        waits for a result message, decodes this message and
        returns.
        """
        self.interface = interface
        self.owner = owner
        self.specification = specification
        self.number_of_output_parameters = len(self.specification.output_parameters)

    def __call__(self, *arguments_list, **keyword_arguments):
        dtype_to_values, units = self.converted_keyword_and_list_arguments( arguments_list, keyword_arguments)
        encoded_units = self.convert_input_units_to_floats(units)
        handle_as_array = self.must_handle_as_array(dtype_to_values)
        if not self.owner is None:
            CODE_LOG.info("start call '%s.%s'",self.owner.__name__, self.specification.name)
        call_id = random.randint(0, 1000)
        try:
            self.interface.channel.send_message(call_id, self.specification.id, dtype_to_arguments = dtype_to_values, encoded_units = encoded_units)
            dtype_to_result , output_encoded_units = self.interface.channel.recv_message(call_id, self.specification.id, handle_as_array, has_units = True)
        except Exception as ex:
            CODE_LOG.info("Exception when calling function '{0}', of code '{1}', exception was '{2}'".format(self.specification.name, type(self.interface).__name__, ex))
            raise exceptions.CodeException("Exception when calling function '{0}', of code '{1}', exception was '{2}'".format(self.specification.name, type(self.interface).__name__, ex))
        output_units = self.convert_floats_to_units(output_encoded_units)
        result = self.converted_results(dtype_to_result, handle_as_array, output_units)
        if not self.owner is None:
            CODE_LOG.info("end call '%s.%s'",self.owner.__name__, self.specification.name)
        return result

    def _async_request(self, *arguments_list, **keyword_arguments):
        # same as __call__, but returns a request object; decoding happens
        # in the result handler when the reply arrives
        dtype_to_values, units = self.converted_keyword_and_list_arguments( arguments_list, keyword_arguments)
        encoded_units = self.convert_input_units_to_floats(units)
        handle_as_array = self.must_handle_as_array(dtype_to_values)
        call_id = random.randint(0, 1000)
        self.interface.channel.send_message(call_id, self.specification.id, dtype_to_arguments = dtype_to_values, encoded_units = encoded_units)
        request = self.interface.channel.nonblocking_recv_message(call_id, self.specification.id, handle_as_array, has_units = True)
        def handle_result(function):
            try:
                dtype_to_result, output_encoded_units = function()
            except Exception as ex:
                raise exceptions.CodeException("Exception when calling legacy code '{0}', exception was '{1}'".format(self.specification.name, ex))
            output_units = self.convert_floats_to_units(output_encoded_units)
            return self.converted_results(dtype_to_result, handle_as_array, output_units)
        request.add_result_handler(handle_result)
        return request

    def must_handle_as_array(self, keyword_arguments):
        """True when any argument value is a sequence (vector call)."""
        for argument_type, argument_values in keyword_arguments.items():
            if argument_values:
                count = 0
                for argument_value in argument_values:
                    try:
                        if not isinstance(argument_value, str):
                            count = max(count, len(argument_value))
                    except:
                        # scalars have no len(); they do not trigger array mode
                        count = max(count, 0)
                if count > 0:
                    return True
        return False

    def converted_results(self, dtype_to_result, must_handle_as_array, units):
        """
        Convert results from an MPI message to a return value.
        """
        number_of_outputs = self.number_of_output_parameters
        result_type = self.specification.result_type
        if number_of_outputs == 0:
            if result_type is None:
                return None
            return dtype_to_result[result_type][0]
        if number_of_outputs == 1 \
            and result_type is None:
            for value in dtype_to_result.values():
                if len(value) == 1:
                    if must_handle_as_array:
                        return value
                    else:
                        return value[0]
        # multiple outputs: build an ordered name -> value mapping;
        # reversed lists let us pop() values in declaration order
        result = OrderedDictionary()
        dtype_to_array = {}
        for key, value in dtype_to_result.items():
            dtype_to_array[key] = list(reversed(value))
        if not result_type is None:
            return_value = dtype_to_array[result_type].pop()
        for parameter in self.specification.output_parameters:
            result[parameter.name] = dtype_to_array[parameter.datatype].pop()
            if self.specification.has_units and not units[parameter.index_in_output] is None:
                result[parameter.name] = result[parameter.name] | units[parameter.index_in_output]
        if not result_type is None:
            result["__result"] = return_value
        return result

    def converted_keyword_and_list_arguments(self, arguments_list, keyword_arguments):
        """
        Convert keyword arguments and list arguments to an MPI message
        """
        from amuse.units import quantities
        dtype_to_values = self.specification.new_dtype_to_values()
        units = [None] * len(self.specification.input_parameters)
        # track which parameters still need a value; positional args,
        # keyword args and defaults are consumed in that order
        input_parameters_seen = set([x.name for x in self.specification.input_parameters])
        names_in_argument_list = set([])
        for index, argument in enumerate(arguments_list):
            parameter = self.specification.input_parameters[index]
            names_in_argument_list.add(parameter.name)
            if quantities.is_quantity(argument):
                units[parameter.index_in_input] = argument.unit
                argument = argument.number
            values = dtype_to_values[parameter.datatype]
            values[parameter.input_index] = argument
            input_parameters_seen.remove(parameter.name)
        for index, parameter in enumerate(self.specification.input_parameters):
            if parameter.name in keyword_arguments:
                argument = keyword_arguments[parameter.name]
                if quantities.is_quantity(argument):
                    units[parameter.index_in_input] = argument.unit
                    argument = argument.number
                values = dtype_to_values[parameter.datatype]
                values[parameter.input_index] = argument
                input_parameters_seen.remove(parameter.name)
        for parameter in self.specification.input_parameters:
            if (parameter.name in input_parameters_seen) and parameter.has_default_value():
                argument = parameter.default
                if quantities.is_quantity(argument):
                    units[parameter.index_in_input] = argument.unit
                    argument = argument.number
                values = dtype_to_values[parameter.datatype]
                values[parameter.input_index] = argument
                input_parameters_seen.remove(parameter.name)
        if input_parameters_seen:
            raise exceptions.CodeException("Not enough parameters in call, missing " + str(sorted(input_parameters_seen)))
        return dtype_to_values, units

    def __str__(self):
        return str(self.specification)

    def convert_unit_to_floats(self, unit):
        """Encode one unit as 9 floats; all zeros means 'no unit'."""
        if unit is None:
            return numpy.zeros(9, dtype=numpy.float64)
        else:
            return unit.to_array_of_floats()

    def convert_input_units_to_floats(self, units):
        """Concatenate the 9-float encodings of all input units into one array."""
        result = numpy.zeros(len(units) * 9, dtype = numpy.float64)
        for index, unit in enumerate(units):
            offset = index*9
            result[offset:offset+9] = self.convert_unit_to_floats(unit)
        return result

    def convert_floats_to_units(self, floats):
        """Decode a flat float array back into a list of units (None for unitless)."""
        result = []
        for index in range(len(floats) // 9):
            offset = index*9
            unit_floats = floats[offset:offset+9]
            unit = self.convert_float_to_unit(unit_floats)
            result.append(unit)
        return result

    def convert_float_to_unit(self, floats):
        """Decode one 9-float block into a unit expression, or None if all zeros.

        Layout: floats[0] = factor, floats[1] = unit-system index,
        floats[2:] = exponent per base unit of that system.
        NOTE(review): an unknown system index leaves unit_system as None and
        would raise AttributeError below -- presumably encodings always come
        from a known system; confirm.
        """
        from amuse.units import core
        from amuse.units import units
        if numpy.all(floats == 0):
            return None
        factor = floats[0]
        result = factor
        system_index = floats[1]
        unit_system = None
        for x in core.system.ALL.values():
            if x.index == system_index:
                unit_system = x
                break
        for x in unit_system.bases:
            power = floats[x.index + 2]
            if not power == 0.0:
                result = result * (x ** power)
        return result
| 52,496
| 35.814165
| 186
|
py
|
amuse
|
amuse-main/src/amuse/rfi/nospawn.py
|
from amuse.rfi import core
from amuse.rfi.python_code import CythonImplementation
from mpi4py import MPI
from amuse.rfi import channel
from collections import namedtuple
import sys
import importlib
# Descriptor of a code to start: interface class, worker count, ctor args.
Code = namedtuple("Code", ['cls', 'number_of_workers', 'args', 'kwargs'])
# Same, plus the factory producing the python implementation object.
# The typename now matches the variable name; it was "Code" before,
# which gave a misleading repr and broke pickle round-trips.
PythonCode = namedtuple("PythonCode", ['cls', 'number_of_workers', 'args', 'kwargs', 'implementation_factory'])
def get_number_of_workers_needed(codes):
    """Total number of MPI ranks required: one master rank plus all workers."""
    return 1 + sum(code.number_of_workers for code in codes)
def get_color(rank, codes):
    """Communicator color for an MPI world rank.

    Rank 0 (the master) gets color 0, ranks belonging to codes[i] get
    color i + 1, and any left-over ranks get len(codes) + 1.
    """
    if rank == 0:
        return 0
    start = 1
    for position, code in enumerate(codes):
        if start <= rank < start + code.number_of_workers:
            return position + 1
        start += code.number_of_workers
    return len(codes) + 1  # left over ranks
def get_key(rank, codes):
    """Rank within the sub-communicator selected by get_color().

    The master is 0; a worker rank is numbered 0-based within its code's
    segment; left-over ranks are numbered relative to the segment after
    all requested workers.
    """
    if rank == 0:
        return 0
    start = 1
    for code in codes:
        if start <= rank < start + code.number_of_workers:
            return rank - start
        start += code.number_of_workers
    return rank - (len(codes) + 1)  # left over ranks
def get_code_class(rank, codes):
    """Interface class whose worker runs on *rank*, or None for the master
    and for left-over ranks."""
    if rank == 0:
        return None
    start = 1
    for code in codes:
        if start <= rank < start + code.number_of_workers:
            return code.cls
        start += code.number_of_workers
    return None
def start_all(codes):
    """Start all *codes* inside a pre-allocated MPI world (no spawning).

    Rank 0 becomes the master and returns the list of code instances;
    worker ranks enter their code's event loop and return None when it
    finishes. Left-over ranks return None immediately.
    """
    channel.MpiChannel.ensure_mpi_initialized()
    number_of_workers_needed = get_number_of_workers_needed(codes)
    world = MPI.COMM_WORLD
    rank = world.rank
    if world.size < number_of_workers_needed:
        if rank == 0:
            raise Exception("cannot start all codes, the world size ({0}) is smaller than the number of requested codes ({1}) (which is always 1 + the sum of the all the number_of_worker fields)".format(world.size, number_of_workers_needed))
        else:
            return None
    # split the world into one sub-communicator per code (plus the master)
    color = get_color(world.rank, codes)
    key = get_key(world.rank, codes)
    newcomm = world.Split(color, key)
    localdup = world.Dup()
    if world.rank == 0:
        # master: build one intercomm per code and wire it into a channel
        result = []
        remote_leader = 1
        tag = 1
        for x in codes:
            new_intercomm = newcomm.Create_intercomm(0, localdup, remote_leader, tag)
            remote_leader += x.number_of_workers
            tag += 1
            instance = x.cls(*x.args, check_mpi = False, must_start_worker = False, **x.kwargs)
            instance.legacy_interface.channel = channel.MpiChannel('_',None)
            instance.legacy_interface.channel.intercomm = new_intercomm
            result.append(instance)
        world.Barrier()
        return result
    else:
        code_cls = get_code_class(world.rank, codes)
        if code_cls is None:
            # left-over rank: participate in the barrier, then idle
            world.Barrier()
            return None
        # worker: the intercomm tag must match the master's (tag == color)
        new_intercomm = newcomm.Create_intercomm(0, localdup, 0, color)
        x = get_code(world.rank, codes)
        instance = code_cls(*x.args, check_mpi = False, must_start_worker = False, **x.kwargs)
        interface = instance.legacy_interface
        if hasattr(interface, '__so_module__'):
            # compiled (cython) implementation: import the .so next to the
            # interface's package and hand it the sub-communicator
            package, _ = code_cls.__module__.rsplit('.',1)
            modulename = package + '.' + interface.__so_module__
            module = importlib.import_module(modulename)
            module.set_comm_world(newcomm)
        else:
            module = x.implementation_factory()
        instance = CythonImplementation(module, interface.__class__)
        instance.intercomm = new_intercomm
        instance.must_disconnect = False
        world.Barrier()
        instance.start()  # blocks in the worker event loop until stopped
        return None
def stop_all(instances):
    """Stop every code instance in *instances*."""
    for instance in instances:
        instance.stop()
def start_empty():
    """Start a bare CodeInterface over the whole pre-allocated MPI world.

    Rank 0 returns a master-side CodeInterface wired to all other ranks;
    every other rank enters the worker event loop and returns None when
    it finishes.
    """
    channel.MpiChannel.ensure_mpi_initialized()
    world = MPI.COMM_WORLD
    rank = world.rank
    # two groups: the master (color 0) and everybody else (color 1)
    color = 0 if world.rank == 0 else 1
    key = 0 if world.rank == 0 else world.rank -1
    newcomm = world.Split(color, key)
    localdup = world.Dup()
    if world.rank == 0:
        result = []
        remote_leader = 1
        tag = 1
        new_intercomm = newcomm.Create_intercomm(0, localdup, remote_leader, tag)
        instance = core.CodeInterface(check_mpi = False, must_start_worker = False)
        instance.channel = channel.MpiChannel('_',None)
        instance.channel.intercomm = new_intercomm
        instance.world = localdup
        instance.remote_leader = 1
        world.Barrier()
        return instance
    else:
        # worker side: tag must match the master's (tag == color == 1)
        new_intercomm = newcomm.Create_intercomm(0, localdup, 0, color)
        # no implementation object yet; internal__become_code fills it later
        instance = CythonImplementation(None, core.CodeInterface)
        instance.intercomm = new_intercomm
        instance.world = localdup
        instance.freeworld = newcomm
        instance.localworld = newcomm
        instance.must_disconnect = False
        world.Barrier()
        instance.start()  # blocks until the master sends a stop message
        print("STOP...", world.rank)
        return None
def get_code(rank, codes):
    """Code descriptor whose worker runs on *rank*, or None for the master
    and for left-over ranks."""
    if rank == 0:
        return None
    start = 1
    for code in codes:
        if start <= rank < start + code.number_of_workers:
            return code
        start += code.number_of_workers
    return None
| 5,500
| 29.392265
| 241
|
py
|
amuse
|
amuse-main/src/amuse/rfi/slurm.py
|
def parse_slurm_tasks_per_node(string):
    """Expand a SLURM_TASKS_PER_NODE value, e.g. "10(x4),3", into a
    per-node list of task counts: [10, 10, 10, 10, 3].

    Unparsable counts become 0; an unparsable repeat factor means 1.
    """
    result = []
    for entry in string.split(','):
        head, _, repeat_part = entry.partition('(')
        try:
            count = int(head)
        except ValueError:
            count = 0  # unparsable number
        if repeat_part:
            # repeat_part looks like "x4)": strip the 'x' and the ')'
            try:
                repeats = int(repeat_part[1:-1])
            except ValueError:
                repeats = 1  # unparsable number, assume 1
            result.extend([count] * repeats)
        else:
            result.append(count)
    return result
def parse_slurm_nodelist(string):
    """Expand a SLURM_JOB_NODELIST value, e.g. "tcn[1-2,5],tcn9", into a
    list of individual hostnames: ["tcn1", "tcn2", "tcn5", "tcn9"].

    Bug fix: a comma directly following a closing ']' used to append an
    empty hostname to the result; empty segments are now skipped.
    """
    result = []
    name_characters = []
    position = 0
    while position < len(string):
        char = string[position]
        if char == '[':
            # bracket group: expand ids and prefix each with the name so far
            name = ''.join(name_characters)
            ids, position = parse_ids(string, position)
            for x in ids:
                result.append(name + x)
            name_characters = []
        elif char == ',':
            # only emit a name when one was accumulated; the separator right
            # after a bracket group would otherwise yield an empty entry
            if name_characters:
                result.append(''.join(name_characters))
                name_characters = []
            position += 1
        else:
            name_characters.append(char)
            position += 1
    if name_characters:
        result.append(''.join(name_characters))
    return result
def parse_ids(string, position):
    """Parse a bracketed id group starting at string[position] == '['.

    Ranges like "3-5" are expanded to individual ids (as strings).
    Returns (ids, position_just_after_the_closing_bracket).
    """
    ids = []
    closing = string.index(']', position)
    for token in string[position + 1:closing].split(','):
        if '-' in token:
            low, high = token.split('-')
            ids.extend(str(number) for number in range(int(low), int(high) + 1))
        else:
            ids.append(token)
    return ids, closing + 1
if __name__ == "__main__":
    # quick manual check of the parsers against representative SLURM values
    print(parse_slurm_tasks_per_node("10(x4),3"))
    print(parse_slurm_nodelist("tcn[595,597-598,600-606],tcn100"))
| 2,138
| 27.905405
| 86
|
py
|
amuse
|
amuse-main/src/amuse/rfi/run_command_redirected.py
|
from subprocess import call
import sys
import os.path
import time
import signal
def translate_filename_for_os(filename):
    """Return the OS-specific spelling of *filename*.

    On Windows the POSIX null device '/dev/null' becomes 'nul'; every
    other name (and every name on POSIX platforms) passes through
    unchanged.
    """
    if sys.platform == 'win32' and filename == '/dev/null':
        return 'nul'
    return filename
if __name__ == '__main__':
    # Usage: run_command_redirected.py <stdout> <stderr> <command...>
    # where <stdout>/<stderr> are a filename, '/dev/null', or 'none'
    # (meaning: inherit the parent's stream).
    stdoutfname = None
    if sys.argv[1] == 'none':
        stdout = None
    else:
        stdoutfname=translate_filename_for_os(sys.argv[1])
        stdout = open(stdoutfname,'w')
    if sys.argv[2] == 'none':
        stderr = None
    else:
        stderrfname=translate_filename_for_os(sys.argv[2])
        # when stdout and stderr point at the same real file, open stderr in
        # append mode so the two streams do not clobber each other
        # NOTE(review): the '/dev/null' comparison uses the raw argv value,
        # not the translated name -- presumably fine since /dev/null never
        # needs the append treatment; confirm on win32
        if sys.argv[2] != '/dev/null' and stdoutfname == stderrfname:
            stderr = open(stderrfname,'a')
        else:
            stderr = open(stderrfname,'w')
    # the child gets no usable stdin
    stdin = open(translate_filename_for_os('/dev/null'),'r')
    returncode = call(
        sys.argv[3:],
        stdout = stdout,
        stderr = stderr,
        stdin = stdin,
        close_fds = False
    )
    stdin.close()
    if not stdout is None:
        stdout.close()
    if not stderr is None:
        stderr.close()
    # propagate the child's exit status to our caller
    sys.exit(returncode)
| 1,197
| 21.185185
| 69
|
py
|
amuse
|
amuse-main/src/amuse/rfi/python_code.py
|
from amuse.support.core import late, OrderedDictionary
from mpi4py import MPI
import numpy
import sys
import os
import socket
import traceback
import types
import warnings
from amuse.rfi.channel import ClientSideMPIMessage
from amuse.rfi.channel import SocketMessage
from amuse.rfi.channel import pack_array
from amuse.rfi.channel import unpack_array
from amuse.rfi.core import legacy_function
from amuse.rfi.core import LegacyFunctionSpecification
class ValueHolder(object):
    """Mutable box used to pass OUT/INOUT parameter values by reference."""

    def __init__(self, value=None):
        self.value = value

    def __repr__(self):
        return "V({0!r})".format(self.value)

    def __str__(self):
        return "V({0!s})".format(self.value)
class PythonImplementation(object):
    """Server side of an AMUSE worker written in Python.

    Receives encoded call messages (over MPI or sockets), dispatches them
    to the user implementation object, and sends the encoded results back.
    """
    # Maps wire datatype tags to the message attribute holding that data.
    dtype_to_message_attribute = {
        'int32' : 'ints',
        'float64' : 'doubles',
        'float32' : 'floats',
        'string' : 'strings',
        'bool' : 'booleans',
        'int64' : 'longs',
    }
    def __init__(self, implementation, interface):
        """Bind the user `implementation` object to the legacy `interface`.

        `implementation` holds the actual method bodies; `interface`
        declares the remotely callable functions (legacy_function specs).
        """
        self.implementation = implementation
        self.interface = interface
        self.must_run = False
        self.polling_interval = 0
        # parallel list of communicators; a communicator id is its index here
        self.communicators = []
        self.lastid = -1          # highest communicator id handed out so far
        self.activeid = -1        # communicator currently being served
        self.id_to_activate = -1  # requested switch, applied at loop top
        if not self.implementation is None:
            self.implementation._interface = self
    def start(self, mpi_port = None):
        """Serve remote calls over MPI until a stop message (function id 0).

        If `mpi_port` is given, connect to it; otherwise use the parent
        intercommunicator this worker was spawned with.
        """
        if mpi_port is None:
            parent = self.intercomm
            self.communicators.append(parent)
        else:
            parent = MPI.COMM_WORLD.Connect(mpi_port, MPI.INFO_NULL, 0)
            self.communicators.append(parent)
        self.activeid = 0
        self.lastid += 1
        rank = parent.Get_rank()
        self.must_run = True
        while self.must_run:
            # Honour a pending communicator switch before blocking on receive.
            if self.id_to_activate >= 0 and self.id_to_activate != self.activeid:
                warnings.warn("activating: "+str(self.id_to_activate))
                self.activeid = self.id_to_activate
                self.id_to_activate = -1
                parent = self.communicators[self.activeid]
                rank = parent.Get_rank()
            message = ClientSideMPIMessage(polling_interval = self.polling_interval)
            message.receive(parent)
            result_message = ClientSideMPIMessage(message.call_id, message.function_id, message.call_count)
            if message.function_id == 0:
                # function id 0 is the stop request
                self.must_run = False
            else:
                if message.function_id in self.mapping_from_tag_to_legacy_function:
                    try:
                        self.handle_message(message, result_message)
                    except Exception as ex:
                        warnings.warn(str(ex))
                        traceback.print_exc()
                        result_message.set_error(str(ex))
                        #for type, attribute in self.dtype_to_message_attribute.iteritems():
                        #    setattr(result_message, attribute, [])
                        # repack the (partial) arrays so the error reply is well formed
                        for type, attribute in self.dtype_to_message_attribute.items():
                            array = getattr(result_message, attribute)
                            packed = pack_array(array, result_message.call_count, type)
                            setattr(result_message, attribute, packed)
                else:
                    result_message.set_error("unknown function id " + str(message.function_id))
            if rank == 0:
                # only rank 0 replies to the parent side
                result_message.send(parent)
        if self.must_disconnect:
            for x in self.communicators:
                x.Disconnect()
def start_socket(self, port, host):
client_socket = socket.create_connection((host, port))
self.must_run = True
while self.must_run:
message = SocketMessage()
message.receive(client_socket)
result_message = SocketMessage(message.call_id, message.function_id, message.call_count)
if message.function_id == 0:
self.must_run = False
else:
if message.function_id in self.mapping_from_tag_to_legacy_function:
try:
self.handle_message(message, result_message)
except BaseException as ex:
traceback.print_exc()
result_message.set_error(ex.__str__())
for type, attribute in self.dtype_to_message_attribute.items():
array = getattr(result_message, attribute)
packed = pack_array(array, result_message.call_count, type)
setattr(result_message, attribute, packed)
else:
result_message.set_error("unknown function id " + message.function_id)
result_message.send(client_socket)
client_socket.close()
def start_socket_mpi(self, port, host):
rank=MPI.COMM_WORLD.Get_rank()
if rank==0:
client_socket = socket.create_connection((host, port))
self.must_run = True
while self.must_run:
if rank==0:
message = SocketMessage()
message.receive(client_socket)
else:
message=None
message=MPI.COMM_WORLD.bcast(message, root=0)
result_message = SocketMessage(message.call_id, message.function_id, message.call_count)
if message.function_id == 0:
self.must_run = False
else:
if message.function_id in self.mapping_from_tag_to_legacy_function:
try:
self.handle_message(message, result_message)
except BaseException as ex:
traceback.print_exc()
result_message.set_error(ex.__str__())
for type, attribute in self.dtype_to_message_attribute.items():
array = getattr(result_message, attribute)
packed = pack_array(array, result_message.call_count, type)
setattr(result_message, attribute, packed)
else:
result_message.set_error("unknown function id " + message.function_id)
if rank==0:
result_message.send(client_socket)
if rank==0:
client_socket.close()
    def handle_message(self, input_message, output_message):
        """Dispatch one decoded request and fill in the reply message.

        Looks up the legacy function, resolves the target method (on self
        for internally provided functions, else on the implementation),
        pre-allocates output arrays, calls the method once per call (or
        once for the whole array when must_handle_array), and repacks the
        results into `output_message`.
        """
        legacy_function = self.mapping_from_tag_to_legacy_function[input_message.function_id]
        specification = legacy_function.specification
        dtype_to_count = self.get_dtype_to_count(specification)
        if hasattr(specification, "internal_provided"):
            method = getattr(self, specification.name)
        else:
            method = getattr(self.implementation, specification.name)
        if specification.has_units:
            input_units = self.convert_floats_to_units(input_message.encoded_units)
        else:
            input_units = ()
        # pre-allocate one output array per result slot, sized to call_count
        for type, attribute in self.dtype_to_message_attribute.items():
            count = dtype_to_count.get(type,0)
            for x in range(count):
                if type == 'string':
                    getattr(output_message, attribute).append([""] * output_message.call_count)
                else:
                    getattr(output_message, attribute).append(numpy.zeros(output_message.call_count, dtype=type))
        # regroup flat input arrays into one chunk per parameter
        for type, attribute in self.dtype_to_message_attribute.items():
            array = getattr(input_message, attribute)
            unpacked = unpack_array(array, input_message.call_count, type)
            setattr(input_message,attribute, unpacked)
        units = [False] * len(specification.output_parameters)
        if specification.must_handle_array:
            keyword_arguments = self.new_keyword_arguments_from_message(input_message, None, specification, input_units)
            try:
                result = method(**keyword_arguments)
            except TypeError as ex:
                # fall back to positional call when keyword names don't match
                warnings.warn("mismatch in python function specification(?): "+str(ex))
                result = method(*list(keyword_arguments))
            self.fill_output_message(output_message, None, result, keyword_arguments, specification, units)
        else:
            for index in range(input_message.call_count):
                keyword_arguments = self.new_keyword_arguments_from_message(input_message, index, specification, input_units)
                try:
                    result = method(**keyword_arguments)
                except TypeError as ex:
                    warnings.warn("mismatch in python function specification(?): "+str(ex))
                    result = method(*list(keyword_arguments))
                self.fill_output_message(output_message, index, result, keyword_arguments, specification, units)
        # flatten the output arrays back into wire form
        for type, attribute in self.dtype_to_message_attribute.items():
            array = getattr(output_message, attribute)
            packed = pack_array(array, input_message.call_count, type)
            setattr(output_message, attribute, packed)
        if specification.has_units:
            output_message.encoded_units = self.convert_output_units_to_floats(units)
    def new_keyword_arguments_from_message(self, input_message, index, specification, units = []):
        """Build the keyword arguments for one call from a request message.

        `index` selects the call within the message (None when the method
        handles the whole array at once). IN parameters become plain
        values; INOUT/OUT parameters become ValueHolders; LENGTH becomes
        the call count. Units, when present, are attached with `|`.
        """
        keyword_arguments = OrderedDictionary()
        for parameter in specification.parameters:
            attribute = self.dtype_to_message_attribute[parameter.datatype]
            argument_value = None
            if parameter.direction == LegacyFunctionSpecification.IN:
                if specification.must_handle_array:
                    argument_value = getattr(input_message, attribute)[parameter.input_index]
                else:
                    argument_value = getattr(input_message, attribute)[parameter.input_index][index]
                if specification.has_units:
                    unit = units[parameter.index_in_input]
                    if not unit is None:
                        argument_value = argument_value | unit
            elif parameter.direction == LegacyFunctionSpecification.INOUT:
                if specification.must_handle_array:
                    argument_value = ValueHolder(getattr(input_message, attribute)[parameter.input_index])
                else:
                    argument_value = ValueHolder(getattr(input_message, attribute)[parameter.input_index][index])
                if specification.has_units:
                    unit = units[parameter.index_in_input]
                    if not unit is None:
                        argument_value.value = argument_value.value | unit
            elif parameter.direction == LegacyFunctionSpecification.OUT:
                argument_value = ValueHolder(None)
            elif parameter.direction == LegacyFunctionSpecification.LENGTH:
                argument_value = input_message.call_count
            # 'in' is a Python keyword, so that parameter is renamed 'in_'
            name = 'in_' if parameter.name == 'in' else parameter.name
            keyword_arguments[name] = argument_value
        return keyword_arguments
    def fill_output_message(self, output_message, index, result, keyword_arguments, specification, units):
        """Copy one call's return value and OUT/INOUT values into the reply.

        `index` is the call slot (None when must_handle_array). When the
        specification has units, the unit of each output is recorded on the
        first call and subsequent values are converted to it.
        """
        from amuse.units import quantities
        if not specification.result_type is None:
            attribute = self.dtype_to_message_attribute[specification.result_type]
            if specification.must_handle_array:
                getattr(output_message, attribute)[0] = result
            else:
                getattr(output_message, attribute)[0][index] = result
        for parameter in specification.parameters:
            attribute = self.dtype_to_message_attribute[parameter.datatype]
            if (parameter.direction == LegacyFunctionSpecification.OUT or
                parameter.direction == LegacyFunctionSpecification.INOUT):
                argument_value = keyword_arguments[parameter.name]
                output = argument_value.value
                if specification.has_units:
                    unit = output.unit if quantities.is_quantity(output) else None
                    if specification.must_handle_array or index == 0:
                        # remember the unit seen on the first call
                        units[parameter.index_in_output] = unit
                    else:
                        unit = units[parameter.index_in_output]
                    if not unit is None:
                        output = output.value_in(unit)
                if specification.must_handle_array:
                    getattr(output_message, attribute)[parameter.output_index] = output
                else:
                    getattr(output_message, attribute)[parameter.output_index][index] = output
def get_dtype_to_count(self, specification):
dtype_to_count = {}
for parameter in specification.output_parameters:
count = dtype_to_count.get(parameter.datatype, 0)
dtype_to_count[parameter.datatype] = count + 1
if not specification.result_type is None:
count = dtype_to_count.get(specification.result_type, 0)
dtype_to_count[specification.result_type] = count + 1
return dtype_to_count
    @late
    def mapping_from_tag_to_legacy_function(self):
        """Lazily built map from function id (message tag) to legacy_function."""
        result = {}
        for x in self.interface_functions:
            result[x.specification.id] = x
        return result
    @late
    def interface_functions(self):
        """All legacy_function attributes of the interface class, sorted by
        function id, with their output parameters prepared for dispatch."""
        attribute_names = dir(self.interface)
        interface_functions = []
        for x in attribute_names:
            if x.startswith('__'):
                continue
            value = getattr(self.interface, x)
            if isinstance(value, legacy_function):
                interface_functions.append(value)
        interface_functions.sort(key= lambda x: x.specification.id)
        for x in interface_functions:
            x.specification.prepare_output_parameters()
        return interface_functions
    def internal__set_message_polling_interval(self, inval):
        """Set the receive polling interval in microseconds (0 = block)."""
        self.polling_interval = inval
        return 0
    def internal__get_message_polling_interval(self, outval):
        """Return the polling interval via the `outval` ValueHolder."""
        outval.value = self.polling_interval
        return 0
    def get_null_info(self):
        """MPI.INFO_NULL when the binding defines it, else None."""
        return getattr(MPI, 'INFO_NULL') if hasattr(MPI, 'INFO_NULL') else None
    def internal__open_port(self, port_identifier):
        """Open an MPI port; its name is returned via `port_identifier`."""
        port_identifier.value = MPI.Open_port(self.get_null_info())
        return 0
    def internal__accept_on_port(self, port_identifier, comm_identifier):
        """Accept a connection on an opened MPI port and register the new
        intercommunicator; its id is returned via `comm_identifier`."""
        new_communicator = None
        rank = MPI.COMM_WORLD.Get_rank()
        if rank == 0:
            # only rank 0 accepts; the other ranks join via Create_intercomm
            communicator = MPI.COMM_SELF.Accept(port_identifier, self.get_null_info(), 0)
            merged = communicator.Merge(False)
            new_communicator = MPI.COMM_WORLD.Create_intercomm(0, merged, 1, 65)
            merged.Free()
            communicator.Free()
        else:
            new_communicator = MPI.COMM_WORLD.Create_intercomm(0, MPI.COMM_WORLD, 1, 65)
        self.communicators.append(new_communicator)
        self.lastid += 1
        comm_identifier.value = self.lastid
        return 0
    def internal__connect_to_port(self, port_identifier, comm_identifier):
        """Connect to a named MPI port and register the new
        intercommunicator; its id is returned via `comm_identifier`."""
        new_communicator = None
        rank = MPI.COMM_WORLD.Get_rank()
        if rank == 0:
            # only rank 0 connects; the other ranks join via Create_intercomm
            communicator = MPI.COMM_SELF.Connect(port_identifier, self.get_null_info(), 0)
            merged = communicator.Merge(True)
            new_communicator = MPI.COMM_WORLD.Create_intercomm(0, merged, 0, 65)
            merged.Free()
            communicator.Free()
        else:
            new_communicator = MPI.COMM_WORLD.Create_intercomm(0, MPI.COMM_WORLD, 0, 65)
        self.communicators.append(new_communicator)
        self.lastid += 1
        comm_identifier.value = self.lastid
        return 0
def internal__activate_communicator(self, comm_identifier):
if comm_identifier > self.lastid or comm_identifier < 0:
return -1
self.id_to_activate = comm_identifier
return 0
    def internal__redirect_outputs(self, stdoutfile, stderrfile):
        """Redirect this worker's stdout/stderr to per-rank files.

        A value of "none" leaves the stream alone; any other name except
        /dev/null gets a ".NNN" rank suffix. stdin is always closed.
        """
        mpi_rank = MPI.COMM_WORLD.rank
        sys.stdin.close()
        try:
            os.close(0)
        except Exception as ex:
            warnings.warn( str(ex))
        if stdoutfile != "none":
            if stdoutfile != "/dev/null":
                fullname = "{0:s}.{1:03d}".format(stdoutfile, mpi_rank)
            else:
                fullname = stdoutfile
            sys.stdout.close()
            sys.stdout = open(fullname, "a+")
        if stderrfile != "none":
            if stderrfile != "/dev/null":
                fullname = "{0:s}.{1:03d}".format(stderrfile, mpi_rank)
            else:
                fullname = stderrfile
            sys.stderr.close()
            sys.stderr = open(fullname, "a+")
        return 0
    def convert_to_unit(self, units_as_floats, index):
        # Stub: always returns None; decoding is done by convert_float_to_unit.
        return None
def convert_unit_to_floats(self, unit):
if unit is None:
return numpy.zeros(9, dtype=numpy.float64)
else:
return unit.to_array_of_floats()
def convert_output_units_to_floats(self, units):
result = numpy.zeros(len(units) * 9, dtype = numpy.float64)
for index, unit in enumerate(units):
offset = index*9
result[offset:offset+9] = self.convert_unit_to_floats(unit)
return result
    def convert_float_to_unit(self, floats):
        """Decode one 9-float vector into a unit (inverse of
        convert_unit_to_floats); an all-zero vector decodes to ``None``.

        Layout: [factor, unit-system index, 7 base-unit powers].
        """
        from amuse.units import core
        from amuse.units import units
        if numpy.all(floats == 0):
            return None
        factor = floats[0]
        result = factor
        system_index = floats[1]
        unit_system = None
        for x in core.system.ALL.values():
            if x.index == system_index:
                unit_system = x
                break
        # NOTE(review): if no system matches, unit_system stays None and the
        # loop below raises AttributeError -- confirm indices are always valid.
        for x in unit_system.bases:
            power = floats[x.index + 2]
            if not power == 0.0:
                result = result * (x ** power)
        return result
def convert_floats_to_units(self, floats):
result = []
for index in range(len(floats) // 9):
offset = index*9
unit_floats = floats[offset:offset+9]
unit = self.convert_float_to_unit(unit_floats)
result.append(unit)
return result
    @late
    def intercomm(self):
        """The intercommunicator to the parent that spawned this worker."""
        return MPI.Comm.Get_parent()
    @late
    def must_disconnect(self):
        """Whether to Disconnect all communicators when the serve loop ends."""
        return True
    def internal__become_code(self, number_of_workers, modulename, classname):
        """Split the worker pool and re-purpose part of it (experimental).

        NOTE(review): relies on self.freeworld / self.world being set up
        elsewhere -- they are not initialised in this class; confirm the
        caller establishes them before this path is used.
        """
        warnings.warn(" possible experimental code path?")
        #~ print number_of_workers, modulename, classname
        world = self.freeworld
        # first `number_of_workers` ranks form the new code, the rest stay free
        color = 0 if world.rank < number_of_workers else 1
        key = world.rank if world.rank < number_of_workers else world.rank - number_of_workers
        #~ print "CC,", color, key, world.rank, world.size
        newcomm = world.Split(color, key)
        #~ print ("nc:", newcomm.size, newcomm.rank)
        #~ print ("AA", self.world, color, self.world.rank, self.world.size)
        try:
            new_intercomm = newcomm.Create_intercomm(0, self.world, 0, color)
        except Exception as ex:
            warnings.warn(str(ex))
            raise ex
        #~ print ("nccc:", new_intercomm.Get_remote_size(), new_intercomm.rank)
        self.communicators.append(new_intercomm)
        self.id_to_activate = len(self.communicators) - 1
        self.freeworld = newcomm
        return 0
def set_working_directory(self, d):
try:
os.chdir(d)
return 0
except Exception:
return -1
def get_working_directory(self, d):
try:
d.value=os.getcwd()
return 0
except Exception:
return -1
class CythonImplementation(PythonImplementation):
    """PythonImplementation variant for Cython-wrapped codes.

    Differs from the base dispatcher in resolving '_stop_worker' to a
    no-op and in not warning before the positional-call fallback.
    """
    def handle_message(self, input_message, output_message):
        """Dispatch one decoded request and fill in the reply message."""
        legacy_function = self.mapping_from_tag_to_legacy_function[input_message.function_id]
        specification = legacy_function.specification
        dtype_to_count = self.get_dtype_to_count(specification)
        if specification.name == '_stop_worker':
            method = lambda : None
        elif hasattr(specification,"internal_provided"):
            method = getattr(self, specification.name)
        else:
            method = getattr(self.implementation, specification.name)
        if specification.has_units:
            input_units = self.convert_floats_to_units(input_message.encoded_units)
        else:
            input_units = ()
        # pre-allocate one output array per result slot, sized to call_count
        for type, attribute in self.dtype_to_message_attribute.items():
            count = dtype_to_count.get(type,0)
            for x in range(count):
                if type == 'string':
                    getattr(output_message, attribute).append([""] * output_message.call_count)
                else:
                    getattr(output_message, attribute).append(numpy.zeros(output_message.call_count, dtype=type))
        # regroup flat input arrays into one chunk per parameter
        for type, attribute in self.dtype_to_message_attribute.items():
            array = getattr(input_message, attribute)
            unpacked = unpack_array(array, input_message.call_count, type)
            setattr(input_message,attribute, unpacked)
        units = [False] * len(specification.output_parameters)
        if specification.must_handle_array:
            keyword_arguments = self.new_keyword_arguments_from_message(input_message, None, specification, input_units)
            result = method(**keyword_arguments)
            self.fill_output_message(output_message, None, result, keyword_arguments, specification, units)
        else:
            for index in range(input_message.call_count):
                #print "INDEX:", index
                keyword_arguments = self.new_keyword_arguments_from_message(input_message, index, specification, input_units)
                try:
                    result = method(**keyword_arguments)
                except TypeError as ex:
                    # fall back to positional call when keyword names don't match
                    result = method(*list(keyword_arguments))
                self.fill_output_message(output_message, index, result, keyword_arguments, specification, units)
        # flatten the output arrays back into wire form
        for type, attribute in self.dtype_to_message_attribute.items():
            array = getattr(output_message, attribute)
            packed = pack_array(array, input_message.call_count, type)
            setattr(output_message, attribute, packed)
        if specification.has_units:
            output_message.encoded_units = self.convert_output_units_to_floats(units)
| 23,380
| 38.098662
| 126
|
py
|
amuse
|
amuse-main/src/amuse/rfi/channel.py
|
import inspect
import numpy
import os.path
import pickle as pickle
import sys
import struct
import threading
import select
import atexit
import time
import socket
import array
import logging
import shlex
logger = logging.getLogger(__name__)
#
# we want to use the automatic initialization and finalization
# of the MPI library, but sometime MPI should not be imported
# when importing the channel
# so actual import is in function ensure_mpi_initialized
#
MPI = None
from subprocess import Popen, PIPE
try:
from amuse import config
except ImportError:
config = None
from amuse.support.options import OptionalAttributes, option, GlobalOptions
from amuse.support.core import late
from amuse.support import exceptions
from amuse.support import get_amuse_root_dir
from amuse.rfi import run_command_redirected
from amuse.rfi import slurm
from . import async_request
class AbstractMessage(object):
    """A single request or reply in the worker protocol.

    Carries a call id, function id, call count, one flat payload array per
    datatype, and optional encoded units. Concrete subclasses implement
    the transport (MPI or sockets).
    """
    def __init__(self,
            call_id=0, function_id=-1, call_count=1,
            dtype_to_arguments={},
            error=False,
            big_endian=(sys.byteorder.lower() == 'big'),
            polling_interval=0,
            encoded_units = ()):
        # NOTE: the shared default dict is only read (in pack_data), never mutated.
        self.polling_interval = polling_interval
        # flags
        self.big_endian = big_endian
        self.error = error
        # header
        self.call_id = call_id
        self.function_id = function_id
        self.call_count = call_count
        # data (numpy arrays)
        self.ints = []
        self.longs = []
        self.floats = []
        self.doubles = []
        self.strings = []
        self.booleans = []
        self.pack_data(dtype_to_arguments)
        self.encoded_units = encoded_units
    def pack_data(self, dtype_to_arguments):
        """Flatten per-dtype argument lists into this message's arrays."""
        for dtype, attrname in self.dtype_to_message_attribute():
            if dtype in dtype_to_arguments:
                array = pack_array(dtype_to_arguments[dtype], self.call_count, dtype)
                setattr(self, attrname, array)
    def to_result(self, handle_as_array=False):
        """Regroup the flat arrays into a dtype -> per-call-chunks mapping."""
        dtype_to_result = {}
        for dtype, attrname in self.dtype_to_message_attribute():
            result = getattr(self, attrname)
            if self.call_count > 1 or handle_as_array:
                dtype_to_result[dtype] = unpack_array(result , self.call_count, dtype)
            else:
                dtype_to_result[dtype] = result
        return dtype_to_result
    def dtype_to_message_attribute(self):
        """(dtype tag, attribute name) pairs in wire order."""
        return (
            ('int32', 'ints'),
            ('int64', 'longs'),
            ('float32', 'floats'),
            ('float64', 'doubles'),
            ('bool', 'booleans'),
            ('string', 'strings'),
        )
    def receive(self, comm):
        raise NotImplementedError
    def send(self, comm):
        raise NotImplementedError
    def set_error(self, message):
        """Mark this message as an error reply carrying `message`."""
        self.strings = [message]
        self.error = True
class MPIMessage(AbstractMessage):
    """AbstractMessage transported over MPI.

    Wire format: an 11-int header (4 boolean flags packed in the first
    int, call id, function id, call count, then six per-datatype lengths
    and the unit-vector length) followed by one typed array per non-empty
    datatype. Subclasses supply the send/receive primitives.
    """
    def receive(self, comm):
        header = self.receive_header(comm)
        self.receive_content(comm, header)
    def receive_header(self, comm):
        header = numpy.zeros(11, dtype='i')
        self.mpi_receive(comm, [header, MPI.INT])
        return header
    def receive_content(self, comm, header):
        # 4 flags as 8bit booleans in 1st 4 bytes of header
        # endiannes(not supported by MPI channel), error, unused, unused
        flags = header.view(dtype='bool8')
        self.big_endian = flags[0]
        self.error = flags[1]
        self.is_continued = flags[2]
        self.call_id = header[1]
        self.function_id = header[2]
        self.call_count = header[3]
        number_of_ints = header[4]
        number_of_longs = header[5]
        number_of_floats = header[6]
        number_of_doubles = header[7]
        number_of_booleans = header[8]
        number_of_strings = header[9]
        number_of_units = header[10]
        # payload arrays always arrive in this fixed order
        self.ints = self.receive_ints(comm, number_of_ints)
        self.longs = self.receive_longs(comm, number_of_longs)
        self.floats = self.receive_floats(comm, number_of_floats)
        self.doubles = self.receive_doubles(comm, number_of_doubles)
        self.booleans = self.receive_booleans(comm, number_of_booleans)
        self.strings = self.receive_strings(comm, number_of_strings)
        self.encoded_units = self.receive_doubles(comm, number_of_units)
    def nonblocking_receive(self, comm):
        """Post a non-blocking header receive; returns an ASyncRequest."""
        header = numpy.zeros(11, dtype='i')
        request = self.mpi_nonblocking_receive(comm, [header, MPI.INT])
        return async_request.ASyncRequest(request, self, comm, header)
    def receive_doubles(self, comm, total):
        if total > 0:
            result = numpy.empty(total, dtype='d')
            self.mpi_receive(comm, [result, MPI.DOUBLE])
            return result
        else:
            return []
    def receive_ints(self, comm, total):
        if total > 0:
            result = numpy.empty(total, dtype='i')
            self.mpi_receive(comm, [result, MPI.INT])
            return result
        else:
            return []
    def receive_longs(self, comm, total):
        if total > 0:
            result = numpy.empty(total, dtype='int64')
            self.mpi_receive(comm, [result, MPI.INTEGER8])
            return result
        else:
            return []
    def receive_floats(self, comm, total):
        if total > 0:
            result = numpy.empty(total, dtype='f')
            self.mpi_receive(comm, [result, MPI.FLOAT])
            return result
        else:
            return []
    def receive_booleans(self, comm, total):
        if total > 0:
            result = numpy.empty(total, dtype='b')
            self.mpi_receive(comm, [result, MPI.C_BOOL or MPI.BYTE]) # if C_BOOL null datatype (ie undefined) fallback
            return numpy.logical_not(result == 0)
        else:
            return []
    def receive_strings(self, comm, total):
        """Receive `total` strings: first their sizes, then one NUL-separated
        byte blob that is sliced per the sizes."""
        if total > 0:
            sizes = numpy.empty(total, dtype='i')
            self.mpi_receive(comm, [sizes, MPI.INT])
            logger.debug("got %d strings of size %s", total, sizes)
            byte_size = 0
            for size in sizes:
                byte_size = byte_size + size + 1
            data_bytes = numpy.empty(byte_size, dtype=numpy.uint8)
            self.mpi_receive(comm, [data_bytes, MPI.CHARACTER])
            strings = []
            begin = 0
            # NOTE(review): decoded as latin_1 while send_strings encodes
            # utf-8 -- equivalent only for ASCII payloads; confirm intent.
            for size in sizes:
                strings.append(data_bytes[begin:begin + size].tobytes().decode('latin_1'))
                begin = begin + size + 1
            logger.debug("got %d strings of size %s, data = %s", total, sizes, strings)
            return numpy.array(strings)
        else:
            return []
    def send(self, comm):
        """Send header then payload arrays, in the fixed wire order."""
        header = numpy.array([
            0,
            self.call_id,
            self.function_id,
            self.call_count,
            len(self.ints) ,
            len(self.longs) ,
            len(self.floats) ,
            len(self.doubles) ,
            len(self.booleans) ,
            len(self.strings) ,
            len(self.encoded_units)
        ], dtype='i')
        flags = header.view(dtype='bool8')
        flags[0] = self.big_endian
        flags[1] = self.error
        flags[2] = len(self.encoded_units) > 0
        self.send_header(comm, header)
        self.send_content(comm)
    def send_header(self, comm, header):
        self.mpi_send(comm, [header, MPI.INT])
    def send_content(self, comm):
        self.send_ints(comm, self.ints)
        self.send_longs(comm, self.longs)
        self.send_floats(comm, self.floats)
        self.send_doubles(comm, self.doubles)
        self.send_booleans(comm, self.booleans)
        self.send_strings(comm, self.strings)
        self.send_doubles(comm, self.encoded_units)
    def send_ints(self, comm, array):
        if len(array) > 0:
            sendbuffer = numpy.array(array, dtype='int32')
            self.mpi_send(comm, [sendbuffer, MPI.INT])
    def send_longs(self, comm, array):
        if len(array) > 0:
            sendbuffer = numpy.array(array, dtype='int64')
            self.mpi_send(comm, [sendbuffer, MPI.INTEGER8])
    def send_doubles(self, comm, array):
        if len(array) > 0:
            sendbuffer = numpy.array(array, dtype='d')
            self.mpi_send(comm, [sendbuffer, MPI.DOUBLE])
    def send_floats(self, comm, array):
        if len(array) > 0:
            sendbuffer = numpy.array(array, dtype='f')
            self.mpi_send(comm, [sendbuffer, MPI.FLOAT])
    def send_strings(self, comm, array):
        """Send string sizes, then one NUL-separated utf-8 byte blob."""
        if len(array) == 0:
            return
        lengths = numpy.array( [len(s) for s in array] ,dtype='i')
        chars=(chr(0).join(array)+chr(0)).encode("utf-8")
        chars = numpy.frombuffer(chars, dtype='uint8')
        # lengths count characters; for non-ASCII text utf-8 bytes differ,
        # which this check turns into a hard failure
        if len(chars) != lengths.sum()+len(lengths):
            raise Exception("send_strings size mismatch {0} vs {1}".format( len(chars) , lengths.sum()+len(lengths) ))
        self.mpi_send(comm, [lengths, MPI.INT])
        self.mpi_send(comm, [chars, MPI.CHARACTER])
    def send_booleans(self, comm, array):
        if len(array) > 0:
            sendbuffer = numpy.array(array, dtype='b')
            self.mpi_send(comm, [sendbuffer, MPI.C_BOOL or MPI.BYTE])
    def set_error(self, message):
        """Mark this message as an error reply carrying `message`."""
        self.strings = [message]
        self.error = True
    def mpi_nonblocking_receive(self, comm, array):
        raise NotImplementedError()
    def mpi_receive(self, comm, array):
        raise NotImplementedError()
    def mpi_send(self, comm, array):
        raise NotImplementedError()
class ServerSideMPIMessage(MPIMessage):
    """MPIMessage as used on the AMUSE (parent) side of the intercomm:
    receives from worker rank 0, broadcasts/sends to every worker rank."""
    def mpi_receive(self, comm, array):
        request = comm.Irecv(array, source=0, tag=999)
        request.Wait()
    def mpi_send(self, comm, array):
        # broadcast payload to all remote (worker) ranks
        comm.Bcast(array, root=MPI.ROOT)
    def send_header(self, comm, array):
        # headers go point-to-point to every worker rank, tag 989
        requests = []
        for rank in range(comm.Get_remote_size()):
            request = comm.Isend(array, dest=rank, tag=989)
            requests.append(request)
        MPI.Request.Waitall(requests)
    def mpi_nonblocking_receive(self, comm, array):
        return comm.Irecv(array, source=0, tag=999)
    def receive_header(self, comm):
        """Receive the 11-int header, polling instead of blocking when a
        polling interval (microseconds) is configured."""
        header = numpy.zeros(11, dtype='i')
        request = self.mpi_nonblocking_receive(comm, [header, MPI.INT])
        if self.polling_interval > 0:
            is_finished = request.Test()
            while not is_finished:
                time.sleep(self.polling_interval / 1000000.)
                is_finished = request.Test()
            request.Wait()
        else:
            request.Wait()
        return header
class ClientSideMPIMessage(MPIMessage):
    """MPIMessage as used inside the worker: payloads arrive by broadcast
    from the parent, replies go point-to-point to parent rank 0."""
    def mpi_receive(self, comm, array):
        comm.Bcast(array, root=0)
    def mpi_send(self, comm, array):
        comm.Send(array, dest=0, tag=999)
    def mpi_nonblocking_receive(self, comm, array):
        return comm.Irecv(array, source=0, tag=999)
    def receive_header(self, comm):
        """Receive the 11-int header (tag 989), polling instead of blocking
        when a polling interval (microseconds) is configured."""
        header = numpy.zeros(11, dtype='i')
        request = comm.Irecv([header, MPI.INT], source=0, tag=989)
        if self.polling_interval > 0:
            is_finished = request.Test()
            while not is_finished:
                time.sleep(self.polling_interval / 1000000.)
                is_finished = request.Test()
            request.Wait()
        else:
            request.Wait()
        return header
# Buffer cache keyed by dtype; see the NOTE in pack_array.
MAPPING = {}

def pack_array(array, length, dtype):
    """Flatten `array` (one item per parameter, each holding `length`
    per-call values) into a single flat buffer for transmission.

    For dtype 'string' the result is a flat list of str, where a bare str
    or a length-1 item is repeated `length` times. For numeric dtypes the
    result is a numpy array of the requested dtype.
    """
    if dtype == 'string':
        if length == 1 and len(array) > 0 and isinstance(array[0], str):
            return array
        result = []
        for x in array:
            if isinstance(x, str):
                for _ in range(length):
                    result.append(x)
            elif len(x) == 1 and length > 1:
                for _ in range(length):
                    result.append(x[0])
            else:
                result.extend(x)
        return result
    else:
        total_length = length * len(array)
        if dtype in MAPPING:
            # BUG FIX: was `MAPPING.dtype`, an AttributeError on a dict --
            # the cache lookup must be a subscript.
            # NOTE(review): nothing ever stores into MAPPING, so this
            # branch is currently dead; kept for the intended buffer reuse.
            result = MAPPING[dtype]
            if len(result) != total_length:
                result = numpy.empty(total_length, dtype=dtype)
        else:
            result = numpy.empty(total_length, dtype=dtype)
        for i in range(len(array)):
            offset = i * length
            result[offset:offset + length] = array[i]
        return result
def unpack_array(array, length, dtype=None):
    """Split a flat buffer into ``len(array) // length`` consecutive chunks
    of `length` items each; any remainder is dropped. `dtype` is accepted
    for symmetry with pack_array and unused."""
    chunk_count = len(array) // length
    return [array[i * length:(i + 1) * length] for i in range(chunk_count)]
class AbstractMessageChannel(OptionalAttributes):
    """
    Abstract base class of all message channels.
    A message channel is used to send and retrieve messages from
    a remote party. A message channel can also setup the remote
    party. For example starting an instance of an application
    using MPI calls.
    The messages are encoded as arguments to the send and retrieve
    methods. Each message has an id and an optional list of doubles,
    integers, floats and/or strings.
    """
    def __init__(self, **options):
        # forward option keyword arguments to the OptionalAttributes machinery
        OptionalAttributes.__init__(self, **options)
    @classmethod
    def GDB(cls, full_name_of_the_worker, channel, interpreter_executable=None, immediate_run=True):
        """Build (command, args) to run the worker under gdb in an xterm."""
        arguments = ['-hold', '-display', os.environ['DISPLAY'], '-e', 'gdb']
        if immediate_run:
            arguments.extend([ '-ex', 'run'])
        arguments.extend(['--args'])
        if not interpreter_executable is None:
            arguments.append(interpreter_executable)
        arguments.append(full_name_of_the_worker)
        command = 'xterm'
        return command, arguments
    @classmethod
    def LLDB(cls, full_name_of_the_worker, channel, interpreter_executable=None, immediate_run=True):
        """Build (command, args) to run the worker under lldb in an xterm."""
        arguments = ['-hold', '-display', os.environ['DISPLAY'], '-e', 'lldb', '--']
        if not interpreter_executable is None:
            arguments.append(interpreter_executable)
        arguments.append(full_name_of_the_worker)
        command = 'xterm'
        return command, arguments
    @classmethod
    def DDD(cls, full_name_of_the_worker, channel, interpreter_executable=None, immediate_run=True):
        """Build (command, args) to run the worker under ddd (or adg on
        Windows, via the channel's adg_exe option)."""
        if os.name == 'nt':
            arguments = [full_name_of_the_worker, "--args",full_name_of_the_worker]
            command = channel.adg_exe
            return command, arguments
        else:
            arguments = ['-display', os.environ['DISPLAY'], '-e', 'ddd', '--args']
            if not interpreter_executable is None:
                arguments.append(interpreter_executable)
            arguments.append(full_name_of_the_worker)
            command = 'xterm'
            return command, arguments
    @classmethod
    def VALGRIND(cls, full_name_of_the_worker, channel, interpreter_executable=None, immediate_run=True):
        """Build (command, args) to run the worker under valgrind."""
        # arguments = ['-hold', '-display', os.environ['DISPLAY'], '-e', 'valgrind', full_name_of_the_worker]
        arguments = []
        if not interpreter_executable is None:
            arguments.append(interpreter_executable)
        arguments.append(full_name_of_the_worker)
        command = 'valgrind'
        return command, arguments
    @classmethod
    def XTERM(cls, full_name_of_the_worker, channel, interpreter_executable=None, immediate_run=True):
        """Build (command, args) to run the worker in its own xterm."""
        arguments = ['-hold', '-display', os.environ['DISPLAY'], '-e']
        if not interpreter_executable is None:
            arguments.append(interpreter_executable)
        arguments.append(full_name_of_the_worker)
        command = 'xterm'
        return command, arguments
    @classmethod
    def REDIRECT(cls, full_name_of_the_worker, stdoutname, stderrname, command=None, interpreter_executable=None, **options):
        """Wrap the worker in run_command_redirected.py so its stdout and
        stderr go to the named files; `command` defaults to this Python."""
        fname = run_command_redirected.__file__
        arguments = [fname , stdoutname, stderrname]
        if not interpreter_executable is None:
            arguments.append(interpreter_executable)
        arguments.append(full_name_of_the_worker)
        if command is None :
            command = sys.executable
        return command, arguments
    @classmethod
    def GDBR(cls, full_name_of_the_worker, channel, interpreter_executable=None, immediate_run=True):
        """Build (command, args) to run the worker under gdbserver on
        localhost:<debugger_port>; works without an xterm/X display."""
        arguments = ['localhost:{0}'.format(channel.debugger_port)]
        if not interpreter_executable is None:
            arguments.append(interpreter_executable)
        arguments.append(full_name_of_the_worker)
        command = channel.gdbserver_exe
        return command, arguments
    @classmethod
    def NODEBUGGER(cls, full_name_of_the_worker, channel, interpreter_executable=None, immediate_run=True):
        """Build (command, args) to run the worker directly, no debugger."""
        if not interpreter_executable is None:
            return interpreter_executable, [full_name_of_the_worker]
        else:
            return full_name_of_the_worker, []
    @classmethod
    def STRACE(cls, full_name_of_the_worker, channel, interpreter_executable=None, immediate_run=True):
        """Build (command, args) to run the worker under strace, writing
        one strace-out.<pid> file per process (-ff)."""
        arguments = ['-ostrace-out', '-ff']
        if not interpreter_executable is None:
            arguments.append(interpreter_executable)
        arguments.append(full_name_of_the_worker)
        command = 'strace'
        return command, arguments
    @classmethod
    def CUSTOM(cls, full_name_of_the_worker, channel, interpreter_executable=None, immediate_run=True):
        """Build (command, args) from the channel's custom_exe/custom_args
        options (shlex-split), for a user-defined debugger wrapper."""
        arguments = list(shlex.split(channel.custom_args))
        if not interpreter_executable is None:
            arguments.append(interpreter_executable)
        arguments.append(full_name_of_the_worker)
        command = channel.custom_exe
        return command, arguments
@classmethod
def is_multithreading_supported(cls):
return True
@option(type="boolean", sections=("channel",))
def initialize_mpi(self):
"""Is MPI initialized in the code or not. Defaults to True if MPI is available"""
return config.mpi.is_enabled
@option(type='string', sections=("channel",))
def worker_code_suffix(self):
return ''
@option(type='string', sections=("channel",))
def worker_code_prefix(self):
return ''
@option(type='string', sections=("channel",))
def worker_code_directory(self):
return ''
@option(type="boolean", sections=("channel",))
def can_redirect_output(self):
return True
    @option(sections=("channel",))
    def python_exe_for_redirection(self):
        """Python interpreter used by the redirection wrapper (None = sys.executable)."""
        return None
    @option(type="int", sections=("channel",))
    def debugger_port(self):
        """TCP port used by gdbserver for remote debugging (see GDBR)."""
        return 4343
    @option(type="string", sections=("channel",))
    def gdbserver_exe(self):
        """Name or path of the gdbserver executable."""
        return 'gdbserver'
    @option(type="string", sections=("channel",))
    def adg_exe(self):
        """Name or path of the adg debugger executable."""
        return 'adg.exe'
    @option(type="string", sections=("channel",))
    def custom_exe(self):
        """Executable used by the 'custom' debugger mode (see CUSTOM)."""
        return 'mintty.exe'
    @option(type="string", sections=("channel",))
    def custom_args(self):
        """Arguments passed to custom_exe ahead of the worker command (see CUSTOM)."""
        return '--hold -e gdb --args'
    @option(type='boolean', sections=("channel",))
    def debugger_immediate_run(self):
        """Whether the debugger starts the worker immediately instead of waiting at a prompt."""
        return True
    @option(type='boolean', sections=("channel",))
    def must_check_if_worker_is_up_to_date(self):
        """Whether to compare the worker binary's mtime against the interface definition."""
        return True
    @option(type='boolean', sections=("channel",))
    def check_worker_location(self):
        """Whether to verify that the worker executable actually exists on disk."""
        return True
    @option(type="int", sections=("channel",))
    def number_of_workers(self):
        """Number of parallel worker processes to start."""
        return 1
    def get_amuse_root_directory(self):
        """Return the AMUSE root directory (resolved via the 'amuse_root_dir' option)."""
        return self.amuse_root_dir
    @option(type="string", sections=('data',))
    def amuse_root_dir(self): # needed for location of data, so same as in support.__init__
        # Delegates to the package-level helper so data files are found consistently.
        return get_amuse_root_dir()
def check_if_worker_is_up_to_date(self, object):
if not self.must_check_if_worker_is_up_to_date:
return
name_of_the_compiled_file = self.full_name_of_the_worker
modificationtime_of_worker = os.stat(name_of_the_compiled_file).st_mtime
my_class = type(object)
for x in dir(my_class):
if x.startswith('__'):
continue
value = getattr(my_class, x)
if hasattr(value, 'crc32'):
is_up_to_date = value.is_compiled_file_up_to_date(modificationtime_of_worker)
if not is_up_to_date:
raise exceptions.CodeException("""The worker code of the '{0}' interface class is not up to date.
Please do a 'make clean; make' in the root directory.
""".format(type(object).__name__))
def get_full_name_of_the_worker(self, type):
if os.path.isabs(self.name_of_the_worker):
full_name_of_the_worker=self.name_of_the_worker
if not self.check_worker_location:
return full_name_of_the_worker
if not os.path.exists(full_name_of_the_worker):
raise exceptions.CodeException("The worker path has been specified, but it is not found: \n{0}".format(full_name_of_the_worker))
if not os.access(full_name_of_the_worker, os.X_OK):
raise exceptions.CodeException("The worker application exists, but it is not executable.\n{0}".format(full_name_of_the_worker))
return full_name_of_the_worker
exe_name = self.worker_code_prefix + self.name_of_the_worker + self.worker_code_suffix
if not self.check_worker_location:
if len(self.worker_code_directory) > 0:
full_name_of_the_worker = os.path.join(self.worker_code_directory, exe_name)
full_name_of_the_worker = os.path.normpath(os.path.abspath(full_name_of_the_worker))
return full_name_of_the_worker
else:
raise Exception("Must provide a worker_code_directory")
tried_workers = []
directory = os.path.dirname(inspect.getfile(type))
full_name_of_the_worker = os.path.join(directory, '..','..','_workers', exe_name)
full_name_of_the_worker = os.path.normpath(os.path.abspath(full_name_of_the_worker))
if os.path.exists(full_name_of_the_worker):
return full_name_of_the_worker
tried_workers.append(full_name_of_the_worker)
if len(self.worker_code_directory) > 0:
full_name_of_the_worker = os.path.join(self.worker_code_directory, exe_name)
full_name_of_the_worker = os.path.normpath(os.path.abspath(full_name_of_the_worker))
if os.path.exists(full_name_of_the_worker):
return full_name_of_the_worker
tried_workers.append(full_name_of_the_worker)
directory_of_this_module = os.path.dirname(os.path.dirname(__file__))
full_name_of_the_worker = os.path.join(directory_of_this_module, '_workers', exe_name)
full_name_of_the_worker = os.path.normpath(os.path.abspath(full_name_of_the_worker))
if os.path.exists(full_name_of_the_worker):
return full_name_of_the_worker
tried_workers.append(full_name_of_the_worker)
current_type = type
while not current_type.__bases__[0] is object:
directory_of_this_module = os.path.dirname(inspect.getfile(current_type))
full_name_of_the_worker = os.path.join(directory_of_this_module, exe_name)
full_name_of_the_worker = os.path.normpath(os.path.abspath(full_name_of_the_worker))
if os.path.exists(full_name_of_the_worker):
return full_name_of_the_worker
tried_workers.append(full_name_of_the_worker)
current_type = current_type.__bases__[0]
raise exceptions.CodeException("The worker application does not exist, it should be at: \n{0}".format('\n'.join(tried_workers)))
    def send_message(self, call_id=0, function_id=-1, dtype_to_arguments={}, encoded_units = None):
        """Send a call request to the worker; no-op in this base (subclasses override)."""
        pass
    def recv_message(self, call_id=0, function_id=-1, handle_as_array=False, has_units = False):
        """Receive a reply from the worker; no-op in this base (subclasses override)."""
        pass
    def nonblocking_recv_message(self, call_id=0, function_id=-1, handle_as_array=False):
        """Start an asynchronous receive; no-op in this base (subclasses override)."""
        pass
    def start(self):
        """Start the worker process; no-op in this base (subclasses override)."""
        pass
    def stop(self):
        """Stop the worker process; no-op in this base (subclasses override)."""
        pass
    def is_active(self):
        """Whether the channel has a running worker (base default: True)."""
        return True
@classmethod
def is_root(self):
return True
    def is_polling_supported(self):
        """Whether asynchronous polling of replies is supported (base default: no)."""
        return False
def determine_length_from_data(self, dtype_to_arguments):
def get_length(type_and_values):
argument_type, argument_values = type_and_values
if argument_values:
result = 1
for argument_value in argument_values:
try:
if not isinstance(argument_value, str):
result = max(result, len(argument_value))
except:
result = max(result, 1)
return result
lengths = [get_length(x) for x in dtype_to_arguments.items()]
if len(lengths) == 0:
return 1
return max(1, max(lengths))
    def split_message(self, call_id, function_id, call_count, dtype_to_arguments, encoded_units = ()):
        """Send one oversized call as several messages of at most
        ``max_message_length`` calls each and merge the partial replies.

        The merged result is stashed on the channel; the next
        ``recv_message`` call returns it (see ``_communicated_splitted_message``).
        """
        if call_count<=1:
            raise Exception("split message called with call_count<=1")
        dtype_to_result = {}
        ndone=0
        while ndone<call_count:
            # Slice every per-argument array to the current window; scalars
            # (no __iter__) are repeated unchanged for every sub-message.
            split_dtype_to_argument = {}
            for key, value in dtype_to_arguments.items():
                split_dtype_to_argument[key] = \
                    [tmp[ndone:ndone+self.max_message_length] if hasattr(tmp, '__iter__') else tmp for tmp in value]
            self.send_message(
                call_id,
                function_id,
                split_dtype_to_argument,
                encoded_units=encoded_units
            )
            partial_dtype_to_result = self.recv_message(call_id, function_id, True)
            for datatype, value in partial_dtype_to_result.items():
                if not datatype in dtype_to_result:
                    # First window: allocate a full-size container per result column.
                    dtype_to_result[datatype] = []
                    for j, element in enumerate(value):
                        if datatype == 'string':
                            dtype_to_result[datatype].append([])
                        else:
                            dtype_to_result[datatype].append(numpy.zeros((call_count,), dtype=datatype))
                # Copy this window's results into the full-size containers.
                for j, element in enumerate(value):
                    if datatype == 'string':
                        dtype_to_result[datatype][j].extend(element)
                    else:
                        dtype_to_result[datatype][j][ndone:ndone+self.max_message_length] = element
            ndone+=self.max_message_length
        self._communicated_splitted_message = True
        self._merged_results_splitted_message = dtype_to_result
# Registry mapping the value of the 'debugger' option to the classmethod that
# builds the corresponding worker launch command line (None = run directly).
AbstractMessageChannel.DEBUGGERS = {
    "none":None,
    "gdb":AbstractMessageChannel.GDB,
    "lldb":AbstractMessageChannel.LLDB,
    "ddd":AbstractMessageChannel.DDD,
    "xterm":AbstractMessageChannel.XTERM,
    "gdb-remote":AbstractMessageChannel.GDBR,
    "valgrind":AbstractMessageChannel.VALGRIND,
    "strace":AbstractMessageChannel.STRACE,
    "custom":AbstractMessageChannel.CUSTOM
}
# import time
# import ctypes
# clib_library = ctypes.CDLL("libc.so.6")
# memcpy = clib_library.memcpy
# memcpy.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t]
def is_mpd_running():
    """
    Check whether the MPD daemon process is running.

    Only relevant for AMUSE installations in an MPICH2 environment that use
    the classic MPD process manager: the daemon must be up before the first
    MPI_COMM_SPAWN call. For any other MPI vendor (e.g. OpenMPI), or when a
    modern process manager is detected, the check is skipped and True is
    returned.

    :returns: Boolean result of check whether MPD daemon is running.
    :rtype: bool

    >>> is_mpd_running()
    True
    """
    if not MpiChannel.is_supported():
        return True

    MpiChannel.ensure_mpi_initialized()
    vendor_name, _version = MPI.get_vendor()
    if vendor_name != 'MPICH2':
        return True

    # Hydra and other modern process managers export these variables;
    # with them present there is no MPD daemon to look for.
    if any(key in os.environ for key in ('PMI_PORT', 'PMI_RANK', 'HYDRA_CONTROL_FD')):
        return True
    # AMUSE_MPD_CHECK can force the check off (any value other than '1').
    if os.environ.get('AMUSE_MPD_CHECK', '1') != '1':
        return True

    try:
        process = Popen(['mpdtrace'], stdout=PIPE, stderr=PIPE)
        process.communicate()
        # mpdtrace exits with 255 when no daemon is reachable.
        return process.returncode != 255
    except OSError:
        # mpdtrace is not installed at all: nothing to check.
        return True
class MpiChannel(AbstractMessageChannel):
    """
    Message channel based on MPI calls to send and recv the messages

    :argument name_of_the_worker: Name of the application to start
    :argument number_of_workers: Number of parallel processes
    :argument legacy_interface_type: Type of the legacy interface
    :argument debug_with_gdb: If True opens an xterm with a gdb to debug the remote process
    :argument hostname: Name of the node to run the application on
    """
    # Set when a worker crashed; MPI can then no longer safely spawn new codes.
    _mpi_is_broken_after_possible_code_crash = False
    # Intercomms collected for disconnection at interpreter exit.
    _intercomms_to_disconnect = []
    # Whether the atexit finalizer has been registered.
    _is_registered = False
    # Cached SLURM node list and round-robin placement state (class-wide).
    _scheduler_nodes = []
    _scheduler_index = 0
    _scheduler_initialized = False

    def __init__(self, name_of_the_worker, legacy_interface_type=None, interpreter_executable=None, **options):
        """Create the channel and build the MPI.Info used to spawn workers.

        Does not start the worker yet; see :meth:`start`.
        """
        AbstractMessageChannel.__init__(self, **options)
        # Guards the _is_inuse flag against concurrent send/recv.
        self.inuse_semaphore = threading.Semaphore()
        # logging.basicConfig(level=logging.WARN)
        # logger.setLevel(logging.DEBUG)
        # logging.getLogger("code").setLevel(logging.DEBUG)
        self.ensure_mpi_initialized()
        self.name_of_the_worker = name_of_the_worker
        self.interpreter_executable = interpreter_executable
        if not legacy_interface_type is None:
            self.full_name_of_the_worker = self.get_full_name_of_the_worker(legacy_interface_type)
        else:
            self.full_name_of_the_worker = self.name_of_the_worker
        if self.check_mpi:
            if not is_mpd_running():
                raise exceptions.CodeException("The mpd daemon is not running, please make sure it is started before starting this code")
            if self._mpi_is_broken_after_possible_code_crash:
                raise exceptions.CodeException("Another code has crashed, cannot spawn a new code, please stop the script and retry")
        # Placement: explicit hostname wins, then job-scheduler info, else empty Info.
        if not self.hostname is None:
            self.info = MPI.Info.Create()
            self.info['host'] = self.hostname
        else:
            if self.job_scheduler:
                self.info = self.get_info_from_job_scheduler(self.job_scheduler, self.number_of_workers)
            else:
                self.info = MPI.Info.Create()
        for key,value in self.mpi_info_options.items():
            self.info[key]=value
        self.cached = None
        self.intercomm = None
        self._is_inuse = False
        self._communicated_splitted_message = False
        logger.debug("MPI channel created with info items: %s", str(self.info.items()))

    @classmethod
    def ensure_mpi_initialized(cls):
        """Lazily import mpi4py (which initializes MPI) and register cleanup."""
        global MPI
        if MPI is None:
            import mpi4py.MPI
            MPI = mpi4py.MPI
            cls.register_finalize_code()

    @classmethod
    def is_threaded(cls):
        """Report whether mpi4py was configured with thread support."""
        #We want this for backwards compatibility with mpi4py versions < 2.0.0
        #currently unused after Init/Init_threaded was removed from
        #this module.
        from mpi4py import rc
        try:
            return rc.threaded
        except AttributeError:
            # newer mpi4py renamed the attribute to 'threads'
            return rc.threads

    @classmethod
    def register_finalize_code(cls):
        """Register the atexit hook that disconnects leftover intercomms (once)."""
        if not cls._is_registered:
            atexit.register(cls.finialize_mpi_atexit)
            cls._is_registered = True

    @classmethod
    def finialize_mpi_atexit(cls):
        """atexit hook: disconnect intercomms left open by stop().

        NOTE(review): name contains a typo ('finialize'); kept as-is because it
        is referenced by register_finalize_code.
        """
        if not MPI.Is_initialized():
            return
        if MPI.Is_finalized():
            return
        try:
            for x in cls._intercomms_to_disconnect:
                x.Disconnect()
        except MPI.Exception as ex:
            # best effort at shutdown; errors here are not actionable
            return

    @classmethod
    def is_multithreading_supported(cls):
        """Whether the MPI library was initialized with MPI_THREAD_MULTIPLE."""
        return MPI.Query_thread() == MPI.THREAD_MULTIPLE

    @option(type="boolean", sections=("channel",))
    def check_mpi(self):
        """Whether to run the MPD/crash sanity checks before spawning."""
        return True

    @option(type="boolean", sections=("channel",))
    def debug_with_gdb(self):
        """Legacy flag; debugger selection happens via the 'debugger' option."""
        return False

    @option(sections=("channel",))
    def hostname(self):
        """Host to spawn the worker on (None = let MPI decide)."""
        return None

    @option(choices=AbstractMessageChannel.DEBUGGERS.keys(), sections=("channel",))
    def debugger(self):
        """Name of the debugger to use when starting the code"""
        return "none"

    @option(type="dict", sections=("channel",))
    def mpi_info_options(self):
        """Extra key/value pairs merged into the spawn MPI.Info object."""
        return dict()

    @option(type="int", sections=("channel",))
    def max_message_length(self):
        """
        For calls to functions that can handle arrays, MPI messages may get too long for large N.
        The MPI channel will split long messages into blocks of size max_message_length.
        """
        return 1000000

    @late
    def redirect_stdout_file(self):
        # Default: discard worker stdout.
        return "/dev/null"

    @late
    def redirect_stderr_file(self):
        # Default: discard worker stderr.
        return "/dev/null"

    @late
    def debugger_method(self):
        # Resolved lazily from the 'debugger' option.
        return self.DEBUGGERS[self.debugger]

    @classmethod
    def is_supported(cls):
        """Whether this channel can be used (MPI enabled and mpi4py importable)."""
        if hasattr(config, 'mpi') and hasattr(config.mpi, 'is_enabled'):
            if not config.mpi.is_enabled:
                return False
        try:
            from mpi4py import MPI
            return True
        except ImportError:
            return False

    @option(type="boolean", sections=("channel",))
    def can_redirect_output(self):
        """Output redirection is unsupported under mvapich2's mpispawn launcher."""
        name_of_the_vendor, version = MPI.get_vendor()
        if name_of_the_vendor == 'MPICH2':
            if 'MPISPAWN_ARGV_0' in os.environ:
                return False
        return True

    @option(type="boolean", sections=("channel",))
    def must_disconnect_on_stop(self):
        """Disconnecting at stop() hangs under mpispawn; defer to atexit there."""
        name_of_the_vendor, version = MPI.get_vendor()
        if name_of_the_vendor == 'MPICH2':
            if 'MPISPAWN_ARGV_0' in os.environ:
                return False
        return True

    @option(type="int", sections=("channel",))
    def polling_interval_in_milliseconds(self):
        """Polling interval for receives (0 = blocking receive)."""
        return 0

    @classmethod
    def is_root(cls):
        """Whether this process is rank 0 of MPI.COMM_WORLD."""
        cls.ensure_mpi_initialized()
        return MPI.COMM_WORLD.rank == 0

    def start(self):
        """Spawn the worker processes over MPI (optionally wrapped by a debugger
        or an output-redirection helper)."""
        logger.debug("starting mpi worker process")
        logger.debug("mpi_enabled: %s", str(self.initialize_mpi))
        if not self.debugger_method is None:
            command, arguments = self.debugger_method(self.full_name_of_the_worker, self,
                interpreter_executable=self.interpreter_executable, immediate_run=self.debugger_immediate_run)
        else:
            if not self.can_redirect_output or (self.redirect_stdout_file == 'none' and self.redirect_stderr_file == 'none'):
                if self.interpreter_executable is None:
                    command = self.full_name_of_the_worker
                    arguments = None
                else:
                    command = self.interpreter_executable
                    arguments = [self.full_name_of_the_worker]
            else:
                command, arguments = self.REDIRECT(self.full_name_of_the_worker, self.redirect_stdout_file, self.redirect_stderr_file, command=self.python_exe_for_redirection, interpreter_executable=self.interpreter_executable)
        logger.debug("spawning %d mpi processes with command `%s`, arguments `%s` and environment '%s'", self.number_of_workers, command, arguments, os.environ)
        self.intercomm = MPI.COMM_SELF.Spawn(command, arguments, self.number_of_workers, info=self.info)
        logger.debug("worker spawn done")

    def stop(self):
        """Disconnect from the worker; on MPI errors mark MPI as broken."""
        if not self.intercomm is None:
            try:
                if self.must_disconnect_on_stop:
                    self.intercomm.Disconnect()
                else:
                    # Disconnect later, at interpreter exit (see atexit hook).
                    self._intercomms_to_disconnect.append(self.intercomm)
            except MPI.Exception as ex:
                if ex.error_class == MPI.ERR_OTHER:
                    type(self)._mpi_is_broken_after_possible_code_crash = True
            self.intercomm = None

    def determine_length_from_datax(self, dtype_to_arguments):
        # NOTE(review): appears to be a dead/duplicate variant of
        # determine_length_from_data (note the trailing 'x'); nothing in this
        # file calls it — TODO confirm before removing.
        def get_length(x):
            if x:
                try:
                    if not isinstance(x[0], str):
                        return len(x[0])
                except:
                    return 1
            return 1
        lengths = [get_length(x) for x in dtype_to_arguments.values()]
        if len(lengths) == 0:
            return 1
        return max(1, max(lengths))

    def send_message(self, call_id, function_id, dtype_to_arguments={}, encoded_units = ()):
        """Send one call request to the worker, splitting oversized calls."""
        if self.intercomm is None:
            raise exceptions.CodeException("You've tried to send a message to a code that is not running")
        call_count = self.determine_length_from_data(dtype_to_arguments)
        if call_count > self.max_message_length:
            self.split_message(call_id, function_id, call_count, dtype_to_arguments, encoded_units)
        else:
            if self.is_inuse():
                raise exceptions.CodeException("You've tried to send a message to a code that is already handling a message, this is not correct")
            # Double-check under the semaphore, then claim the channel.
            self.inuse_semaphore.acquire()
            try:
                if self._is_inuse:
                    raise exceptions.CodeException("You've tried to send a message to a code that is already handling a message, this is not correct")
                self._is_inuse = True
            finally:
                self.inuse_semaphore.release()
            message = ServerSideMPIMessage(
                call_id, function_id,
                call_count, dtype_to_arguments,
                encoded_units = encoded_units
            )
            message.send(self.intercomm)

    def recv_message(self, call_id, function_id, handle_as_array, has_units = False):
        """Receive the reply for a previous send_message and validate it."""
        # A preceding split_message already merged the full reply; hand it out.
        if self._communicated_splitted_message:
            x = self._merged_results_splitted_message
            self._communicated_splitted_message = False
            del self._merged_results_splitted_message
            return x
        message = ServerSideMPIMessage(
            polling_interval=self.polling_interval_in_milliseconds * 1000
        )
        try:
            message.receive(self.intercomm)
        except MPI.Exception as ex:
            self._is_inuse = False
            self.stop()
            raise ex
        # Release the channel claim made in send_message.
        self.inuse_semaphore.acquire()
        try:
            if not self._is_inuse:
                raise exceptions.CodeException("You've tried to recv a message to a code that is not handling a message, this is not correct")
            self._is_inuse = False
        finally:
            self.inuse_semaphore.release()
        if message.error:
            error_message=message.strings[0] if len(message.strings)>0 else "no error message"
            if message.call_id != call_id or message.function_id != function_id:
                self.stop()
                error_message+=" - code probably died, sorry."
            raise exceptions.CodeException("Error in code: " + error_message)
        # A mismatched id means the worker is out of sync; stop it.
        if message.call_id != call_id:
            self.stop()
            raise exceptions.CodeException('Received reply for call id {0} but expected {1}'.format(message.call_id, call_id))
        if message.function_id != function_id:
            self.stop()
            raise exceptions.CodeException('Received reply for function id {0} but expected {1}'.format(message.function_id, function_id))
        if has_units:
            return message.to_result(handle_as_array), message.encoded_units
        else:
            return message.to_result(handle_as_array)

    def nonblocking_recv_message(self, call_id, function_id, handle_as_array, has_units = False):
        """Start an asynchronous receive; returns a request whose handler
        performs the same validation as recv_message."""
        request = ServerSideMPIMessage().nonblocking_receive(self.intercomm)
        def handle_result(function):
            self._is_inuse = False
            message = function()
            if message.error:
                error_message=message.strings[0] if len(message.strings)>0 else "no error message"
                if message.call_id != call_id or message.function_id != function_id:
                    self.stop()
                    error_message+=" - code probably died, sorry."
                raise exceptions.CodeException("Error in (asynchronous) communication with worker: " + error_message)
            if message.call_id != call_id:
                self.stop()
                raise exceptions.CodeException('Received reply for call id {0} but expected {1}'.format(message.call_id, call_id))
            if message.function_id != function_id:
                self.stop()
                raise exceptions.CodeException('Received reply for function id {0} but expected {1}'.format(message.function_id, function_id))
            if has_units:
                return message.to_result(handle_as_array), message.encoded_units
            else:
                return message.to_result(handle_as_array)
        request.add_result_handler(handle_result)
        return request

    def is_active(self):
        """Whether the worker is currently spawned and connected."""
        return self.intercomm is not None

    def is_inuse(self):
        """Whether a call is currently in flight on this channel."""
        return self._is_inuse

    def is_polling_supported(self):
        """MPI receives can be polled (see polling_interval_in_milliseconds)."""
        return True

    def __getstate__(self):
        # Channels are not picklable in a meaningful way; serialize a stub.
        return {'state':'empty'}

    def __setstate__(self, state):
        # Restore as a disconnected channel; the worker must be started again.
        self.info = MPI.INFO_NULL
        self.cached = None
        self.intercomm = None
        self._is_inuse = False
        self._communicated_splitted_message = False
        self.inuse_semaphore = threading.Semaphore()

    @option(sections=("channel",))
    def job_scheduler(self):
        """Name of the job scheduler to use when starting the code, if given will use job scheduler to find list of hostnames for spawning"""
        return ""

    def get_info_from_job_scheduler(self, name, number_of_workers = 1):
        """Build spawn placement info from the named scheduler (only 'slurm' supported)."""
        if name == "slurm":
            return self.get_info_from_slurm(number_of_workers)
        return MPI.INFO_NULL

    @classmethod
    def get_info_from_slurm(cls, number_of_workers):
        """Build an MPI.Info with a 'host' list taken round-robin from the
        SLURM allocation; state is class-wide so successive channels spread
        over the allocated tasks."""
        has_slurm_env_variables = 'SLURM_NODELIST' in os.environ and 'SLURM_TASKS_PER_NODE' in os.environ
        if not has_slurm_env_variables:
            return MPI.INFO_NULL
        if not cls._scheduler_initialized:
            # Expand the SLURM node list to one entry per task.
            nodelist = slurm.parse_slurm_nodelist(os.environ['SLURM_NODELIST'])
            tasks_per_node = slurm.parse_slurm_tasks_per_node(os.environ['SLURM_TASKS_PER_NODE'])
            all_nodes = []
            for node, tasks in zip(nodelist, tasks_per_node):
                for _ in range(tasks):
                    all_nodes.append(node)
            cls._scheduler_nodes = all_nodes
            cls._scheduler_index = 1 # start at 1 assumes that the python script is running on the first node as the first task
            cls._scheduler_initialized = True
            print("NODES:", cls._scheduler_nodes)
        hostnames = []
        count = 0
        while count < number_of_workers:
            hostnames.append(cls._scheduler_nodes[cls._scheduler_index])
            count += 1
            cls._scheduler_index += 1
            if cls._scheduler_index >= len(cls._scheduler_nodes):
                cls._scheduler_index = 0
        host = ','.join(hostnames)
        print("HOST:", host, cls._scheduler_index, os.environ['SLURM_TASKS_PER_NODE'])
        info = MPI.Info.Create()
        info['host'] = host #actually in mpich and openmpi, the host parameter is interpreted as a comma separated list of host names,
        return info
class MultiprocessingMPIChannel(AbstractMessageChannel):
    """
    Message channel based on JSON messages.

    The remote party functions as a message forwarder.
    Each message is forwarded to a real application using MPI.

    This is message channel is a lot slower than the MPI message
    channel. But, it is useful during testing with
    the MPICH2 nemesis channel. As the tests will run as one
    application on one node they will cause oversaturation
    of the processor(s) on the node. Each legacy code
    will call the MPI_FINALIZE call and this call will wait
    for the MPI_FINALIZE call of the main test process. During
    this wait it will consume about 10% of the processor power.
    To mitigate this problem, we can use objects of this class
    instead of the normal MPIChannel. Then, part of the
    test is performed in a separate application (at least
    as MPI sees it) and this part can be stopped after each
    sub-test, thus removing unneeded applications.
    """
    def __init__(self, name_of_the_worker, legacy_interface_type=None, interpreter_executable=None, **options):
        """Resolve the worker path; the forwarder process starts in start()."""
        AbstractMessageChannel.__init__(self, **options)
        self.name_of_the_worker = name_of_the_worker
        self.interpreter_executable = interpreter_executable
        if not legacy_interface_type is None:
            self.full_name_of_the_worker = self.get_full_name_of_the_worker(legacy_interface_type)
        else:
            self.full_name_of_the_worker = self.name_of_the_worker
        self.process = None

    @option(type="boolean")
    def debug_with_gdb(self):
        """Legacy flag; unused by the forwarding channel itself."""
        return False

    @option
    def hostname(self):
        """Host option kept for interface parity; forwarder runs locally."""
        return None

    def start(self):
        """Start the forwarder: open a UNIX socket, spawn a python subprocess
        that runs run_mpi_channel(), and wait for it to connect back."""
        name_of_dir = "/tmp/amuse_" + os.getenv('USER')
        self.name_of_the_socket, self.server_socket = self._createAServerUNIXSocket(name_of_dir)
        environment = os.environ.copy()
        # Make sure the subprocess can import this module.
        if 'PYTHONPATH' in environment:
            environment['PYTHONPATH'] = environment['PYTHONPATH'] + ':' + self._extra_path_item(__file__)
        else:
            environment['PYTHONPATH'] = self._extra_path_item(__file__)
        # Forward every channel option to the forwarder process.
        all_options = {}
        for x in self.iter_options():
            all_options[x.name] = getattr(self, x.name)
        template = """from {3} import {4}
o = {1!r}
m = channel.MultiprocessingMPIChannel('{0}',**o)
m.run_mpi_channel('{2}')"""
        modulename = type(self).__module__
        packagagename, thismodulename = modulename.rsplit('.', 1)
        code_string = template.format(
            self.full_name_of_the_worker,
            all_options,
            self.name_of_the_socket,
            packagagename,
            thismodulename,
        )
        self.process = Popen([sys.executable, "-c", code_string], env=environment)
        self.client_socket, undef = self.server_socket.accept()

    def is_active(self):
        """Whether the forwarder subprocess is running."""
        return self.process is not None

    def stop(self):
        """Ask the forwarder to stop, then tear down sockets and the process."""
        self._send(self.client_socket, ('stop', (),))
        result = self._recv(self.client_socket)
        self.process.wait()
        self.client_socket.close()
        self.server_socket.close()
        self._remove_socket(self.name_of_the_socket)
        self.process = None

    def run_mpi_channel(self, name_of_the_socket):
        """Forwarder main loop (runs in the subprocess): owns a real MpiChannel
        and relays send/recv/stop commands received over the UNIX socket."""
        channel = MpiChannel(self.full_name_of_the_worker, **self._local_options)
        channel.start()
        socket = self._createAClientUNIXSocket(name_of_the_socket)
        try:
            is_running = True
            while is_running:
                message, args = self._recv(socket)
                result = None
                if message == 'stop':
                    channel.stop()
                    is_running = False
                if message == 'send_message':
                    result = channel.send_message(*args)
                if message == 'recv_message':
                    result = channel.recv_message(*args)
                self._send(socket, result)
        finally:
            socket.close()

    def send_message(self, call_id=0, function_id=-1, dtype_to_arguments={}, encoded_units = ()):
        """Relay a send_message call to the forwarder and wait for its ack."""
        self._send(self.client_socket, ('send_message', (call_id, function_id, dtype_to_arguments),))
        result = self._recv(self.client_socket)
        return result

    def recv_message(self, call_id=0, function_id=-1, handle_as_array=False, has_units=False):
        """Relay a recv_message call to the forwarder and return its result."""
        self._send(self.client_socket, ('recv_message', (call_id, function_id, handle_as_array),))
        result = self._recv(self.client_socket)
        return result

    def _send(self, client_socket, message):
        # Wire format: 4-byte native-int length header, then the pickle payload.
        message_string = pickle.dumps(message)
        header = struct.pack("i", len(message_string))
        client_socket.sendall(header)
        client_socket.sendall(message_string)

    def _recv(self, client_socket):
        # Counterpart of _send: read the length header, then unpickle the payload.
        header = self._receive_all(client_socket, 4)
        length = struct.unpack("i", header)
        message_string = self._receive_all(client_socket, length[0])
        return pickle.loads(message_string)

    def _receive_all(self, client_socket, number_of_bytes):
        """Read exactly number_of_bytes from the socket (recv may return short)."""
        block_size = 4096
        bytes_left = number_of_bytes
        blocks = []
        while bytes_left > 0:
            if bytes_left < block_size:
                block_size = bytes_left
            block = client_socket.recv(block_size)
            blocks.append(block)
            bytes_left -= len(block)
        return bytearray().join(blocks)

    def _createAServerUNIXSocket(self, name_of_the_directory, name_of_the_socket=None):
        """Create and bind a listening UNIX-domain socket (random name by default)."""
        import uuid
        import socket
        if name_of_the_socket == None:
            name_of_the_socket = os.path.join(name_of_the_directory, str(uuid.uuid1()))
        if not os.path.exists(name_of_the_directory):
            os.makedirs(name_of_the_directory)
        server_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        self._remove_socket(name_of_the_socket)
        server_socket.bind(name_of_the_socket)
        server_socket.listen(5)
        return (name_of_the_socket, server_socket)

    def _createAClientUNIXSocket(self, name_of_the_socket):
        """Connect a UNIX-domain client socket to the given path."""
        import socket
        client_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        # client_socket.settimeout(0)header
        client_socket.connect(name_of_the_socket)
        return client_socket

    def _remove_socket(self, name_of_the_socket):
        # Best-effort unlink of a stale socket file.
        try:
            os.remove(name_of_the_socket)
        except OSError:
            pass

    def _extra_path_item(self, path_of_the_module):
        """Return the longest sys.path entry that is a prefix of the module path
        (used to reconstruct PYTHONPATH for the forwarder subprocess)."""
        result = ''
        for x in sys.path:
            if path_of_the_module.startswith(x):
                if len(x) > len(result):
                    result = x
        return result

    @option(choices=AbstractMessageChannel.DEBUGGERS.keys(), sections=("channel",))
    def debugger(self):
        """Name of the debugger to use when starting the code"""
        return "none"

    @option(type="boolean")
    def check_mpi(self):
        """Forwarded to the wrapped MpiChannel's MPD sanity check."""
        return True
class SocketMessage(AbstractMessage):
    """Message (de)serialized over a plain TCP/UNIX socket.

    Wire format: a 44-byte header (4 flag bytes + 10 native int32 fields),
    followed by the typed payload arrays in a fixed order
    (ints, longs, floats, doubles, booleans, strings, unit doubles).
    """

    def _receive_all(self, nbytes, thesocket):
        """Read exactly nbytes from the socket (recv may return short reads)."""
        # logger.debug("receiving %d bytes", nbytes)
        result = []
        while nbytes > 0:
            chunk = min(nbytes, 10240)
            data_bytes = thesocket.recv(chunk)
            if len(data_bytes) == 0:
                raise exceptions.CodeException("lost connection to code")
            result.append(data_bytes)
            nbytes -= len(data_bytes)
            # logger.debug("got %d bytes, result length = %d", len(data_bytes), len(result))
        if len(result) > 0:
            return type(result[0])().join(result)
        else:
            return b""

    def receive(self, socket):
        """Read one complete message (header + payload arrays) from the socket."""
        # logger.debug("receiving message")
        header_bytes = self._receive_all(44, socket)
        # First 4 bytes are flag bytes: [big_endian, error, has_units, unused].
        flags = numpy.frombuffer(header_bytes, dtype="b", count=4, offset=0)
        if flags[0] != self.big_endian:
            raise exceptions.CodeException("endianness in message does not match native endianness")
        if flags[1]:
            self.error = True
        else:
            self.error = False
        # Same 44 bytes reinterpreted as 11 int32s; header[0] overlaps the flags.
        header = numpy.copy(numpy.frombuffer(header_bytes, dtype="i", offset=0))
        # logger.debug("receiving message with flags %s and header %s", flags, header)
        # id of this call
        self.call_id = header[1]
        # function ID
        self.function_id = header[2]
        # number of calls in this message
        self.call_count = header[3]
        # number of X's in TOTAL
        number_of_ints = header[4]
        number_of_longs = header[5]
        number_of_floats = header[6]
        number_of_doubles = header[7]
        number_of_booleans = header[8]
        number_of_strings = header[9]
        number_of_units = header[10]
        # Payload arrays follow in this fixed order (mirrors send()).
        self.ints = self.receive_ints(socket, number_of_ints)
        self.longs = self.receive_longs(socket, number_of_longs)
        self.floats = self.receive_floats(socket, number_of_floats)
        self.doubles = self.receive_doubles(socket, number_of_doubles)
        self.booleans = self.receive_booleans(socket, number_of_booleans)
        self.strings = self.receive_strings(socket, number_of_strings)
        self.encoded_units = self.receive_doubles(socket, number_of_units)
        # logger.debug("message received")

    def receive_ints(self, socket, count):
        """Read *count* int32 values; [] when count is 0."""
        if count > 0:
            nbytes = count * 4 # size of int
            data_bytes = self._receive_all(nbytes, socket)
            result = numpy.copy(numpy.frombuffer(data_bytes, dtype='int32'))
            return result
        else:
            return []

    def receive_longs(self, socket, count):
        """Read *count* int64 values; [] when count is 0."""
        if count > 0:
            nbytes = count * 8 # size of long
            data_bytes = self._receive_all(nbytes, socket)
            result = numpy.copy(numpy.frombuffer(data_bytes, dtype='int64'))
            return result
        else:
            return []

    def receive_floats(self, socket, count):
        """Read *count* float32 values; [] when count is 0."""
        if count > 0:
            nbytes = count * 4 # size of float
            data_bytes = self._receive_all(nbytes, socket)
            result = numpy.copy(numpy.frombuffer(data_bytes, dtype='f4'))
            return result
        else:
            return []

    def receive_doubles(self, socket, count):
        """Read *count* float64 values; [] when count is 0."""
        if count > 0:
            nbytes = count * 8 # size of double
            data_bytes = self._receive_all(nbytes, socket)
            result = numpy.copy(numpy.frombuffer(data_bytes, dtype='f8'))
            return result
        else:
            return []

    def receive_booleans(self, socket, count):
        """Read *count* boolean bytes; [] when count is 0."""
        if count > 0:
            nbytes = count * 1 # size of boolean/byte
            data_bytes = self._receive_all(nbytes, socket)
            result = numpy.copy(numpy.frombuffer(data_bytes, dtype='b'))
            return result
        else:
            return []

    def receive_strings(self, socket, count):
        """Read *count* UTF-8 strings: an int32 length array, then the string
        bytes concatenated with one separator byte after each string."""
        if count > 0:
            lengths = self.receive_ints(socket, count)
            total = lengths.sum() + len(lengths)
            data_bytes = self._receive_all(total, socket)
            strings = []
            begin = 0
            for size in lengths:
                strings.append(data_bytes[begin:begin + size].decode('utf-8'))
                begin = begin + size + 1
            return numpy.array(strings)
        else:
            return []

    def nonblocking_receive(self, socket):
        """Return an async request object that will receive this message."""
        return async_request.ASyncSocketRequest(self, socket)

    def send(self, socket):
        """Write one complete message (header + payload arrays) to the socket."""
        flags = numpy.array([self.big_endian, self.error, len(self.encoded_units) > 0, False], dtype="b")
        header = numpy.array([
            self.call_id,
            self.function_id,
            self.call_count,
            len(self.ints),
            len(self.longs),
            len(self.floats),
            len(self.doubles),
            len(self.booleans),
            len(self.strings),
            len(self.encoded_units),
        ], dtype='i')
        # logger.debug("sending message with flags %s and header %s", flags, header)
        socket.sendall(flags.tobytes())
        socket.sendall(header.tobytes())
        # Payload order must match receive().
        self.send_ints(socket, self.ints)
        self.send_longs(socket, self.longs)
        self.send_floats(socket, self.floats)
        self.send_doubles(socket, self.doubles)
        self.send_booleans(socket, self.booleans)
        self.send_strings(socket, self.strings)
        self.send_doubles(socket, self.encoded_units)
        # logger.debug("message send")

    def send_doubles(self, socket, array):
        # float64 payload; nothing is written for an empty array.
        if len(array) > 0:
            data_buffer = numpy.array(array, dtype='f8')
            socket.sendall(data_buffer.tobytes())

    def send_ints(self, socket, array):
        # int32 payload; nothing is written for an empty array.
        if len(array) > 0:
            data_buffer = numpy.array(array, dtype='int32')
            socket.sendall(data_buffer.tobytes())

    def send_floats(self, socket, array):
        # float32 payload; nothing is written for an empty array.
        if len(array) > 0:
            data_buffer = numpy.array(array, dtype='f4')
            socket.sendall(data_buffer.tobytes())

    def send_strings(self, socket, array):
        """Write strings as an int32 length array plus NUL-joined UTF-8 bytes
        (one trailing NUL), matching receive_strings."""
        if len(array) > 0:
            lengths = numpy.array( [len(s) for s in array] ,dtype='int32')
            chars=(chr(0).join(array)+chr(0)).encode("utf-8")
            # Sanity check: multi-byte characters would break the length bookkeeping.
            if len(chars) != lengths.sum()+len(lengths):
                raise Exception("send_strings size mismatch {0} vs {1}".format( len(chars) , lengths.sum()+len(lengths) ))
            self.send_ints(socket, lengths)
            socket.sendall(chars)

    def send_booleans(self, socket, array):
        # one byte per boolean; nothing is written for an empty array.
        if len(array) > 0:
            data_buffer = numpy.array(array, dtype='b')
            socket.sendall(data_buffer.tobytes())

    def send_longs(self, socket, array):
        # int64 payload; nothing is written for an empty array.
        if len(array) > 0:
            data_buffer = numpy.array(array, dtype='int64')
            socket.sendall(data_buffer.tobytes())
class SocketChannel(AbstractMessageChannel):
    def __init__(self, name_of_the_worker, legacy_interface_type=None, interpreter_executable=None, **options):
        """Set up a TCP-socket based channel; the worker must run on the local host."""
        AbstractMessageChannel.__init__(self, **options)
        #logging.getLogger().setLevel(logging.DEBUG)
        logger.debug("initializing SocketChannel with options %s", options)
        # self.name_of_the_worker = name_of_the_worker + "_sockets"
        self.name_of_the_worker = name_of_the_worker
        self.interpreter_executable = interpreter_executable
        # The worker is spawned as a local subprocess, so remote hostnames are rejected.
        if self.hostname != None and self.hostname not in ['localhost',socket.gethostname()]:
            raise exceptions.CodeException("can only run codes on local machine using SocketChannel, not on %s", self.hostname)
        self.id = 0
        if not legacy_interface_type is None:
            self.full_name_of_the_worker = self.get_full_name_of_the_worker(legacy_interface_type)
        else:
            self.full_name_of_the_worker = self.name_of_the_worker
        logger.debug("full name of worker is %s", self.full_name_of_the_worker)
        self._is_inuse = False
        self._communicated_splitted_message = False
        # The connected socket is created in start(), once the worker dials back.
        self.socket = None
@option(sections=("channel",))
def mpiexec(self):
"""mpiexec with arguments"""
if len(config.mpi.mpiexec):
return config.mpi.mpiexec
return ''
@option(sections=("channel",))
def mpiexec_number_of_workers_flag(self):
"""flag to use, so that the number of workers are defined"""
return '-n'
@late
def debugger_method(self):
return self.DEBUGGERS[self.debugger]
def accept_worker_connection(self, server_socket, process):
#wait for the worker to connect. check if the process is still running once in a while
for i in range(0, 60):
#logger.debug("accepting connection")
try:
server_socket.settimeout(1.0)
return server_socket.accept()
except socket.timeout:
#update and read returncode
if process.poll() is not None:
raise exceptions.CodeException('could not connect to worker, worker process terminated')
#logger.error("worker not connecting, waiting...")
raise exceptions.CodeException('worker still not started after 60 seconds')
def start(self):
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.bind(('', 0))
server_socket.settimeout(1.0)
server_socket.listen(1)
logger.debug("starting socket worker process, listening for worker connection on %s", server_socket.getsockname())
#this option set by CodeInterface
logger.debug("mpi_enabled: %s", str(self.initialize_mpi))
# set arguments to name of the worker, and port number we listen on
self.stdout = None
self.stderr = None
arguments = []
if not self.debugger_method is None:
command, arguments = self.debugger_method(self.full_name_of_the_worker, self, interpreter_executable=self.interpreter_executable)
else:
if self.redirect_stdout_file == 'none' and self.redirect_stderr_file == 'none':
if self.interpreter_executable is None:
command = self.full_name_of_the_worker
arguments = []
else:
command = self.interpreter_executable
arguments = [self.full_name_of_the_worker]
else:
command, arguments = self.REDIRECT(self.full_name_of_the_worker, self.redirect_stdout_file, self.redirect_stderr_file, command=self.python_exe_for_redirection, interpreter_executable=self.interpreter_executable)
#start arguments with command
arguments.insert(0, command)
if self.initialize_mpi and len(self.mpiexec) > 0:
mpiexec = shlex.split(self.mpiexec)
# prepend with mpiexec and arguments back to front
arguments.insert(0, str(self.number_of_workers))
arguments.insert(0, self.mpiexec_number_of_workers_flag)
arguments[:0] = mpiexec
command = mpiexec[0]
#append with port and hostname where the worker should connect
arguments.append(str(server_socket.getsockname()[1]))
#hostname of this machine
arguments.append(str(socket.gethostname()))
#initialize MPI inside worker executable
arguments.append('true')
else:
#append arguments with port and socket where the worker should connect
arguments.append(str(server_socket.getsockname()[1]))
#local machine
arguments.append('localhost')
#do not initialize MPI inside worker executable
arguments.append('false')
logger.debug("starting process with command `%s`, arguments `%s` and environment '%s'", command, arguments, os.environ)
self.process = Popen(arguments, executable=command, stdin=PIPE, stdout=None, stderr=None, close_fds=self.close_fds)
logger.debug("waiting for connection from worker")
self.socket, address = self.accept_worker_connection(server_socket, self.process)
self.socket.setblocking(1)
self.socket.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
server_socket.close()
# logger.debug("got connection from %s", address)
# logger.info("worker %s initialized", self.name_of_the_worker)
@option(type="boolean", sections=("sockets_channel",))
def close_fds(self):
"""close open file descriptors when spawning child process"""
return True
@option(choices=AbstractMessageChannel.DEBUGGERS.keys(), sections=("channel",))
def debugger(self):
"""Name of the debugger to use when starting the code"""
return "none"
@option(sections=("channel",))
def hostname(self):
return None
def stop(self):
if (self.socket == None):
return
logger.debug("stopping socket worker %s", self.name_of_the_worker)
self.socket.close()
self.socket = None
# should lookinto using poll with a timeout or some other mechanism
# when debugger method is on, no killing
count = 0
while(count < 5):
returncode = self.process.poll()
if not returncode is None:
break
time.sleep(0.2)
count += 1
if not self.stdout is None:
self.stdout.close()
if not self.stderr is None:
self.stderr.close()
def is_active(self):
return self.socket is not None
def is_inuse(self):
return self._is_inuse
def determine_length_from_datax(self, dtype_to_arguments):
def get_length(type_and_values):
argument_type, argument_values = type_and_values
if argument_values:
result = 1
for argument_value in argument_values:
try:
if not isinstance(argument_value, str):
result = max(result, len(argument_value))
except:
result = max(result, 1)
return result
lengths = [get_length(x) for x in dtype_to_arguments.items()]
if len(lengths) == 0:
return 1
return max(1, max(lengths))
def send_message(self, call_id, function_id, dtype_to_arguments={}, encoded_units = ()):
call_count = self.determine_length_from_data(dtype_to_arguments)
# logger.info("sending message for call id %d, function %d, length %d", id, tag, length)
if self.is_inuse():
raise exceptions.CodeException("You've tried to send a message to a code that is already handling a message, this is not correct")
if self.socket is None:
raise exceptions.CodeException("You've tried to send a message to a code that is not running")
if call_count > self.max_message_length:
self.split_message(call_id, function_id, call_count, dtype_to_arguments, encoded_units)
else:
message = SocketMessage(call_id, function_id, call_count, dtype_to_arguments, encoded_units = encoded_units)
message.send(self.socket)
self._is_inuse = True
def recv_message(self, call_id, function_id, handle_as_array, has_units=False):
self._is_inuse = False
if self._communicated_splitted_message:
x = self._merged_results_splitted_message
self._communicated_splitted_message = False
del self._merged_results_splitted_message
return x
message = SocketMessage()
message.receive(self.socket)
if message.error:
error_message=message.strings[0] if len(message.strings)>0 else "no error message"
if message.call_id != call_id or message.function_id != function_id:
self.stop()
error_message+=" - code probably died, sorry."
raise exceptions.CodeException("Error in code: " + error_message)
if message.call_id != call_id:
self.stop()
raise exceptions.CodeException('Received reply for call id {0} but expected {1}'.format(message.call_id, call_id))
if message.function_id != function_id:
self.stop()
raise exceptions.CodeException('Received reply for function id {0} but expected {1}'.format(message.function_id, function_id))
if has_units:
return message.to_result(handle_as_array), message.encoded_units
else:
return message.to_result(handle_as_array)
def nonblocking_recv_message(self, call_id, function_id, handle_as_array, has_units=False):
request = SocketMessage().nonblocking_receive(self.socket)
def handle_result(function):
self._is_inuse = False
message = function()
if message.error:
error_message=message.strings[0] if len(message.strings)>0 else "no error message"
if message.call_id != call_id or message.function_id != function_id:
self.stop()
error_message+=" - code probably died, sorry."
raise exceptions.CodeException("Error in (asynchronous) communication with worker: " + error_message)
if message.call_id != call_id:
self.stop()
raise exceptions.CodeException('Received reply for call id {0} but expected {1}'.format(message.call_id, call_id))
if message.function_id != function_id:
self.stop()
raise exceptions.CodeException('Received reply for function id {0} but expected {1}'.format(message.function_id, function_id))
if has_units:
return message.to_result(handle_as_array), message.encoded_units
else:
return message.to_result(handle_as_array)
request.add_result_handler(handle_result)
return request
@option(type="int", sections=("channel",))
def max_message_length(self):
"""
For calls to functions that can handle arrays, MPI messages may get too long for large N.
The MPI channel will split long messages into blocks of size max_message_length.
"""
return 1000000
class OutputHandler(threading.Thread):
    """Daemon thread that connects to the distributed daemon on *port* and
    copies every byte it receives onto *stream* (e.g. sys.stdout/sys.stderr).

    The daemon assigns the connection an id (exposed as ``self.id``) which
    workers use to route their output to this handler.
    """
    def __init__(self, stream, port):
        threading.Thread.__init__(self)
        self.stream = stream
        logger.debug("output handler connecting to daemon at %d", port)
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        address = ('localhost', port)
        try:
            self.socket.connect(address)
        except:
            raise exceptions.CodeException("Could not connect to Distributed Daemon at " + str(address))
        self.socket.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
        # identify this connection as an output stream to the daemon
        self.socket.sendall('TYPE_OUTPUT'.encode('utf-8'))
        # fetch ID of this connection
        result = SocketMessage()
        result.receive(self.socket)
        self.id = result.strings[0]
        logger.debug("output handler successfully connected to daemon at %d", port)
        # daemon thread: must not keep the interpreter alive at shutdown
        self.daemon = True
        self.start()
    def run(self):
        """Forward daemon output to the stream until the socket hits EOF."""
        while True:
            # logger.debug("receiving data for output")
            data = self.socket.recv(1024)
            if len(data) == 0:
                # logger.debug("end of output", len(data))
                return
            # logger.debug("got %d bytes", len(data))
            self.stream.write(data)
class DistributedChannel(AbstractMessageChannel):
    """Channel that starts and talks to workers through a running AMUSE
    distributed daemon process (on localhost) instead of spawning the worker
    directly.

    Worker stdout/stderr are either redirected to files or streamed back
    through :class:`OutputHandler` connections to the daemon.
    """
    default_distributed_instance = None
    @staticmethod
    def getStdoutID(instance):
        # lazily create one shared stdout OutputHandler per distributed instance
        if not hasattr(instance, "_stdoutHandler") or instance._stdoutHandler is None:
            instance._stdoutHandler = OutputHandler(sys.stdout, instance.port)
        return instance._stdoutHandler.id
    @staticmethod
    def getStderrID(instance):
        # lazily create one shared stderr OutputHandler per distributed instance
        if not hasattr(instance, "_stderrHandler") or instance._stderrHandler is None:
            instance._stderrHandler = OutputHandler(sys.stderr, instance.port)
        return instance._stderrHandler.id
    def __init__(self, name_of_the_worker, legacy_interface_type=None, interpreter_executable=None,
                 distributed_instance=None, dynamic_python_code=False, **options):
        AbstractMessageChannel.__init__(self, **options)
        self._is_inuse = False
        self._communicated_splitted_message = False
        if distributed_instance is None:
            if self.default_distributed_instance is None:
                raise Exception("No default distributed instance present, and none explicitly passed to code")
            self.distributed_instance = self.default_distributed_instance
        else:
            self.distributed_instance = distributed_instance
        #logger.setLevel(logging.DEBUG)
        logger.info("initializing DistributedChannel with options %s", options)
        self.socket=None
        self.name_of_the_worker = name_of_the_worker
        self.interpreter_executable = interpreter_executable
        self.dynamic_python_code = dynamic_python_code
        if self.number_of_workers == 0:
            self.number_of_workers = 1
        if self.label == None:
            self.label = ""
        logger.debug("number of workers is %d, number of threads is %s, label is %s", self.number_of_workers, self.number_of_threads, self.label)
        self.daemon_host = 'localhost'    # Distributed process always running on the local machine
        self.daemon_port = self.distributed_instance.port    # Port number for the Distributed process
        logger.debug("port is %d", self.daemon_port)
        self.id = 0
        if not legacy_interface_type is None:
            # worker specified by type. Figure out where this file is
            # mostly (only?) used by dynamic python codes
            directory_of_this_module = os.path.dirname(inspect.getfile(legacy_interface_type))
            worker_path = os.path.join(directory_of_this_module, self.name_of_the_worker)
            self.full_name_of_the_worker = os.path.normpath(os.path.abspath(worker_path))
            self.name_of_the_worker = os.path.basename(self.full_name_of_the_worker)
        else:
            # worker specified by executable (usually already absolute)
            self.full_name_of_the_worker = os.path.normpath(os.path.abspath(self.name_of_the_worker))
        global_options = GlobalOptions()
        # the daemon resolves the worker path relative to the amuse root
        self.executable = os.path.relpath(self.full_name_of_the_worker, global_options.amuse_rootdirectory)
        self.worker_dir = os.path.dirname(self.full_name_of_the_worker)
        logger.debug("executable is %s", self.executable)
        logger.debug("full name of the worker is %s", self.full_name_of_the_worker)
        logger.debug("worker dir is %s", self.worker_dir)
        self._is_inuse = False
    def check_if_worker_is_up_to_date(self, object):
        # intentionally disabled: the worker may live on a remote resource
        # if self.hostname != 'localhost':
        #     return
        #
        # logger.debug("hostname = %s, checking for worker", self.hostname)
        #
        # AbstractMessageChannel.check_if_worker_is_up_to_date(self, object)
        pass
    def start(self):
        """Ask the daemon (via a reserved function id) to start the worker."""
        logger.debug("connecting to daemon")
        # if redirect = none, set output file to console stdout stream ID, otherwise make absolute
        if (self.redirect_stdout_file == 'none'):
            self.redirect_stdout_file = self.getStdoutID(self.distributed_instance)
        else:
            self.redirect_stdout_file = os.path.abspath(self.redirect_stdout_file)
        # if redirect = none, set error file to console stderr stream ID, otherwise make absolute
        if (self.redirect_stderr_file == 'none'):
            self.redirect_stderr_file = self.getStderrID(self.distributed_instance)
        else:
            self.redirect_stderr_file = os.path.abspath(self.redirect_stderr_file)
        logger.debug("output send to = " + self.redirect_stdout_file)
        logger.debug("error send to = " + self.redirect_stderr_file)
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            self.socket.connect((self.daemon_host, self.daemon_port))
        except:
            self.socket = None
            raise exceptions.CodeException("Could not connect to Ibis Daemon at " + str(self.daemon_port))
        self.socket.setblocking(1)
        self.socket.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
        self.socket.sendall('TYPE_WORKER'.encode('utf-8'))
        # function id 10101010 is the daemon's "start worker" request
        arguments = {'string': [self.executable, self.redirect_stdout_file, self.redirect_stderr_file, self.label, self.worker_dir], 'int32': [self.number_of_workers, self.number_of_threads], 'bool': [ self.dynamic_python_code]}
        message = SocketMessage(call_id=1, function_id=10101010, call_count=1, dtype_to_arguments=arguments)
        message.send(self.socket)
        logger.info("waiting for worker %s to be initialized", self.name_of_the_worker)
        result = SocketMessage()
        result.receive(self.socket)
        if result.error:
            logger.error("Could not start worker: %s", result.strings[0])
            self.stop()
            raise exceptions.CodeException("Could not start worker for " + self.name_of_the_worker + ": " + result.strings[0])
        # the daemon replies with the amuse root dir on the remote resource
        self.remote_amuse_dir = result.strings[0]
        logger.info("worker %s initialized", self.name_of_the_worker)
        logger.info("worker remote amuse dir = %s", self.remote_amuse_dir)
    @option(choices=AbstractMessageChannel.DEBUGGERS.keys(), sections=("channel",))
    def debugger(self):
        """Name of the debugger to use when starting the code"""
        return "none"
    def get_amuse_root_directory(self):
        # root dir on the machine that actually runs the worker
        return self.remote_amuse_dir
    @option(type="int", sections=("channel",))
    def number_of_threads(self):
        return 0
    @option(type="string", sections=("channel",))
    def label(self):
        return None
    def stop(self):
        """Close the daemon connection (the daemon tears down the worker)."""
        if self.socket is not None:
            logger.info("stopping worker %s", self.name_of_the_worker)
            self.socket.close()
            self.socket = None
    def is_active(self):
        return self.socket is not None
    def is_inuse(self):
        # True between send_message() and the matching recv_message()
        return self._is_inuse
    def determine_length_from_datax(self, dtype_to_arguments):
        # NOTE(review): 'x'-suffixed and apparently unused here --
        # send_message calls determine_length_from_data (presumably defined
        # on the base class); confirm before removing.
        def get_length(x):
            if x:
                try:
                    if not isinstance(x[0], str):
                        return len(x[0])
                except:
                    return 1
        lengths = [get_length(x) for x in dtype_to_arguments.values()]
        if len(lengths) == 0:
            return 1
        return max(1, max(lengths))
    def send_message(self, call_id, function_id, dtype_to_arguments={}, encoded_units = None):
        """Send one call to the worker through the daemon; marks the channel in use."""
        call_count = self.determine_length_from_data(dtype_to_arguments)
        logger.debug("sending message for call id %d, function %d, length %d", call_id, function_id, call_count)
        if self.is_inuse():
            raise exceptions.CodeException("You've tried to send a message to a code that is already handling a message, this is not correct")
        if self.socket is None:
            raise exceptions.CodeException("You've tried to send a message to a code that is not running")
        if call_count > self.max_message_length:
            # long vector calls are chopped into several messages
            self.split_message(call_id, function_id, call_count, dtype_to_arguments, encoded_units)
        else:
            message = SocketMessage(call_id, function_id, call_count, dtype_to_arguments, False, False)
            message.send(self.socket)
            self._is_inuse = True
    def recv_message(self, call_id, function_id, handle_as_array, has_units=False):
        """Receive the worker's reply for a previously sent call."""
        self._is_inuse = False
        if self._communicated_splitted_message:
            # reply was already assembled by split_message handling
            x = self._merged_results_splitted_message
            self._communicated_splitted_message = False
            del self._merged_results_splitted_message
            return x
        message = SocketMessage()
        message.receive(self.socket)
        if message.error:
            error_message=message.strings[0] if len(message.strings)>0 else "no error message"
            if message.call_id != call_id or message.function_id != function_id:
                #~ self.stop()
                error_message+=" - code probably died, sorry."
            raise exceptions.CodeException("Error in worker: " + error_message)
        if has_units:
            return message.to_result(handle_as_array), message.encoded_units
        else:
            return message.to_result(handle_as_array)
    def nonblocking_recv_message(self, call_id, function_id, handle_as_array, has_units=False):
        """Asynchronous variant of recv_message; returns a request object."""
        # raise exceptions.CodeException("Nonblocking receive not supported by DistributedChannel")
        request = SocketMessage().nonblocking_receive(self.socket)
        def handle_result(function):
            self._is_inuse = False
            message = function()
            if message.error:
                error_message=message.strings[0] if len(message.strings)>0 else "no error message"
                if message.call_id != call_id or message.function_id != function_id:
                    self.stop()
                    error_message+=" - code probably died, sorry."
                raise exceptions.CodeException("Error in (asynchronous) communication with worker: " + error_message)
            if message.call_id != call_id:
                self.stop()
                raise exceptions.CodeException('Received reply for call id {0} but expected {1}'.format(message.call_id, call_id))
            if message.function_id != function_id:
                self.stop()
                raise exceptions.CodeException('Received reply for function id {0} but expected {1}'.format(message.function_id, function_id))
            if has_units:
                return message.to_result(handle_as_array), message.encoded_units
            else:
                return message.to_result(handle_as_array)
        request.add_result_handler(handle_result)
        return request
    @option(type="int", sections=("channel",))
    def max_message_length(self):
        """
        For calls to functions that can handle arrays, MPI messages may get too long for large N.
        The MPI channel will split long messages into blocks of size max_message_length.
        """
        return 1000000
class LocalChannel(AbstractMessageChannel):
    """Channel that runs the worker in-process: the compiled (Cython) worker
    module is imported into this interpreter and messages are handed to it
    directly, without sockets or MPI message passing.
    """
    def __init__(self, name_of_the_worker, legacy_interface_type=None, interpreter_executable=None,
            distributed_instance=None, dynamic_python_code=False, **options):
        AbstractMessageChannel.__init__(self, **options)
        MpiChannel.ensure_mpi_initialized()
        if not legacy_interface_type is None:
            # the interface class records which shared-object module backs it
            self.so_module = legacy_interface_type.__so_module__
            self.package, _ = legacy_interface_type.__module__.rsplit('.',1)
        else:
            raise Exception("Need to give the legacy interface type for the local channel")
        self.legacy_interface_type = legacy_interface_type
        self._is_inuse = False
        self.module = None
    def check_if_worker_is_up_to_date(self, object):
        # nothing to check: there is no separate worker executable
        pass
    def start(self):
        """Import the worker module and wrap it in a CythonImplementation."""
        from . import import_module
        from . import python_code
        module = import_module.import_unique(self.package + "." + self.so_module)
        # NOTE(review): debug print left in on purpose; remove once stable
        print(module, self.package + "." + self.so_module)
        module.set_comm_world(MPI.COMM_SELF)
        self.local_implementation = python_code.CythonImplementation(module, self.legacy_interface_type)
        self.module = module
    def stop(self):
        """Unload the worker module."""
        from . import import_module
        import_module.cleanup_module(self.module)
        self.module = None
    def is_active(self):
        return not self.module is None
    def is_inuse(self):
        # True between send_message() and the matching recv_message()
        return self._is_inuse
    def send_message(self, call_id, function_id, dtype_to_arguments={}, encoded_units = None):
        """Store the call as a LocalMessage; it is executed on recv_message."""
        call_count = self.determine_length_from_data(dtype_to_arguments)
        self.message = LocalMessage(call_id, function_id, call_count, dtype_to_arguments, encoded_units = encoded_units)
        # BUG FIX: this used to be ``self.is_inuse = True``, which replaced
        # the is_inuse() method with a plain bool and made any later
        # ``channel.is_inuse()`` call raise TypeError.  Set the flag
        # attribute instead, like the other channel classes do.
        self._is_inuse = True
    def recv_message(self, call_id, function_id, handle_as_array, has_units=False):
        """Execute the stored call synchronously and return its result."""
        # the call completes here, so mark the channel available again
        # (consistent with SocketChannel/DistributedChannel.recv_message)
        self._is_inuse = False
        output_message = LocalMessage(call_id, function_id, self.message.call_count)
        self.local_implementation.handle_message(self.message, output_message)
        if has_units:
            return output_message.to_result(handle_as_array),output_message.encoded_units
        else:
            return output_message.to_result(handle_as_array)
    def nonblocking_recv_message(self, call_id, function_id, handle_as_array):
        # asynchronous receive is not supported for the in-process channel
        pass
    def determine_length_from_datax(self, dtype_to_arguments):
        # NOTE(review): 'x'-suffixed and apparently unused --
        # send_message calls determine_length_from_data (presumably defined
        # on the base class); confirm before removing.
        def get_length(x):
            if x:
                try:
                    if not isinstance(x[0], str):
                        return len(x[0])
                except:
                    return 1
            return 1
        lengths = [get_length(x) for x in dtype_to_arguments.values()]
        if len(lengths) == 0:
            return 1
        return max(1, max(lengths))
    def is_polling_supported(self):
        return False
class LocalMessage(AbstractMessage):
    """Message passed in-process by LocalChannel; needs no (de)serialization."""
    pass
| 88,683
| 34.962693
| 228
|
py
|
amuse
|
amuse-main/src/amuse/rfi/gencode.py
|
import sys
import os.path
import os
from optparse import OptionParser
# Should probably use an absolute import here (support.config), but
# we're not guaranteed this script will always be in a support
# subdirectory with an __init__.py file.
#try: # running as a module
# from . import config
#except (ImportError, ValueError): # running as a stand-alone script
# import config
# setup_sys_path()
# this should not be necessary?
# make the current working directory importable, so interface modules given
# on the command line can be found
sys.path.insert(0,os.getcwd())
from amuse import config
from amuse.rfi.tools import create_c
from amuse.rfi.tools import create_fortran
from amuse.rfi.tools import create_java
from amuse.rfi.tools import create_dir
from amuse.rfi.tools import create_python_worker
from amuse.support import get_amuse_root_dir
from amuse.support.literature import TrackLiteratureReferences
def get_amuse_directory():
    """Return the absolute path of the ``build/lib`` directory two levels
    above this script (where the built amuse package lives)."""
    two_up = os.path.dirname(os.path.dirname(__file__))
    lib_directory = os.path.join(two_up, 'build', 'lib')
    if not os.path.isabs(lib_directory):
        lib_directory = os.path.abspath(lib_directory)
    return lib_directory
# in case of trouble consult old python 2:
#~ def get_amuse_directory():
#~ filename_of_this_script = __file__
#~ directory_of_this_script = os.path.dirname(os.path.dirname(filename_of_this_script))
#~ if os.path.isabs(directory_of_this_script):
#~ return directory_of_this_script
#~ else:
#~ return os.path.abspath(directory_of_this_script)
def get_amuse_directory_root():
    """Return the absolute path of the directory two levels above this
    script (the amuse source root)."""
    two_up = os.path.dirname(os.path.dirname(__file__))
    if os.path.isabs(two_up):
        return two_up
    return os.path.abspath(two_up)
def setup_sys_path():
    """Prepend the AMUSE directories to sys.path and append the CWD.

    Requires the ``AMUSE_DIR`` environment variable (raises KeyError when
    absent); ``MUSE_PACKAGE_DIR`` optionally points at a separate source
    tree, otherwise AMUSE_DIR itself is used as the source root.
    """
    amuse_directory = os.environ["AMUSE_DIR"]
    sys.path.insert(0, amuse_directory)
    source_root = os.environ.get("MUSE_PACKAGE_DIR")
    if source_root is None:
        source_root = amuse_directory
    else:
        sys.path.insert(0, source_root)
    sys.path.insert(0, os.path.join(source_root, "src"))
    sys.path.append(os.getcwd())
class ParseCommandLine(object):
    """Command-line front end for the code generator: builds the optparse
    parser, validates options, and resolves the classes named in the
    ``--ignore`` / ``--underscore`` options."""
    usage = """usage: %prog [options] name_of_module name_of_class_in_module.
or: %prog --mode=dir name_of_the_code
This script will generate code from the class with name <name_of_class_in_module>. The
class must be defined in the module <name_of_module>. The module name
can be a python file or the python module name.
If mode is dir the script will create a directory with all files
needed to start creating a code interface.
This script handles all code generation for the AMUSE framework. It can
be used to create C++ or Fortran code to handle the MPI messages,
create a header file or create stub code as a start for defining
the interface between the code and AMUSE.
Examples
--------
To generate code for interfacing with MPI do:
    %prog --type=c --mode=mpi test.py TestInterface
or (for fortran):
    %prog --type=f90 --mode=mpi test.py TestInterface
To generate a header file do (for C):
    %prog --type=h test.py TestInterface
or (for C++):
    %prog --type=H test.py TestInterface
To generate a stub file do:
    %prog --type=c --mode=stub test.py TestInterface
or (for fortran):
    %prog --type=f90 --mode=stub test.py TestInterface
To generate create a directory and put files in it do:
    %prog --type=c --mode=dir MyCode
or (for fortran):
    %prog --type=f90 --mode=dir MyCode
To see a description of all arguments do:
    %prog --help
"""
    def __init__(self):
        # builds the option parser; parsing itself happens in start()
        self.parser = OptionParser(self.usage)
        #~ self.parser.prog = 'build.py' #hack to set the name, for reporting errors and help
        self.parser.add_option(
            "-t",
            "--type",
            choices=["c","h", "H", "f90", "py", "java"],
            default="c",
            dest="type",
            help="TYPE of the code to generate. Can be one of c, h, H, f90, py or java. <c> will generate c code. <h/H> will generate c/c++ header. <f90> will generate fortran 90 code. <py> will generate a python worker wrapper <java> will generate java interface or class, depending on mode. (Defaults to c)")
        self.parser.add_option(
            "-m",
            "--mode",
            choices=["mpi","stub", "dir", "sockets", "interface", "class", "script"],
            default="mpi",
            dest="mode",
            help="MODE of the code to generate. Can be <mpi>, <stub>, <dir>,<sockets>, <interface>, <class> or <script>. Generate the MPI handling code or STUB code for the link between mpi and the code (if needed). <dir> will create a directory ann populate it with the files needed to build a code. (Defaults to mpi)")
        self.parser.add_option(
            "-o",
            "--output",
            default="-",
            dest="output",
            help="Name of the OUTPUT file. Use - for standard out. ")
        self.parser.add_option(
            "-i",
            "--ignore",
            default="",
            dest="ignore",
            help="Name of the classes to ignore, functions defined on these classes will not generate code. Comma separated list")
        self.parser.add_option(
            "-u",
            "--underscore",
            default="",
            dest="underscore",
            help="Name of the classes to underscore the functions of, for XL fortran compilers")
        self.parser.add_option(
            "-n",
            "--needs-mpi",
            default="true",
            dest="needs_mpi",
            help="If this boolean flag is set, the worker will initialize mpi, even in the sockets channel is used. Defaults to true")
        self.parser.add_option(
            "-x",
            "--executable",
            action="store_true",
            default=False,
            dest="make_executable",
            help="Set the executable bit when generating the output file")
        self.parser.add_option(
            "--get-amuse-dir",
            action="store_true",
            default=False,
            dest="get_amuse_dir",
            help="Only output amuse directory")
        self.parser.add_option(
            "--get-amuse-configmk",
            action="store_true",
            default=False,
            dest="get_amuse_configmk",
            help="dump amuse config.mk")
        self.options = None
        self.arguments = None
    def parse_options(self):
        """Run the option parser and resolve ignore/underscore class lists."""
        (self.options, self.arguments) = self.parser.parse_args()
        if self.options.ignore:
            self.options.ignore_classes = list(self.parse_ignore_classes())
        else:
            self.options.ignore_classes = []
        if self.options.underscore:
            self.options.underscore_classes = list(self.parse_underscore_classes())
        else:
            self.options.underscore_classes = []
        # positional-argument slots; filled in by parse_arguments()
        self.options.name_of_implementation_class = None
        self.options.name_of_module_or_python_file = None
        self.options.name_of_class = None
        self.options.name_of_the_code = None
    def parse_arguments(self):
        """Validate positional arguments according to the selected mode."""
        if self.options.get_amuse_dir or self.options.get_amuse_configmk:
            # informational modes take no positional arguments
            return
        if self.options.mode == 'dir':
            if len(self.arguments) != 1:
                self.show_error_and_exit("incorrect number of arguments, need name of the code")
            self.options.name_of_the_code = self.arguments[0]
        else:
            if not len(self.arguments) in (2,3) :
                self.show_error_and_exit("incorrect number of arguments")
            try:
                self.options.name_of_module_or_python_file = self.arguments[0]
                if len(self.arguments) > 1:
                    self.options.name_of_class = self.arguments[1]
                if len(self.arguments) > 2:
                    self.options.name_of_implementation_class = self.arguments[2]
            except Exception as exception:
                self.show_error_and_exit(exception)
    def parse_ignore_classes(self):
        """Yield the class objects named in --ignore (dotted module.Class)."""
        names = self.options.ignore.split(',')
        for name in names:
            index_of_module_classname_split = name.rfind('.')
            modulename = name[:index_of_module_classname_split]
            classname = name[index_of_module_classname_split+1:]
            __import__(modulename)
            class_to_ignore = getattr(sys.modules[modulename], classname)
            yield class_to_ignore
    def parse_underscore_classes(self):
        """Yield the class objects named in --underscore (dotted module.Class)."""
        names = self.options.underscore.split(',')
        for name in names:
            index_of_module_classname_split = name.rfind('.')
            modulename = name[:index_of_module_classname_split]
            classname = name[index_of_module_classname_split+1:]
            __import__(modulename)
            class_to_underscore = getattr(sys.modules[modulename], classname)
            yield class_to_underscore
    def start(self):
        """Parse options and then positional arguments."""
        self.parse_options()
        self.parse_arguments()
    def show_error_and_exit(self, exception):
        # optparse's error() prints the message and exits with status 2
        self.parser.error(exception)
def module_name(string):
    """Convert a ``.py`` file path under the amuse src tree into a dotted
    module name; anything not ending in ``.py`` is returned unchanged.

    Raises Exception when the file does not exist or lies outside the
    amuse src directory.
    """
    if not string.endswith('.py'):
        return string
    amuse_src_directory = os.path.join(get_amuse_directory(), 'src')
    path = string if os.path.isabs(string) else os.path.join(os.getcwd(), string)
    if not os.path.exists(path):
        raise Exception("Cannot find file with name {0}".format(path))
    if not path.startswith(amuse_src_directory):
        raise Exception("File {0} must be placed under directory {1}.".format(path, amuse_src_directory))
    dotted = path[len(amuse_src_directory)+1:-len('.py')]
    return dotted.replace(os.sep, '.')
def make_cplusplus_header():
    """Build a C++ header generator: the C header generator with the
    ``extern "C"`` wrapping switched off."""
    generator = create_c.GenerateACHeaderStringFromASpecificationClass()
    generator.make_extern_c = False
    return generator
def make_a_python_worker(channel_type):
    """Build a python worker generator configured for *channel_type*
    ('mpi' or 'sockets')."""
    worker_builder = create_python_worker.CreateAPythonWorker()
    worker_builder.channel_type = channel_type
    return worker_builder
def make_a_mpi_python_worker():
    """Python worker generator using the MPI channel."""
    return make_a_python_worker('mpi')
def make_a_socket_python_worker():
    """Python worker generator using the sockets channel."""
    return make_a_python_worker('sockets')
def make_file(uc):
    """Generate one output file (code, header, stub or worker script) from
    the specification class selected on the command line.

    *uc* is the ParseCommandLine instance; its ``options`` select the
    (type, mode) generator, the specification/implementation classes and
    the output destination ('-' for stdout).
    """
    settings=uc.options
    implementation_class = None
    try:
        if settings.name_of_module_or_python_file.endswith('.py'):
            # load the classes by exec-ing the given file into a fresh dict
            module = {}
            # Replace with runpy in the future?
            with open(settings.name_of_module_or_python_file) as fh:
                text = fh.read()
            code = compile(text, settings.name_of_module_or_python_file, 'exec')
            exec(code, module)
            #execfile(settings.name_of_module_or_python_file, module)
            specification_class = module[settings.name_of_class]
            if not settings.name_of_implementation_class is None:
                implementation_class = module[settings.name_of_implementation_class]
        else:
            # a dotted module name: import it and fetch the classes
            module = __import__(settings.name_of_module_or_python_file,fromlist=[settings.name_of_class])
            specification_class = getattr(module, settings.name_of_class)
            if not settings.name_of_implementation_class is None:
                implementation_class = getattr(module, settings.name_of_implementation_class)
    except ImportError as exception:
        uc.show_error_and_exit(exception)
    # (type, mode) -> generator factory
    usecases = {
        ('c','mpi'): create_c.GenerateACSourcecodeStringFromASpecificationClass,
        ('h','mpi'): create_c.GenerateACHeaderStringFromASpecificationClass,
        ('H','mpi'): make_cplusplus_header,
        ('f90','mpi'): create_fortran.GenerateAFortranSourcecodeStringFromASpecificationClass,
        ('c','stub'): create_c.GenerateACStubStringFromASpecificationClass,
        ('f90','stub'): create_fortran.GenerateAFortranStubStringFromASpecificationClass,
        ('java','interface'): create_java.GenerateAJavaInterfaceStringFromASpecificationClass,
        ('java','class'): create_java.GenerateAJavaSourcecodeStringFromASpecificationClass,
        ('java','script'): create_java.GenerateAJavaWorkerScript,
        ('py','sockets'): make_a_socket_python_worker,
        ('py','mpi'): make_a_mpi_python_worker,
    }
    try:
        builder = usecases[(settings.type, settings.mode)]()
        builder.specification_class = specification_class
        if not implementation_class is None:
            builder.implementation_factory = implementation_class
        builder.ignore_functions_from_specification_classes = settings.ignore_classes
        builder.underscore_functions_from_specification_classes = settings.underscore_classes
        builder.needs_mpi = settings.needs_mpi.lower() == 'true'
        builder.is_mpi_enabled = config.mpi.is_enabled
        builder.name_of_outputfile = settings.output
    except:
        # unknown (type, mode) pair raises KeyError above
        uc.show_error_and_exit("'{0}' and '{1}' is not a valid combination of type and mode, cannot generate the code".format(settings.type, settings.mode))
    if settings.output == '-':
        sys.stdout.write(str(builder.result) + '\n')
    else:
        try:
            with open(settings.output, "w") as f:
                f.write(builder.result)
            if settings.make_executable:
                # 0o755: owner rwx, group/other rx
                os.chmod(settings.output, 0o755)
        except Exception as exception:
            uc.show_error_and_exit(exception)
def make_directory(uc):
    """Create and populate a skeleton directory for a new community code.

    Dispatches on (settings.type, settings.mode) — currently only
    ('c', 'dir') and ('f90', 'dir') are supported — then runs the chosen
    directory builder rooted at the current working directory.

    :param uc: the parsed command-line object; provides ``options`` and
        ``show_error_and_exit``.
    """
    settings = uc.options
    usecases = {
        ('c', 'dir'): create_dir.CreateADirectoryAndPopulateItWithFilesForACCode,
        ('f90', 'dir'): create_dir.CreateADirectoryAndPopulateItWithFilesForAFortranCode,
    }
    # Only the table lookup belongs in the try: the original bare `except:`
    # also swallowed unrelated errors raised while configuring the builder
    # and misreported them as an invalid type/mode combination.
    try:
        builder_factory = usecases[(settings.type, settings.mode)]
    except KeyError:
        uc.show_error_and_exit(
            "'{0}' and '{1}' is not a valid combination of type and mode, "
            "cannot generate the code".format(settings.type, settings.mode))
    builder = builder_factory()
    builder.name_of_the_code_interface_class = settings.name_of_the_code
    builder.path_of_the_root_directory = os.getcwd()
    builder.start()
def amusifier():
    """Command-line entry point for the amusifier tool.

    Parses options, then either prints the AMUSE root dir / config.mk and
    exits, or generates a code directory or a single generated file.
    """
    TrackLiteratureReferences.suppress_output()
    parser = ParseCommandLine()
    parser.start()
    opts = parser.options
    # Informational queries: print and leave immediately.
    if opts.get_amuse_dir:
        print(get_amuse_root_dir())
        exit(0)
    if opts.get_amuse_configmk:
        config_path = os.path.join(get_amuse_root_dir(), "config.mk")
        with open(config_path) as config_file:
            print(config_file.read())
        exit(0)
    # Otherwise generate: a whole directory skeleton, or one output file.
    if opts.mode == 'dir':
        make_directory(parser)
    else:
        make_file(parser)
# Script entry point: run the amusifier CLI when executed directly.
if __name__ == '__main__':
    amusifier()
| 15,246
| 37.022444
| 320
|
py
|
amuse
|
amuse-main/src/amuse/rfi/__init__.py
| 0
| 0
| 0
|
py
|
|
amuse
|
amuse-main/src/amuse/rfi/tools/create_java.py
|
from amuse.support.core import late
from amuse.support import exceptions, options
from amuse import config
from amuse.rfi.tools.create_code import GenerateASourcecodeString
from amuse.rfi.tools.create_code import GenerateASourcecodeStringFromASpecificationClass
from amuse.rfi.tools.create_code import DTypeSpec
from amuse.rfi.tools.create_code import DTypeToSpecDictionary
from amuse.rfi.core import LegacyFunctionSpecification
import sys
import os
import inspect
# Table mapping AMUSE datatype names to Java-side type information.
# NOTE(review): DTypeSpec's field order/semantics are declared in
# create_code; here the populated fields look like buffer-accessor names
# (e.g. 'Int' -> getIntSlice/setIntSlice) and the Java type keyword
# (e.g. 'int') — confirm against create_code.DTypeSpec before relying on it.
dtype_to_spec = DTypeToSpecDictionary({
    'int32' : DTypeSpec('Int', 'Int', '', 'int', ''),
    'int64' : DTypeSpec('Long', 'Long',
        '', 'long', ''),
    'float32' : DTypeSpec('Float', 'Float',
        '', 'float', ''),
    'float64' : DTypeSpec('Double', 'Double',
        '', 'double', ''),
    'bool' : DTypeSpec('Boolean', 'Boolean',
        '', 'boolean', ''),
    'string' : DTypeSpec('String', 'String',
        '', 'String', ''),
})
# Java import statements, presumably emitted verbatim at the top of every
# generated worker source file (usage is outside this view — verify).
# The string content is generated-code text: do not reflow or translate it.
IMPORTS_CODE_STRING = """
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.InetSocketAddress;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.IntBuffer;
import java.nio.LongBuffer;
import java.nio.FloatBuffer;
import java.nio.DoubleBuffer;
import java.nio.channels.SocketChannel;
"""
# Verbatim Java source for a static AmuseMessage class: the AMUSE binary
# wire protocol (an 11-int header — see HEADER_SIZE — plus per-type payload
# ByteBuffers and UTF-8 string buffers) read from / written to a
# SocketChannel. Emitted into generated worker code; the string content is
# generated-code text — do not reflow, reformat, or translate it.
AMUSE_MESSAGE_CLASS_CODE_STRING = """
public static class AmuseMessage {
public static final int HEADER_SIZE = 11; // integers
// 4 byte flags field.
public static final int HEADER_FLAGS = 0;
// content of flags field (first 4 bytes of message header) currently:
// - endianness
// - if an exception has occurred
public static final int HEADER_BIG_ENDIAN_FLAG = 0;
public static final int HEADER_ERROR_FLAG = 1;
public static final int HEADER_CALL_ID_INDEX = 1;
public static final int HEADER_FUNCTION_ID_INDEX = 2;
public static final int HEADER_CALL_COUNT_INDEX = 3;
public static final int HEADER_INT_COUNT_INDEX = 4;
public static final int HEADER_LONG_COUNT_INDEX = 5;
public static final int HEADER_FLOAT_COUNT_INDEX = 6;
public static final int HEADER_DOUBLE_COUNT_INDEX = 7;
public static final int HEADER_BOOLEAN_COUNT_INDEX = 8;
public static final int HEADER_STRING_COUNT_INDEX = 9;
public static final int HEADER_UNITS_COUNT_INDEX = 10;
public static final int SIZEOF_INT = 4;
public static final int SIZEOF_LONG = 8;
public static final int SIZEOF_FLOAT = 4;
public static final int SIZEOF_DOUBLE = 8;
public static final int SIZEOF_BOOLEAN = 1;
public static final byte TRUE_BYTE = (1 & 0xFF);
public static final byte FALSE_BYTE = (0 & 0xFF);
public static final int FUNCTION_ID_INIT = 10101010;
public static final int FUNCTION_ID_STOP = 0;
public static final int FUNCTION_ID_REDIRECT_OUTPUT = 1141573512;
private static boolean hasRemaining(ByteBuffer... buffers) {
for (ByteBuffer buffer : buffers) {
if (buffer.hasRemaining()) {
return true;
}
}
return false;
}
static void readAll(SocketChannel channel, ByteBuffer... bytes) throws IOException {
while (hasRemaining(bytes)) {
long read = channel.read(bytes);
if (read == -1) {
throw new IOException("Connection closed on reading data");
}
}
}
private final ByteBuffer headerBytes;
private ByteBuffer intBytes;
private ByteBuffer longBytes;
private ByteBuffer floatBytes;
private ByteBuffer doubleBytes;
private ByteBuffer booleanBytes;
private ByteBuffer stringHeaderBytes;
private ByteBuffer[] byteBuffers;
private ByteBuffer[] allButStringByteBuffers;
// UTF-8 encoded strings
private ByteBuffer[] stringBytes;
// view of buffers (for easy access)
private IntBuffer header;
private IntBuffer stringHeader;
/**
* Empty message.
*/
public AmuseMessage() {
headerBytes = ByteBuffer.allocateDirect(HEADER_SIZE * SIZEOF_INT);
intBytes = ByteBuffer.allocateDirect(0);
longBytes = ByteBuffer.allocateDirect(0);
floatBytes = ByteBuffer.allocateDirect(0);
doubleBytes = ByteBuffer.allocateDirect(0);
booleanBytes = ByteBuffer.allocateDirect(0);
stringHeaderBytes = ByteBuffer.allocateDirect(0);
stringBytes = new ByteBuffer[0];
allButStringByteBuffers = new ByteBuffer[] { headerBytes, intBytes, longBytes, floatBytes, doubleBytes,
booleanBytes, stringHeaderBytes };
// no string buffers yet
byteBuffers = allButStringByteBuffers;
ByteOrder nativeOrder = ByteOrder.nativeOrder();
for (ByteBuffer buffer : byteBuffers) {
buffer.order(nativeOrder);
}
header = headerBytes.asIntBuffer();
stringHeader = stringHeaderBytes.asIntBuffer();
}
AmuseMessage(int callID, int functionID, int count) {
this();
setCallID(callID);
setFunctionID(functionID);
setCallCount(count);
}
/**
* Massages with an exception
*
* @param callID
* id of the call that generated the exception
* @param functionID
* function id tried
* @param error
* a description of the error that occurred
*/
AmuseMessage(int callID, int functionID, int count, String error) {
this();
setCallID(callID);
setFunctionID(functionID);
setCallCount(count);
setError(error);
}
public void clear() {
headerBytes.clear();
// stuff full of zeros
byte[] zeros = new byte[headerBytes.capacity()];
// remember byte order
zeros[HEADER_BIG_ENDIAN_FLAG] = headerBytes.get(HEADER_BIG_ENDIAN_FLAG);
headerBytes.put(zeros);
}
/**
* Change the byte order of this message.
*
* @param order
* The new byte-order
*/
private void setByteOrder(ByteOrder order) {
if (order == ByteOrder.BIG_ENDIAN) {
headerBytes.put(HEADER_BIG_ENDIAN_FLAG, TRUE_BYTE);
} else {
headerBytes.put(HEADER_BIG_ENDIAN_FLAG, FALSE_BYTE);
}
for (ByteBuffer buffer : getByteBuffers(false)) {
buffer.order(order);
}
// re-create views, as the order-change may not become visible
// otherwise
headerBytes.clear();
header = headerBytes.asIntBuffer();
stringHeaderBytes.clear();
stringHeader = stringHeaderBytes.asIntBuffer();
}
/**
* Change the byte order of this message. Also swaps the content of all
* the buffers, if requested.
*
* @param order
* The new byte-order
* @param swapContent
* if True, all data contained in this message is byte-order
* swapped.
* @throws IOException
* if the byte order cannot be determined
*/
void setByteOrder(ByteOrder order, boolean swapContent) throws IOException {
ByteOrder oldOrder = getByteOrder();
if (order == oldOrder) {
// done! :-)
return;
}
throw new IOException("byte swapping not implemented yet!");
}
private ByteOrder getByteOrder() {
if (headerBytes.get(HEADER_BIG_ENDIAN_FLAG) == TRUE_BYTE) {
return ByteOrder.BIG_ENDIAN;
} else if (headerBytes.get(HEADER_BIG_ENDIAN_FLAG) == FALSE_BYTE) {
return ByteOrder.LITTLE_ENDIAN;
} else {
throw new RuntimeException("endiannes not specified in header");
}
}
public void setCallCount(int count) {
header.put(HEADER_CALL_COUNT_INDEX, count);
}
public void setFunctionID(int functionID) {
header.put(HEADER_FUNCTION_ID_INDEX, functionID);
}
public void setCallID(int callID) {
header.put(HEADER_CALL_ID_INDEX, callID);
}
public void setError(String error) {
if (error == null) {
error = "<empty>";
}
// clear data from message
header.put(HEADER_INT_COUNT_INDEX, 0);
header.put(HEADER_LONG_COUNT_INDEX, 0);
header.put(HEADER_FLOAT_COUNT_INDEX, 0);
header.put(HEADER_DOUBLE_COUNT_INDEX, 0);
header.put(HEADER_BOOLEAN_COUNT_INDEX, 0);
header.put(HEADER_STRING_COUNT_INDEX, 1);
header.put(HEADER_UNITS_COUNT_INDEX, 0);
// set error state
headerBytes.put(HEADER_ERROR_FLAG, TRUE_BYTE);
ensurePrimitiveCapacity();
try {
// set first string to exception message
byte[] bytes;
bytes = error.getBytes("UTF-8");
stringHeader.put(0, bytes.length);
ensureStringsCapacity(0);
stringBytes[0].clear();
stringBytes[0].put(bytes);
stringBytes[0].put( (byte) 0); // add extra zero
} catch (UnsupportedEncodingException e) {
System.err.println("could not set error: " + e);
stringHeader.put(0, 0);
}
}
public void setBoolean(int index, boolean value) {
if (value) {
booleanBytes.put(index, TRUE_BYTE);
} else {
booleanBytes.put(index, FALSE_BYTE);
}
}
public void addString(String value) {
int position = header.get(HEADER_STRING_COUNT_INDEX);
// add an extra string
header.put(HEADER_STRING_COUNT_INDEX, position + 1);
// make sure there is space in the header for the length of the
// string
ensurePrimitiveCapacity();
// encode string to UTF-8
byte[] bytes;
try {
if (value == null) {
//set null values to an empty string
bytes = new String().getBytes("UTF-8");
} else {
bytes = value.getBytes("UTF-8");
}
// set length of string in header
stringHeader.put(position, bytes.length);
// make sure there is space for the string
ensureStringsCapacity(position);
stringBytes[position].clear();
stringBytes[position].put(bytes);
stringBytes[position].put( (byte) 0); // add extra zero
} catch (UnsupportedEncodingException e) {
System.err.println("ERROR! UTF-8 not supported by the JVM!");
}
}
public void setString(int index, String value) {
// encode string to UTF-8
byte[] bytes;
try {
if (value == null) {
//set null values to an empty string
bytes = new String().getBytes("UTF-8");
} else {
bytes = value.getBytes("UTF-8");
}
// set length of string in header
stringHeader.put(index, bytes.length);
// make sure there is space for the string
ensureStringsCapacity(index);
stringBytes[index].clear();
stringBytes[index].put(bytes);
stringBytes[index].put( (byte) 0); // add extra zero
} catch (UnsupportedEncodingException e) {
System.err.println("ERROR! UTF-8 not supported by the JVM!");
}
}
public boolean isErrorState() {
return headerBytes.get(HEADER_ERROR_FLAG) == TRUE_BYTE;
}
public String getError() throws IOException {
if (!isErrorState()) {
return null;
}
return getString(0);
}
public int getCallID() {
return header.get(HEADER_CALL_ID_INDEX);
}
public int getFunctionID() {
return header.get(HEADER_FUNCTION_ID_INDEX);
}
public int getCallCount() {
return header.get(HEADER_CALL_COUNT_INDEX);
}
public int getIntCount() {
return header.get(HEADER_INT_COUNT_INDEX);
}
public int getLongCount() {
return header.get(HEADER_LONG_COUNT_INDEX);
}
public int getFloatCount() {
return header.get(HEADER_FLOAT_COUNT_INDEX);
}
public int getDoubleCount() {
return header.get(HEADER_DOUBLE_COUNT_INDEX);
}
public int getBooleanCount() {
return header.get(HEADER_BOOLEAN_COUNT_INDEX);
}
public int getStringCount() {
return header.get(HEADER_STRING_COUNT_INDEX);
}
public String getString(int index) throws IOException {
if (getStringCount() <= index) {
throw new IOException("cannot get string at index " + index + " in call" + this);
}
if (stringBytes.length <= index) {
throw new IOException("cannot get string at index " + index + " in call" + this
+ " header does not match content!");
}
int utf8length = stringHeader.get(index);
if (stringBytes[index].hasArray()) {
return new String(stringBytes[index].array(), 0, utf8length, "UTF-8");
}
byte[] bytes = new byte[utf8length];
stringBytes[index].position(0);
stringBytes[index].limit(utf8length+1); // account for extra zero
stringBytes[index].get(bytes);
return new String(bytes, 0, utf8length, "UTF-8");
}
public boolean getBoolean(int index) {
byte rawByte = booleanBytes.get(index);
return rawByte == TRUE_BYTE;
}
public int getInteger(int index) {
return intBytes.getInt(index * SIZEOF_INT);
}
/**
* Get all buffers, possibly including the buffers containing the
* strings.
*
* @return all buffers.
*
* @param includeStringBuffers
* if true, the buffers for holding the values of strings
* will be included.
*/
public ByteBuffer[] getByteBuffers(boolean includeStringBuffers) {
if (includeStringBuffers) {
return byteBuffers;
} else {
return allButStringByteBuffers;
}
}
public ByteBuffer[] getStringByteBuffers() {
return stringBytes;
}
private void setPrimitiveLimitsFromHeader() throws IOException {
intBytes.clear().limit(getIntCount() * SIZEOF_INT);
longBytes.clear().limit(getLongCount() * SIZEOF_LONG);
floatBytes.clear().limit(getFloatCount() * SIZEOF_FLOAT);
doubleBytes.clear().limit(getDoubleCount() * SIZEOF_DOUBLE);
booleanBytes.clear().limit(getBooleanCount() * SIZEOF_BOOLEAN);
stringHeaderBytes.clear().limit(getStringCount() * SIZEOF_INT);
}
private void setStringLimitsFromHeader() throws IOException {
if (getStringCount() > stringBytes.length) {
throw new IOException(
"Amuse message in inconsistent state, strign count greater than number of string buffers");
}
for (int i = 0; i < getStringCount(); i++) {
int utf8Length = stringHeader.get(i);
stringBytes[i].clear().limit(utf8Length + 1); // account for extra zero
}
// set the limit of the rest of the string bytes to 0
for (int i = getStringCount(); i < stringBytes.length; i++) {
stringBytes[i].limit(0);
}
}
void writeTo(SocketChannel channel) throws IOException {
//System.err.prinln("writing to socket channel: " + this.toContentString());
//System.err.prinln("writing to socket channel: " + this);
headerBytes.clear();
setPrimitiveLimitsFromHeader();
setStringLimitsFromHeader();
// write all bufferd to channel
boolean done = false;
while(!done) {
channel.write(byteBuffers);
done = true;
for (ByteBuffer buffer : byteBuffers) {
if (buffer.hasRemaining()) {
done = false;
}
}
}
// alternative, debugging version of writing buffers
// for (ByteBuffer buffer : byteBuffers) {
// //System.err.println("writing " + buffer + " of length "
// + buffer.remaining());
// channel.write(buffer);
//
// if (buffer.hasRemaining()) {
// System.err.println("Error! not all bytes written "
// + buffer.remaining());
// }
// }
}
// make sure there is enough space for each primitive buffer
// (including the string header)
public boolean ensurePrimitiveCapacity() {
boolean buffersUpdated = false;
if (getIntCount() * SIZEOF_INT > intBytes.capacity()) {
intBytes = ByteBuffer.allocateDirect(getIntCount() * SIZEOF_INT);
intBytes.order(getByteOrder());
buffersUpdated = true;
}
if (getLongCount() * SIZEOF_LONG > longBytes.capacity()) {
longBytes = ByteBuffer.allocateDirect(getLongCount() * SIZEOF_LONG);
longBytes.order(getByteOrder());
buffersUpdated = true;
}
if (getFloatCount() * SIZEOF_FLOAT > floatBytes.capacity()) {
floatBytes = ByteBuffer.allocateDirect(getFloatCount() * SIZEOF_FLOAT);
floatBytes.order(getByteOrder());
buffersUpdated = true;
}
if (getDoubleCount() * SIZEOF_DOUBLE > doubleBytes.capacity()) {
doubleBytes = ByteBuffer.allocateDirect(getDoubleCount() * SIZEOF_DOUBLE);
doubleBytes.order(getByteOrder());
buffersUpdated = true;
}
if (getBooleanCount() * SIZEOF_BOOLEAN > booleanBytes.capacity()) {
booleanBytes = ByteBuffer.allocateDirect(getBooleanCount() * SIZEOF_BOOLEAN);
booleanBytes.order(getByteOrder());
buffersUpdated = true;
}
if (getStringCount() * SIZEOF_INT > stringHeaderBytes.capacity()) {
stringHeaderBytes = ByteBuffer.allocateDirect(getStringCount() * SIZEOF_INT);
stringHeaderBytes.order(getByteOrder());
stringHeader = stringHeaderBytes.asIntBuffer();
buffersUpdated = true;
}
if (buffersUpdated) {
allButStringByteBuffers = new ByteBuffer[] { headerBytes, intBytes, longBytes, floatBytes, doubleBytes,
booleanBytes, stringHeaderBytes };
// update byte buffers array
ByteBuffer[] newByteBuffers = new ByteBuffer[allButStringByteBuffers.length + stringBytes.length];
for (int i = 0; i < allButStringByteBuffers.length; i++) {
newByteBuffers[i] = allButStringByteBuffers[i];
}
for (int i = 0; i < stringBytes.length; i++) {
newByteBuffers[allButStringByteBuffers.length + i] = stringBytes[i];
}
byteBuffers = newByteBuffers;
//System.err.println("ensurePrimitiveCapacity() Updated buffers to " + Arrays.toString(byteBuffers));
}
return buffersUpdated;
}
public boolean ensureStringsCapacity() {
// checking if the string header is big enough is checked above, so
// we
// only check if all strings listed in the header
boolean buffersUpdated = false;
if (stringBytes.length < getStringCount()) {
ByteBuffer[] oldStringBytes = stringBytes;
stringBytes = new ByteBuffer[getStringCount()];
for (int i = 0; i < oldStringBytes.length; i++) {
stringBytes[i] = oldStringBytes[i];
}
buffersUpdated = true;
}
for (int i = 0; i < getStringCount(); i++) {
int stringLength = stringHeader.get(i) +1; // account for extra zero
if (stringBytes[i] == null || stringLength > stringBytes[i].capacity()) {
stringBytes[i] = ByteBuffer.allocateDirect(stringLength);
buffersUpdated = true;
}
}
if (buffersUpdated) {
// update byte buffers array
ByteBuffer[] newByteBuffers = new ByteBuffer[allButStringByteBuffers.length + stringBytes.length];
for (int i = 0; i < allButStringByteBuffers.length; i++) {
newByteBuffers[i] = allButStringByteBuffers[i];
}
for (int i = 0; i < stringBytes.length; i++) {
newByteBuffers[allButStringByteBuffers.length + i] = stringBytes[i];
}
byteBuffers = newByteBuffers;
//System.err.println("ensureStringsCapacity() Updated buffers to " + Arrays.toString(byteBuffers));
}
return buffersUpdated;
}
public boolean ensureStringsCapacity(int index) {
// checking if the string header is big enough is checked above, so
// we
// only check if all strings listed in the header
boolean buffersUpdated = false;
if (stringBytes.length < getStringCount()) {
ByteBuffer[] oldStringBytes = stringBytes;
stringBytes = new ByteBuffer[getStringCount()];
for (int i = 0; i < oldStringBytes.length; i++) {
stringBytes[i] = oldStringBytes[i];
}
buffersUpdated = true;
}
if (buffersUpdated) {
// update byte buffers array
ByteBuffer[] newByteBuffers = new ByteBuffer[allButStringByteBuffers.length + stringBytes.length];
for (int i = 0; i < allButStringByteBuffers.length; i++) {
newByteBuffers[i] = allButStringByteBuffers[i];
}
for (int i = 0; i < stringBytes.length; i++) {
newByteBuffers[allButStringByteBuffers.length + i] = stringBytes[i];
}
byteBuffers = newByteBuffers;
//System.err.println("ensureStringsCapacity() Updated buffers to " + Arrays.toString(byteBuffers));
}
{
int stringLength = stringHeader.get(index) +1; // account for extra zero
if (stringBytes[index] == null || stringLength > stringBytes[index].capacity()) {
stringBytes[index] = ByteBuffer.allocateDirect(stringLength);
byteBuffers[allButStringByteBuffers.length + index] = stringBytes[index];
}
}
return buffersUpdated;
}
boolean readFrom(SocketChannel channel) throws IOException {
boolean updatedBuffers = false;
//System.err.println("receiving header from channel");
headerBytes.clear();
readAll(channel, headerBytes);
// set buffers to byte order specified in buffer
setByteOrder(getByteOrder());
//System.err.println("reading content for " + this);
if (ensurePrimitiveCapacity()) {
updatedBuffers = true;
}
// then, set limits for primitive buffers, and receive those
setPrimitiveLimitsFromHeader();
//System.err.println("receiving primitives from channel");
headerBytes.position(headerBytes.limit());
// we also request to read the header, but its position is already
// equal to its limit, so no bytes are read into it.
readAll(channel, allButStringByteBuffers);
// make sure there is enough space for the strings
if (ensureStringsCapacity()) {
updatedBuffers = true;
}
// set the limits
setStringLimitsFromHeader();
//System.err.println("receiving strings from channel");
// and receive!
readAll(channel, stringBytes);
//System.err.println("done receiving message from channel: " + this);
return updatedBuffers;
}
public String toContentString() throws IOException {
String message = "AmuseMessage <id:" + getCallID() + " function ID:" + getFunctionID() + " count:"
+ getCallCount();
if (isErrorState()) {
message = message + " ERROR";
}
if (getByteOrder() == ByteOrder.BIG_ENDIAN) {
message = message + " order: B";
} else {
message = message + " order: l";
}
if (getIntCount() != 0) {
intBytes.clear();
message = message + " ints: [";
for (int i = 0; i < getIntCount(); i++) {
message = message + ", " + intBytes.getInt(i * SIZEOF_INT);
}
message = message + "] ";
}
if (getLongCount() != 0) {
longBytes.clear();
message = message + " longs: [";
for (int i = 0; i < getLongCount(); i++) {
message = message + ", " + longBytes.getLong(i * SIZEOF_LONG);
}
message = message + "] ";
}
if (getFloatCount() != 0) {
floatBytes.clear();
message = message + " floats: [";
for (int i = 0; i < getFloatCount(); i++) {
message = message + ", " + floatBytes.getFloat(i * SIZEOF_FLOAT);
}
message = message + "] ";
}
if (getDoubleCount() != 0) {
doubleBytes.clear();
message = message + " double: [";
for (int i = 0; i < getDoubleCount(); i++) {
message = message + ", " + doubleBytes.getDouble(i * SIZEOF_DOUBLE);
}
message = message + "] ";
}
if (getBooleanCount() != 0) {
message = message + " boolean: [";
for (int i = 0; i < getBooleanCount(); i++) {
message = message + ", " + getBoolean(i);
}
message = message + "] ";
}
if (getStringCount() != 0) {
message = message + " string: [";
for (int i = 0; i < getStringCount(); i++) {
message = message + ", " + getString(i);
}
message = message + "] ";
}
message = message + ">";
// return "Call <id:" + getCallID() + " function ID:" +
// getFunctionID()
// + " count:" + getCount() + " ints:" + getIntCount()
// + " longs: " + getLongCount() + " floats:" + getFloatCount()
// + " doubles:" + getDoubleCount() + " booleans:"
// + getBooleanCount() + " strings:" + getStringCount()
// + " byte order:" + getByteOrder() + " error:"
// + isErrorState() + ">";
return message;
}
public String toString() {
String message = "AmuseMessage <id:" + getCallID() + " function ID:" + getFunctionID() + " count:"
+ getCallCount();
if (isErrorState()) {
message = message + " ERROR";
}
if (getByteOrder() == ByteOrder.BIG_ENDIAN) {
message = message + " order: B";
} else {
message = message + " order: l";
}
if (getIntCount() != 0) {
message = message + " ints:" + getIntCount();
}
if (getLongCount() != 0) {
message = message + " longs:" + getLongCount();
}
if (getFloatCount() != 0) {
message = message + " floats:" + getFloatCount();
}
if (getDoubleCount() != 0) {
message = message + " doubles:" + getDoubleCount();
}
if (getBooleanCount() != 0) {
message = message + " booleans:" + getBooleanCount();
}
if (getStringCount() != 0) {
message = message + " strings:" + getStringCount();
}
message = message + ">";
// return "Call <id:" + getCallID() + " function ID:" +
// getFunctionID()
// + " count:" + getCount() + " ints:" + getIntCount()
// + " longs: " + getLongCount() + " floats:" + getFloatCount()
// + " doubles:" + getDoubleCount() + " booleans:"
// + getBooleanCount() + " strings:" + getStringCount()
// + " byte order:" + getByteOrder() + " error:"
// + isErrorState() + ">";
return message;
}
public void setIntCount(int ints) {
header.put(HEADER_INT_COUNT_INDEX, ints);
}
public void setLongCount(int longs) {
header.put(HEADER_LONG_COUNT_INDEX, longs);
}
public void setFloatCount(int floats) {
header.put(HEADER_FLOAT_COUNT_INDEX, floats);
}
public void setDoubleCount(int doubles) {
header.put(HEADER_DOUBLE_COUNT_INDEX, doubles);
}
public void setBooleanCount(int booleans) {
header.put(HEADER_BOOLEAN_COUNT_INDEX, booleans);
}
public void setStringCount(int strings) {
header.put(HEADER_STRING_COUNT_INDEX, strings);
}
public int[] getIntSlice(int sliceIndex) {
int[] result = new int[getCallCount()];
intBytes.position(getCallCount() * sliceIndex * SIZEOF_INT);
intBytes.limit(getCallCount() * (sliceIndex + 1) * SIZEOF_INT);
intBytes.asIntBuffer().get(result);
return result;
}
public long[] getLongSlice(int sliceIndex) {
long[] result = new long[getCallCount()];
longBytes.position(getCallCount() * sliceIndex * SIZEOF_LONG);
longBytes.limit(getCallCount() * (sliceIndex + 1) * SIZEOF_LONG);
longBytes.asLongBuffer().get(result);
return result;
}
public float[] getFloatSlice(int sliceIndex) {
float[] result = new float[getCallCount()];
floatBytes.position(getCallCount() * sliceIndex * SIZEOF_FLOAT);
floatBytes.limit(getCallCount() * (sliceIndex + 1) * SIZEOF_FLOAT);
floatBytes.asFloatBuffer().get(result);
return result;
}
public double[] getDoubleSlice(int sliceIndex) {
double[] result = new double[getCallCount()];
doubleBytes.position(getCallCount() * sliceIndex * SIZEOF_DOUBLE);
doubleBytes.limit(getCallCount() * (sliceIndex + 1) * SIZEOF_DOUBLE);
doubleBytes.asDoubleBuffer().get(result);
return result;
}
public boolean[] getBooleanSlice(int sliceIndex) throws IOException {
int callCount = getCallCount();
boolean[] result = new boolean[callCount];
int offset = sliceIndex * callCount;
for(int i = 0; i < callCount; i++) {
result[i] = getBoolean(offset + i);
}
return result;
}
public String[] getStringSlice(int sliceIndex) throws IOException {
int callCount = getCallCount();
String[] result = new String[callCount];
int offset = sliceIndex * callCount;
for(int i = 0; i < callCount; i++) {
result[i] = getString(offset + i);
}
return result;
}
// sets all elements of a slice
public void setIntSlice(int sliceIndex, int[] data) {
intBytes.position(getCallCount() * sliceIndex * SIZEOF_INT);
intBytes.limit(getCallCount() * (sliceIndex + 1) * SIZEOF_INT);
intBytes.asIntBuffer().put(data);
}
// sets all elements of a slice to a single value
public void setIntSlice(int sliceIndex, int value) {
intBytes.position(getCallCount() * sliceIndex * SIZEOF_INT);
intBytes.limit(getCallCount() * (sliceIndex + 1) * SIZEOF_INT);
IntBuffer buffer = intBytes.asIntBuffer();
while(buffer.hasRemaining()) {
buffer.put(value);
}
}
// sets all elements of a slice
public void setLongSlice(int sliceIndex, long[] data) {
longBytes.position(getCallCount() * sliceIndex * SIZEOF_LONG);
longBytes.limit(getCallCount() * (sliceIndex + 1) * SIZEOF_LONG);
longBytes.asLongBuffer().put(data);
}
// sets all elements of a slice to a single value
public void setLongSlice(int sliceIndex, long value) {
longBytes.position(getCallCount() * sliceIndex * SIZEOF_LONG);
longBytes.limit(getCallCount() * (sliceIndex + 1) * SIZEOF_LONG);
LongBuffer buffer = longBytes.asLongBuffer();
while(buffer.hasRemaining()) {
buffer.put(value);
}
}
// sets all elements of a slice
public void setFloatSlice(int sliceIndex, float[] data) {
floatBytes.position(getCallCount() * sliceIndex * SIZEOF_FLOAT);
floatBytes.limit(getCallCount() * (sliceIndex + 1) * SIZEOF_FLOAT);
floatBytes.asFloatBuffer().put(data);
}
// sets all elements of a slice to a single value
public void setFloatSlice(int sliceIndex, float value) {
floatBytes.position(getCallCount() * sliceIndex * SIZEOF_FLOAT);
floatBytes.limit(getCallCount() * (sliceIndex + 1) * SIZEOF_FLOAT);
FloatBuffer buffer = floatBytes.asFloatBuffer();
while(buffer.hasRemaining()) {
buffer.put(value);
}
}
// sets all elements of a slice
public void setDoubleSlice(int sliceIndex, double[] data) {
doubleBytes.position(getCallCount() * sliceIndex * SIZEOF_DOUBLE);
doubleBytes.limit(getCallCount() * (sliceIndex + 1) * SIZEOF_DOUBLE);
doubleBytes.asDoubleBuffer().put(data);
}
// sets all elements of a slice to a single value
public void setDoubleSlice(int sliceIndex, double value) {
doubleBytes.position(getCallCount() * sliceIndex * SIZEOF_DOUBLE);
doubleBytes.limit(getCallCount() * (sliceIndex + 1) * SIZEOF_DOUBLE);
DoubleBuffer buffer = doubleBytes.asDoubleBuffer();
while(buffer.hasRemaining()) {
buffer.put(value);
}
}
// sets all elements of a slice
public void setBooleanSlice(int sliceIndex, boolean[] data) {
int callCount = getCallCount();
for(int i = 0; i < callCount; i++) {
setBoolean((callCount * sliceIndex) + i, data[i]);
}
}
// sets all elements of a slice to a single value
public void setBooleanSlice(int sliceIndex, boolean value) {
int callCount = getCallCount();
for(int i = 0; i < callCount; i++) {
setBoolean((callCount * sliceIndex) + i, value);
}
}
// sets all elements of a slice
public void setStringSlice(int sliceIndex, String[] data) {
int callCount = getCallCount();
for(int i = 0; i < callCount; i++) {
setString((callCount * sliceIndex) + i, data[i]);
}
}
// sets all elements of a slice to a single value
public void setStringSlice(int sliceIndex, String value) {
int callCount = getCallCount();
for(int i = 0; i < callCount; i++) {
setString((callCount * sliceIndex) + i, value);
}
}
}
"""
# Verbatim Java source closing the generated Worker class: request/reply
# message fields, the socket request loop (runSockets), and main().
# Generated-code text — do not reflow, reformat, or translate it.
FOOTER_CODE_STRING = """
private final AmuseMessage request;
private final AmuseMessage reply;
private final CodeInterface code;
Worker(String codeDir, String amuseRootDir) throws Exception {
this.request = new AmuseMessage();
this.reply = new AmuseMessage();
code = new Code(codeDir, amuseRootDir);
}
private void runSockets(int port, String host) {
try {
SocketChannel channel = SocketChannel.open(new InetSocketAddress(host, port));
channel.socket().setTcpNoDelay(true);
boolean keepRunning = true;
while (keepRunning) {
request.clear();
request.readFrom(channel);
//System.err.println("got message " + request.toString());
reply.clear();
reply.setCallID(request.getCallID());
reply.setFunctionID(request.getFunctionID());
reply.setCallCount(request.getCallCount());
keepRunning = handleCall();
//System.err.println("sending reply message " + reply.toString());
//System.err.println("sending reply message " + reply.toContentString());
reply.writeTo(channel);
//System.err.println("call handled");
}
code.end();
} catch (IOException e) {
System.err.println("Error running worker: " + e.getMessage());
} finally {
code.end();
}
}
public static void main(String[] arguments) throws Exception {
//System.err.println("Java worker");
//for (String argument : arguments) {
// System.err.println("argument: " + argument);
//}
if (arguments.length == 0) {
System.err.println("No arguments to java worker. expected a socket port number");
System.exit(1);
}
String codeDir = System.getProperty("code.dir");
if (codeDir == null) {
System.err.println("Expected code dir not specified");
System.exit(1);
}
String amuseRootDir = System.getProperty("amuse.root.dir");
if (amuseRootDir == null) {
System.err.println("Expected amuse root dir not specified");
System.exit(1);
}
int port = Integer.parseInt(arguments[0]);
String hostname = arguments[1];
new Worker(codeDir, amuseRootDir).runSockets(port, hostname);
}
"""
class MakeJavaCodeString(GenerateASourcecodeString):
    """Base class for Java source-string generators in this module."""
    @late
    def dtype_to_spec(self):
        # @late (project lazy-attribute decorator): expose the module-level
        # datatype table to generator subclasses.
        return dtype_to_spec
class GenerateAJavaStringOfAFunctionSpecification(MakeJavaCodeString):
@late
def specification(self):
    # @late-cached attribute: callers must assign a function specification
    # before generation starts; reading it unset raises.
    raise exceptions.AmuseException("No specification set, please set the specification first")
def start(self):
    """Generate the Java ``case`` block dispatching one remote function call.

    Emits, in order: the case label, reply-buffer sizing, local variable
    declarations, the call into the user's code, and output copy-back.
    The generated text is stored in ``self._result``.
    """
    #must and can handle array is the same thing in Java codes...
    if self.specification.can_handle_array:
        self.specification.must_handle_array = True
    self.specification.prepare_output_parameters()
    self.output_casestmt_start()
    self.out.indent()
    self.out.lf() + "{"
    self.out.indent()
    self.output_lines_with_number_of_outputs()
    if hasattr(self.specification,"internal_provided"):
        # Framework-provided function: emit only a Java comment, no call.
        self.out.lf() + "//" + self.specification.name + " ignored"
    else:
        self.output_declare_variables()
        self.output_function_start()
        self.output_function_parameters()
        self.output_function_end()
        self.output_copy_output_variables()
    self.out.dedent()
    self.out.lf() + "}"
    self.output_casestmt_end()
    self.out.dedent()
    self._result = self.out.string
def output_casestmt_start(self):
    # Emit the Java `case <function id>:` label for the dispatch switch.
    self.out + 'case ' + self.specification.id + ':'
def output_lines_with_number_of_outputs(self):
    """Emit Java lines sizing the reply message: one ``set<Type>Count``
    call per output datatype, then ``ensurePrimitiveCapacity()``."""
    # Tally output values per datatype, counting the function result
    # (when present) as one extra output of its type.
    per_dtype = {}
    for param in self.specification.output_parameters:
        per_dtype[param.datatype] = per_dtype.get(param.datatype, 0) + 1
    result_type = self.specification.result_type
    if result_type is not None:
        per_dtype[result_type] = per_dtype.get(result_type, 0) + 1
    for dtype, total in per_dtype.items():
        spec = self.dtype_to_spec[dtype]
        self.out.lf() + 'reply.set' + spec.input_var_name + 'Count(' + total + ' * count);'
    self.out.lf() + 'reply.ensurePrimitiveCapacity();'
def output_function_parameters(self):
self.out.indent()
first = True
for parameter in self.specification.parameters:
spec = self.dtype_to_spec[parameter.datatype]
if first:
first = False
else:
self.out + ', '
if parameter.direction == LegacyFunctionSpecification.IN:
if self.specification.must_handle_array:
self.out + parameter.name
else:
self.out + parameter.name + '[0]'
if parameter.direction == LegacyFunctionSpecification.INOUT:
self.out + parameter.name
elif parameter.direction == LegacyFunctionSpecification.OUT:
self.out + parameter.name
elif parameter.direction == LegacyFunctionSpecification.LENGTH:
self.out + 'count'
self.out.dedent()
def output_declare_variables(self):
if not self.specification.result_type is None:
spec = self.dtype_to_spec[self.specification.result_type]
self.out.lf() + spec.type + ' functionResult;'
for parameter in self.specification.parameters:
spec = self.dtype_to_spec[parameter.datatype]
if parameter.direction == LegacyFunctionSpecification.IN or parameter.direction == LegacyFunctionSpecification.INOUT :
self.out.lf() + spec.type + '[] ' + parameter.name + ' = request.get' + spec.input_var_name + 'Slice(' + parameter.input_index + ');'
if parameter.direction == LegacyFunctionSpecification.OUT:
self.out.lf() + spec.type + '[] ' + parameter.name + ' = new ' + spec.type + '[count];'
def output_function_start(self):
self.out.n()
if not self.specification.result_type is None:
self.out + 'functionResult = '
self.out + 'code.' + self.specification.name + '('
def output_function_end(self):
self.out + ')' + ';'
def output_copy_output_variables(self):
if not self.specification.result_type is None:
spec = self.dtype_to_spec[self.specification.result_type]
self.out.lf() + 'reply.set' + spec.output_var_name + 'Slice(0, functionResult);'
for parameter in self.specification.parameters:
spec = self.dtype_to_spec[parameter.datatype]
if parameter.direction == LegacyFunctionSpecification.OUT or parameter.direction == LegacyFunctionSpecification.INOUT:
self.out.lf() + 'reply.set' + spec.output_var_name + 'Slice(' + parameter.output_index + ', ' + parameter.name + ');'
def output_casestmt_end(self):
self.out.n() + 'break;'
class GenerateAJavaFunctionDeclarationStringFromAFunctionSpecification(MakeJavaCodeString):
    """Emit a single Java method declaration for the generated CodeInterface."""

    def start(self):
        # In the Java backend "can handle array" implies "must handle array".
        if self.specification.can_handle_array:
            self.specification.must_handle_array = True

        self.output_function_parameter_types()
        self.output_function_start()
        self.output_function_parameters()
        self.output_function_end()
        self._result = self.out.string

    def output_function_parameter_types(self):
        """Write one explanatory comment line per parameter direction."""
        direction_notes = (
            (LegacyFunctionSpecification.IN, 'is an input parameter'),
            (LegacyFunctionSpecification.OUT, 'is an output parameter'),
            (LegacyFunctionSpecification.INOUT, 'is an inout parameter'),
            (LegacyFunctionSpecification.LENGTH, 'is a length parameter'),
        )
        for parameter in self.specification.parameters:
            for direction, note in direction_notes:
                if parameter.direction == direction:
                    self.out.lf() + '// parameter "' + parameter.name + '" ' + note
                    break

    def output_function_parameters(self):
        """Write the comma-separated formal parameter list."""
        for index, parameter in enumerate(self.specification.parameters):
            spec = self.dtype_to_spec[parameter.datatype]
            if index > 0:
                self.out + ', '
            self.out + spec.type
            # Arrays for outputs, and for inputs when batching is required.
            needs_array = (self.specification.must_handle_array and parameter.is_input()) or parameter.is_output()
            if needs_array:
                self.out + '[]'
            self.out + ' '
            self.out + parameter.name

    def output_function_end(self):
        self.out + ');'

    def output_function_start(self):
        self.out.n()
        result_type = self.specification.result_type
        if result_type is None:
            self.out + 'void' + ' '
        else:
            spec = self.dtype_to_spec[result_type]
            self.out + spec.type
            self.out + ' '
        self.out + self.specification.name + '('
class GenerateAJavaSourcecodeStringFromASpecificationClass(GenerateASourcecodeStringFromASpecificationClass):
    """Generate the Java ``Worker`` class that receives AMUSE messages
    and dispatches each call to the community code implementation."""

    @late
    def specification_class(self):
        # Must be assigned by the caller before start() is invoked.
        raise exceptions.AmuseException("No specification_class set, please set the specification_class first")

    @late
    def dtype_to_spec(self):
        return dtype_to_spec

    def output_sourcecode_for_function(self):
        # Each interface function becomes one case in the dispatch switch.
        return GenerateAJavaStringOfAFunctionSpecification()

    def start(self):
        # Assemble the Worker: imports, message class, dispatcher, footer.
        self.out.lf()
        self.out + IMPORTS_CODE_STRING
        self.out.lf() + 'class Worker {'
        self.out.indent().lf()
        self.out + AMUSE_MESSAGE_CLASS_CODE_STRING
        self.output_handle_call()
        self.out.lf() + FOOTER_CODE_STRING
        self.out.dedent().lf()
        self.out.lf() + "}"
        self._result = self.out.string

    def output_code_constants(self):
        # NOTE(review): not called from start(); appears kept for parity with
        # other language generators — confirm before removing.
        for dtype in list(self.dtype_to_spec.keys()):
            dtype_spec = self.dtype_to_spec[dtype]
            maxin = self.mapping_from_dtype_to_maximum_number_of_inputvariables.get(dtype, 0)
            self.out + 'static int MAX_' + dtype_spec.input_var_name.upper() + ' = ' + maxin + ";"
            self.out.lf()
            maxout = self.mapping_from_dtype_to_maximum_number_of_outputvariables.get(dtype, 0)
            self.out + 'static int MAX_' + dtype_spec.output_var_name.upper() + ' = ' + maxout + ";"
            self.out.lf()

    def output_handle_call(self):
        """Emit handleCall(): reads the function id and dispatches one call.

        Returns false (stop the worker loop) only for function id 0,
        which also ends the code.
        """
        self.out.lf().lf() + 'private boolean handleCall() throws IOException {'
        self.out.indent()
        self.out.lf() + 'int count = request.getCallCount();'
        self.out.lf().lf() + 'switch (request.getFunctionID()) {'
        self.out.indent()
        # Function id 0 is the stop request.
        self.out.lf() + 'case 0:'
        self.out.indent()
        self.out.lf() + 'code.end();'
        self.out.lf() + 'return false;'
        self.out.dedent()
        self.output_sourcecode_for_functions()
        # Unknown ids are reported on stderr and flagged in the reply.
        self.out.lf() + 'default:'
        self.out.indent()
        self.out.lf() + 'System.err.println("unknown function id " + request.getFunctionID());'
        self.out.lf() + 'reply.setError("unknown function id " + request.getFunctionID());'
        self.out.dedent()
        self.out.dedent().lf() + '}'
        self.out.dedent()
        self.out.indent().lf() + 'return true;'
        self.out.dedent().lf() + '}'
class GenerateAJavaInterfaceStringFromASpecificationClass(GenerateASourcecodeStringFromASpecificationClass):
    """Generate the Java ``CodeInterface`` a community code implements."""

    @late
    def ignore_functions_from_specification_classes(self):
        return []

    @late
    def underscore_functions_from_specification_classes(self):
        return []

    @late
    def dtype_to_spec(self):
        return dtype_to_spec

    def must_include_interface_function_in_output(self, x):
        """Exclude framework-provided functions and explicitly ignored ones."""
        specification = x.specification
        if hasattr(specification, "internal_provided"):
            return False
        return not any(
            hasattr(cls, specification.name)
            for cls in self.ignore_functions_from_specification_classes
        )

    def output_sourcecode_for_function(self):
        # One Java method declaration per interface function.
        return GenerateAJavaFunctionDeclarationStringFromAFunctionSpecification()

    def start(self):
        self.out + 'public interface CodeInterface {'
        self.out.indent().lf()
        # end() is always part of the interface (stop request, id 0).
        self.out + 'public void end();'
        self.out.lf()
        self.output_sourcecode_for_functions()
        self.out.dedent().lf() + '}'
        self.out.lf()
        self._result = self.out.string
class GenerateAJavaWorkerScript(GenerateASourcecodeString):
    """Generate the shell script that launches a Java worker.

    Fills in ``java_code_script.template`` (shipped next to this module)
    with the python executable, the java binary, the classpath and the
    code/AMUSE directories.
    """

    @late
    def amuse_root_dir(self):
        return os.path.abspath(options.GlobalOptions.instance().amuse_rootdirectory)

    @late
    def code_dir(self):
        # Relative path of the community code directory inside the AMUSE tree.
        codedir = os.path.split(self.code_directory())[-1]
        return os.path.join("community", codedir)

    @late
    def java(self):
        # Java binary taken from the AMUSE build configuration.
        return config.java.java

    @late
    def template_dir(self):
        return os.path.dirname(__file__)

    @late
    def template_string(self):
        path = os.path.join(self.template_dir, 'java_code_script.template')
        with open(path, "r") as f:
            return f.read()

    @staticmethod
    def classpath(classpath, code_dir):
        """Join the classpath entries, each rooted at code_dir, with ':'."""
        return ":".join([os.path.join(code_dir, x) for x in classpath])

    def script_string(self):
        """Return the worker script with all template fields substituted."""
        return self.template_string.format(
            executable=sys.executable,
            java=self.java,
            classpath=self.classpath(self.specification_class.classpath, self.code_dir),
            code_dir=self.code_dir,
            amuse_root_dir=self.amuse_root_dir
        )

    def code_directory(self):
        # Directory of the module that defines the specification class.
        # (The unused interface_module lookup from the original was removed.)
        return os.path.dirname(inspect.getfile(self.specification_class))

    def start(self):
        self.out + self.script_string()
        self._result = self.out.string
| 53,344
| 34.072321
| 149
|
py
|
amuse
|
amuse-main/src/amuse/rfi/tools/fortran_tools.py
|
from amuse.support.literature import TrackLiteratureReferences
# ptype:
# simple: rw, scalar value and implementation generated by interface
# normal: rw, scalar value, custom implementation
# ro: read only, scalar value, generated
# vector: rw, generated vector valued
class FortranCodeGenerator(object):
    """Generate Fortran accessor functions plus the matching AMUSE
    python interface definitions for a code's parameters and grid
    variables.

    Recognised ``ptype`` values in ``parameter_definition``:

    * ``simple`` -- rw scalar, getter and setter generated here
    * ``normal`` -- rw scalar, custom (hand written) implementation
    * ``ro``     -- read only scalar, getter generated here
    * ``vector`` -- rw indexed value, getter and setter generated here
    """

    # Scalar accessor templates: {0} = variable name, {1} = Fortran type.
    _getter_string = """
  function get_{0}(x) result(ret)
    integer :: ret
    {1} :: x
    x={0}
    ret=0
  end function
"""
    _setter_string = """
  function set_{0}(x) result(ret)
    integer :: ret
    {1} :: x
    {0}=x
    ret=0
  end function
"""
    # Vector (single integer index) accessor templates.
    _vector_getter_string = """
  function get_{0}(i,x) result(ret)
    integer :: i,ret
    {1} :: x
    x={0}(i)
    ret=0
  end function
"""
    _vector_setter_string = """
  function set_{0}(i,x) result(ret)
    integer :: i,ret
    {1} :: x
    {0}(i)=x
    ret=0
  end function
"""
    # Grid accessor templates; placeholders:
    # {0}=name, {1}=Fortran type, {2}=index arguments, {3}=index
    # declarations, {4}=indexing expression, {5}=index range check.
    _grid_getter_template = """
  function get_{0}({2},{0}_out_,n) result(ret)
    integer :: n,{3},k,ret
    {1} :: {0}_out_(n)
    ret=0
    do k=1,n
      {5}
      {0}_out_(k)={0}({4})
    enddo
  end function
"""
    _grid_setter_template = """
  function set_{0}({2},{0}_in_,n) result(ret)
    integer :: n,{3},k,ret
    {1} :: {0}_in_(n)
    ret=0
    do k=1,n
      {5}
      {0}({4})={0}_in_(k)
    enddo
  end function
"""

    # AMUSE dtype name -> Fortran type declaration.
    datatypedict = {
        "string": "character(len=*) ",
        "float64": "real*8",
        "float32": "real",
        "int32": "integer",
        "bool": "logical",
    }

    def __init__(self, parameter_definition=None, grid_variable_definition=None):
        # None defaults avoid the shared-mutable-default pitfall.
        if parameter_definition is None:
            parameter_definition = dict()
        if grid_variable_definition is None:
            grid_variable_definition = dict()
        self.parameter_definition = parameter_definition
        self.grid_variable_definition = grid_variable_definition

    def _grid_format_arg(self, name, dtype, ndim=1, index_ranges=None):
        """Build the format() arguments for the grid templates."""
        arg = ','.join(['i{0}'.format(i) for i in range(ndim)])
        dec = ','.join(['i{0}(n)'.format(i) for i in range(ndim)])
        assign = ','.join(['i{0}(k)'.format(i) for i in range(ndim)])
        if index_ranges is None:
            check = "! no check on index range"
        else:
            # One bounds test per dimension, joined with Fortran
            # continuation lines; out-of-range sets ret=-1 and exits.
            checks = (".OR. &\n" + " " * 11).join(
                ["({0}.LT.{1}.OR.{0}.GT.{2})".format('i{0}(k)'.format(i),
                    index_ranges[i][0], index_ranges[i][1]) for i in range(ndim)])
            check = "if({0}) then\n".format(checks) + \
                " " * 10 + "ret=-1\n" + \
                " " * 10 + "exit\n" + \
                " " * 8 + "endif"
        dtype = self.datatypedict[dtype]
        return name, dtype, arg, dec, assign, check

    def grid_getter(self, name, dtype, ndim=1, index_ranges=None):
        """Return the Fortran getter for one grid variable."""
        return self._grid_getter_template.format(*self._grid_format_arg(name, dtype, ndim, index_ranges))

    def grid_setter(self, name, dtype, ndim=1, index_ranges=None):
        """Return the Fortran setter for one grid variable."""
        return self._grid_setter_template.format(*self._grid_format_arg(name, dtype, ndim, index_ranges))

    def parameter_getter_setters(self):
        """Return Fortran accessors for all generated parameter types."""
        filestring = ""
        py_to_f = self.datatypedict
        for par, d in self.parameter_definition.items():
            if d["ptype"] in ["ro"]:
                filestring += self._getter_string.format(d["short"], py_to_f[d["dtype"]])
            if d["ptype"] in ["simple"]:
                filestring += self._setter_string.format(d["short"], py_to_f[d["dtype"]])
                filestring += self._getter_string.format(d["short"], py_to_f[d["dtype"]])
            if d["ptype"] in ["vector"]:
                filestring += self._vector_setter_string.format(d["short"], py_to_f[d["dtype"]])
                filestring += self._vector_getter_string.format(d["short"], py_to_f[d["dtype"]])
        return filestring

    def grid_getter_setters(self):
        """Return Fortran accessors for all grid variables (no setter for ro)."""
        string = ""
        for var, d in self.grid_variable_definition.items():
            vartype = d.get("vartype", None)
            dtype = d.get("dtype", "float64")
            ndim = d.get("ndim", 1)
            index_ranges = d.get("index_ranges", None)
            for forvar in d["forvar"]:
                string += self.grid_getter(forvar, dtype, ndim, index_ranges)
                if vartype != "ro":
                    string += self.grid_setter(forvar, dtype, ndim, index_ranges)
        return string

    def generate_getters_setters(self, filename=None):
        """Return all Fortran accessors, or write them to *filename*."""
        filestring = ""
        #~ filestring+=input_grid_string(_unstructured_input_grid_template)
        #~ filestring+=input_grid_string(_regular_input_grid_template)
        filestring += self.parameter_getter_setters()
        filestring += self.grid_getter_setters()
        if filename is None:
            return filestring
        else:
            with open(filename, "w") as f:
                f.write(filestring)

    def print_getters_setters(self):
        # Suppress the AMUSE literature banner so the output stays clean.
        TrackLiteratureReferences.suppress_output()
        print(self.generate_getters_setters())

    def generate_parameter_interface_functions(self):
        """Return @legacy_function definitions for all parameters.

        Bug fix: the fixed-length ``get_*_range`` body now ends with a
        newline, so the next generated definition starts on a fresh line
        instead of being fused onto ``return 1,N``.
        """
        output = ""
        for par, d in self.parameter_definition.items():
            dtype = d["dtype"]
            if hasattr(d["default"], "unit"):
                unit = d["default"].unit.reference_string()
            else:
                unit = "None"
            short = d["short"]
            ptype = d["ptype"]
            if ptype in ["ro"]:
                output += ("@legacy_function\ndef get_" + short + "():\n function = LegacyFunctionSpecification()\n"
                    " function.addParameter('" + short + "', dtype='" + dtype + "', direction=function.OUT, unit=" + unit + ")\n"
                    " function.result_type = 'int32'\n return function\n")
            if ptype in ["simple"]:
                output += ("@legacy_function\ndef get_" + short + "():\n function = LegacyFunctionSpecification()\n"
                    " function.addParameter('" + short + "', dtype='" + dtype + "', direction=function.OUT, unit=" + unit + ")\n"
                    " function.result_type = 'int32'\n return function\n")
                output += ("@legacy_function\ndef set_" + short + "():\n function = LegacyFunctionSpecification()\n"
                    " function.addParameter('" + short + "', dtype='" + dtype + "', direction=function.IN, unit=" + unit + ")\n"
                    " function.result_type = 'int32'\n return function\n")
            if ptype in ["vector"]:
                output += ("@legacy_function\ndef get_" + short + "():\n function = LegacyFunctionSpecification()\n"
                    " function.addParameter('i', dtype='i', direction=function.IN)\n"
                    " function.addParameter('" + short + "', dtype='" + dtype + "', direction=function.OUT, unit=" + unit + ")\n"
                    " function.can_handle_array=True\n"
                    " function.result_type = 'int32'\n return function\n")
                output += ("@legacy_function\ndef set_" + short + "():\n function = LegacyFunctionSpecification()\n"
                    " function.addParameter('i', dtype='i', direction=function.IN)\n"
                    " function.addParameter('" + short + "', dtype='" + dtype + "', direction=function.IN, unit=" + unit + ")\n"
                    " function.can_handle_array=True\n"
                    " function.result_type = 'int32'\n return function\n")
                length = d["length"]
                output += ("def get_" + short + "_range(self):\n" + (
                    # Fixed length: literal bounds; otherwise delegate to the
                    # getter of the length parameter.
                    (" return 1," + str(length) + "\n") if isinstance(length, int) else
                    (" return 1, self.get_" + length + "()['" + length + "']\n"))
                )
        return output

    def generate_grid_interface_functions(self):
        """Return @legacy_function definitions for all grid variables."""
        output = ""
        for var, d in self.grid_variable_definition.items():
            vartype = d.get("vartype", None)
            dtype = d.get("dtype", "float64")
            # Accept numpy-style dtype classes as well as dtype name strings.
            dtype = dtype.__name__ if isinstance(dtype, type) else str(dtype)
            ndim = d.get("ndim", 1)
            index_ranges = d.get("index_ranges", None)
            unit = d.get("unit", None)
            unit = "None" if unit is None else unit.reference_string()
            for pyvar, forvar in zip(d["pyvar"], d["forvar"]):
                if vartype != "ro":
                    output += ("@legacy_function\ndef set_" + forvar + "():\n function = LegacyFunctionSpecification()\n" +
                        "".join([" function.addParameter('index{0}', dtype='i', direction=function.IN)\n".format(i) for i in range(ndim)]) +
                        " function.addParameter('" + pyvar + "', dtype='" + dtype + "', direction=function.IN, unit=" + unit + ")\n" +
                        " function.addParameter('n', direction=function.LENGTH)\n" +
                        " function.must_handle_array=True\n" +
                        " function.result_type = 'int32'\n return function\n")
                output += ("@legacy_function\ndef get_" + forvar + "():\n function = LegacyFunctionSpecification()\n" +
                    "".join([" function.addParameter('index{0}', dtype='i', direction=function.IN)\n".format(i) for i in range(ndim)]) +
                    " function.addParameter('" + pyvar + "', dtype='" + dtype + "', direction=function.OUT, unit=" + unit + ")\n" +
                    " function.addParameter('n', direction=function.LENGTH)\n" +
                    " function.must_handle_array=True\n" +
                    " function.result_type = 'int32'\n return function\n")
        return output

    def generate_interface_functions(self):
        """Return all python interface definitions (parameters + grids)."""
        output = ""
        output += self.generate_parameter_interface_functions()
        output += self.generate_grid_interface_functions()
        return output

    def generate_parameter_definitions(self, object):
        """Register all parameters on *object* via add_method_parameter,
        add_boolean_parameter or add_array_parameter."""
        for name, d in self.parameter_definition.items():
            short = d["short"]
            ptype = d["ptype"]
            dtype = d["dtype"]
            getter = "get_" + short
            if ptype in ["simple", "normal", "vector"]:
                setter = "set_" + short
            else:
                # Read-only parameters get no setter.
                setter = None
            range_method = "get_" + short + "_range"
            if ptype in ["simple", "normal", "ro"]:
                if dtype != 'bool':
                    object.add_method_parameter(
                        getter,
                        setter,
                        name,
                        d["description"],
                        d["default"]
                    )
                else:
                    object.add_boolean_parameter(
                        getter,
                        setter,
                        name,
                        d["description"],
                        d["default"]
                    )
            else:
                object.add_array_parameter(
                    getter,
                    setter,
                    range_method,
                    name,
                    d["description"]
                )
if __name__ == "__main__":
    # Small demo: emit Fortran accessors and python interface stubs for
    # two example grid variables (one read-only 1-d, one rw 2-d).
    demo_grid_variables = {
        "pressure": {
            "pyvar": ["pressure"],
            "forvar": ["pom"],
            "dtype": "float64",
            "ndim": 1,
            "index_ranges": [(1, 10)],
            "vartype": "ro",
        },
        "test2": {
            "pyvar": ["y"],
            "forvar": ["y"],
            "dtype": "float64",
            "ndim": 2,
            "index_ranges": [(1, "nla"), (1, "nla")],
        },
    }
    generator = FortranCodeGenerator(grid_variable_definition=demo_grid_variables)
    print(generator.grid_getter_setters())
    print(generator.generate_grid_interface_functions())
| 11,322
| 41.567669
| 140
|
py
|
amuse
|
amuse-main/src/amuse/rfi/tools/create_c.py
|
from amuse.support.core import late
from amuse.support import exceptions
from amuse.rfi.tools.create_code import GenerateASourcecodeString
from amuse.rfi.tools.create_code import GenerateASourcecodeStringFromASpecificationClass
from amuse.rfi.tools.create_code import DTypeSpec
from amuse.rfi.tools.create_code import dtypes
from amuse.rfi.tools.create_code import DTypeToSpecDictionary
from amuse.rfi.tools import create_definition
from amuse.rfi.core import LegacyFunctionSpecification
# Mapping from AMUSE dtype names to the buffer names, header count field,
# C type and MPI datatype used by the generated C/C++ worker.
# NOTE: 'string' maps to int/MPI_INTEGER because strings travel as integer
# size arrays plus a separate character stream (see the footer code below).
dtype_to_spec = DTypeToSpecDictionary({
    'int32': DTypeSpec('ints_in', 'ints_out',
        'HEADER_INTEGER_COUNT', 'int', 'MPI_INT'),
    'int64': DTypeSpec('longs_in', 'longs_out',
        'HEADER_LONG_COUNT', 'long long int', 'MPI_LONG_LONG_INT'),
    'float32': DTypeSpec('floats_in', 'floats_out',
        'HEADER_FLOAT_COUNT', 'float', 'MPI_FLOAT'),
    'float64': DTypeSpec('doubles_in', 'doubles_out',
        'HEADER_DOUBLE_COUNT', 'double', 'MPI_DOUBLE'),
    'bool': DTypeSpec('booleans_in', 'booleans_out',
        'HEADER_BOOLEAN_COUNT', 'bool', 'MPI_C_BOOL'),
    'string': DTypeSpec('strings_in', 'strings_out',
        'HEADER_STRING_COUNT', 'int', 'MPI_INTEGER'),
})
# C/C++ preamble emitted at the top of every generated worker: MPI header
# (unless NOMPI), socket headers (WIN32 vs POSIX) and signal support.
HEADER_CODE_STRING = """
#ifndef NOMPI
#include <mpi.h>
#endif
#include <iostream>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#ifdef WIN32
#include <winsock2.h>
#else
#include <sys/socket.h>
#include <netinet/in.h>
#include <unistd.h>
#include <netdb.h>
#include <netinet/tcp.h>
#include <arpa/inet.h>
#endif
#if _POSIX_VERSION >= 1
#ifndef _POSIX_C_SOURCE
#define _POSIX_C_SOURCE 1
#endif
#include <signal.h>
#endif
"""
# Message-protocol constants (header layout indices) and the global
# receive/send buffers shared by the generated worker's main loops.
CONSTANTS_AND_GLOBAL_VARIABLES_STRING = """
static int ERROR_FLAG = 256;
static int HEADER_SIZE = 11; //integers
static int HEADER_FLAGS = 0;
static int HEADER_CALL_ID = 1;
static int HEADER_FUNCTION_ID = 2;
static int HEADER_CALL_COUNT = 3;
static int HEADER_INTEGER_COUNT = 4;
static int HEADER_LONG_COUNT = 5;
static int HEADER_FLOAT_COUNT = 6;
static int HEADER_DOUBLE_COUNT = 7;
static int HEADER_BOOLEAN_COUNT = 8;
static int HEADER_STRING_COUNT = 9;
static int HEADER_UNITS_COUNT = 10;
static bool TRUE_BYTE = 1;
static bool FALSE_BYTE = 0;
static bool mpiIntercom = false;
static int socketfd = 0;
static int * header_in;
static int * header_out;
static int * ints_in;
static int * ints_out;
static long long int * longs_in;
static long long int * longs_out;
static float * floats_in;
static float * floats_out;
static double * doubles_in;
static double * doubles_out;
static bool * booleans_in;
static bool * booleans_out;
/* sizes of strings */
static int * string_sizes_in;
static int * string_sizes_out;
/* pointers to input and output strings (contents not stored here) */
static char * * strings_in;
static char * * strings_out;
/* actual string data */
static char * characters_in = 0;
static char * characters_out = 0;
"""
POLLING_FUNCTIONS_STRING = """
static int polling_interval = 0;
#ifndef NOMPI
#define MAX_COMMUNICATORS 2048
static char portname_buffer[MPI_MAX_PORT_NAME+1];
static MPI_Comm communicators[MAX_COMMUNICATORS];
static int lastid = -1;
static int activeid = -1;
static int id_to_activate = -1;
#else
static const char * empty_string = "";
#endif
int internal__get_message_polling_interval(int * outval)
{
*outval = polling_interval;
return 0;
}
int internal__set_message_polling_interval(int inval)
{
polling_interval = inval;
return 0;
}
int internal__open_port(char ** output)
{
#ifndef NOMPI
MPI_Open_port(MPI_INFO_NULL, portname_buffer);
*output = portname_buffer;
#else
*output = (char *) empty_string;
#endif
return 0;
}
int internal__accept_on_port(char * port_identifier, int * comm_identifier)
{
#ifndef NOMPI
int rank = 0;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
lastid++;
if(lastid >= MAX_COMMUNICATORS) {
lastid--;
return -1;
}
if(rank == 0){
MPI_Comm merged;
MPI_Comm communicator;
MPI_Comm_accept(port_identifier, MPI_INFO_NULL, 0, MPI_COMM_SELF, &communicator);
MPI_Intercomm_merge(communicator, 0, &merged);
MPI_Intercomm_create(MPI_COMM_WORLD,0,merged, 1, 65, &communicators[lastid]);
MPI_Comm_free(&merged);
MPI_Comm_free(&communicator);
} else {
MPI_Intercomm_create(MPI_COMM_WORLD,0, MPI_COMM_NULL, 1, 65, &communicators[lastid]);
}
*comm_identifier = lastid;
#else
*comm_identifier = -1;
#endif
return 0;
}
int internal__connect_to_port(char * port_identifier, int * comm_identifier)
{
#ifndef NOMPI
int rank = 0;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
lastid++;
if(lastid >= MAX_COMMUNICATORS) {
lastid--;
return -1;
}
if(rank == 0){
MPI_Comm merged;
MPI_Comm communicator;
MPI_Comm_connect(port_identifier, MPI_INFO_NULL, 0, MPI_COMM_SELF, &communicator);
MPI_Intercomm_merge(communicator, 1, &merged);
MPI_Intercomm_create(MPI_COMM_WORLD, 0, merged, 0, 65, &communicators[lastid]);
MPI_Comm_free(&merged);
MPI_Comm_free(&communicator);
} else {
MPI_Intercomm_create(MPI_COMM_WORLD, 0, MPI_COMM_NULL, 1, 65, &communicators[lastid]);
}
*comm_identifier = lastid;
#else
*comm_identifier = -1;
#endif
return 0;
}
int internal__activate_communicator(int comm_identifier){
#ifndef NOMPI
if(comm_identifier < 0 || comm_identifier > lastid) {
return -1;
}
id_to_activate = comm_identifier;
#endif
return 0;
}
int internal__become_code(int number_of_workers, char * modulename, char * classname)
{
return 0;
}
"""
RECV_HEADER_SLEEP_STRING = """
#ifndef NOMPI
#include <unistd.h>
int mpi_recv_header(MPI_Comm & parent)
{
MPI_Request header_request;
MPI_Status request_status;
MPI_Irecv(header_in, HEADER_SIZE, MPI_INT, 0, 989, parent, &header_request);
if(polling_interval > 0)
{
int is_finished = 0;
MPI_Test(&header_request, &is_finished, &request_status);
while(!is_finished) {
usleep(polling_interval);
MPI_Test(&header_request, &is_finished, &request_status);
}
MPI_Wait(&header_request, &request_status);
} else {
MPI_Wait(&header_request, &request_status);
}
return 0;
}
#endif
"""
FOOTER_CODE_STRING = """
void onexit_mpi(void) {
#ifndef NOMPI
int flag = 0;
MPI_Finalized(&flag);
if(!flag) {
MPI_Comm parent;
MPI_Comm_get_parent(&parent);
int rank = 0;
MPI_Comm_rank(parent, &rank);
header_out[HEADER_FLAGS] = ERROR_FLAG;
header_out[HEADER_CALL_ID] = 0;
header_out[HEADER_FUNCTION_ID] = 0;
header_out[HEADER_CALL_COUNT] = 0;
header_out[HEADER_INTEGER_COUNT] = 0;
header_out[HEADER_LONG_COUNT] = 0;
header_out[HEADER_FLOAT_COUNT] = 0;
header_out[HEADER_DOUBLE_COUNT] = 0;
header_out[HEADER_BOOLEAN_COUNT] = 0;
header_out[HEADER_STRING_COUNT] = 0;
header_out[HEADER_UNITS_COUNT] = 0;
MPI_Send(header_out, HEADER_SIZE, MPI_INT, 0, 999, parent);
for(int i = 0; i < lastid + 1; i++) {
MPI_Comm_disconnect(&communicators[i]);
}
MPI_Finalize();
}
#endif
}
void onexit_sockets(void) {
#ifdef WIN32
closesocket(socketfd);
#else
close(socketfd);
#endif
}
void send_array_sockets(void *buffer, int length, int file_descriptor, int rank) {
int total_written = 0;
int bytes_written;
if (rank != 0) {
return;
}
//fprintf(stderr, "number of bytes to write: %d\\n", length);
while (total_written < length) {
#ifdef WIN32
bytes_written = send(file_descriptor, ((char *) buffer) + total_written,
length - total_written, 0);
#else
bytes_written = write(file_descriptor, ((char *) buffer) + total_written,
length - total_written);
#endif
if (bytes_written == -1) {
perror("could not write data");
exit(1);
}
total_written = total_written + bytes_written;
}
}
void receive_array_sockets(void *buffer, int length, int file_descriptor, int rank) {
int total_read = 0;
int bytes_read;
if (rank != 0) {
return;
}
while (total_read < length) {
#ifdef WIN32
bytes_read = recv(file_descriptor, ((char *) buffer) + total_read,
length - total_read, 0);
#else
bytes_read = read(file_descriptor, ((char *) buffer) + total_read,
length - total_read);
#endif
if (bytes_read == -1) {
perror("could not read data");
exit(1);
}
total_read = total_read + bytes_read;
}
}
void new_arrays(int max_call_count) {
ints_in = new int[ max_call_count * MAX_INTS_IN];
ints_out = new int[ max_call_count * MAX_INTS_OUT];
longs_in = new long long int[ max_call_count * MAX_LONGS_IN];
longs_out = new long long int[ max_call_count * MAX_LONGS_OUT];
floats_in = new float[ max_call_count * MAX_FLOATS_IN];
floats_out = new float[ max_call_count * MAX_FLOATS_OUT];
doubles_in = new double[ max_call_count * MAX_DOUBLES_IN];
doubles_out = new double[ max_call_count * MAX_DOUBLES_OUT];
booleans_in = new bool[ max_call_count * MAX_BOOLEANS_IN];
booleans_out = new bool[ max_call_count * MAX_BOOLEANS_OUT];
string_sizes_in = new int[ max_call_count * MAX_STRINGS_IN];
string_sizes_out = new int[ max_call_count * MAX_STRINGS_OUT];
strings_in = new char *[ max_call_count * MAX_STRINGS_IN];
strings_out = new char *[ max_call_count * MAX_STRINGS_OUT];
}
void delete_arrays() {
delete[] ints_in;
delete[] ints_out;
delete[] longs_in;
delete[] longs_out;
delete[] floats_in;
delete[] floats_out;
delete[] doubles_in;
delete[] doubles_out;
delete[] booleans_in;
delete[] booleans_out;
delete[] string_sizes_in;
delete[] string_sizes_out;
delete[] strings_in;
delete[] strings_out;
}
#if !defined(NOMPI) && _POSIX_VERSION >= 1
void abort_mpi_on_signal(int signo)
{
MPI_Comm parent;
MPI_Request req;
MPI_Comm_get_parent(&parent);
header_out[HEADER_FLAGS] = ERROR_FLAG;
header_out[HEADER_CALL_ID] = 0;
header_out[HEADER_FUNCTION_ID] = 0;
header_out[HEADER_CALL_COUNT] = 0;
header_out[HEADER_INTEGER_COUNT] = 0;
header_out[HEADER_LONG_COUNT] = 0;
header_out[HEADER_FLOAT_COUNT] = 0;
header_out[HEADER_DOUBLE_COUNT] = 0;
header_out[HEADER_BOOLEAN_COUNT] = 0;
header_out[HEADER_STRING_COUNT] = 0;
header_out[HEADER_UNITS_COUNT] = 0;
MPI_Isend(header_out, HEADER_SIZE, MPI_INT, 0, 999, parent, &req);
MPI_Comm_disconnect(&parent);
MPI_Abort(MPI_COMM_WORLD, -1);
}
#endif
void run_mpi(int argc, char *argv[]) {
#ifndef NOMPI
int provided;
int rank = 0;
mpiIntercom = true;
//fprintf(stderr, "C worker: running in mpi mode\\n");
MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
#if _POSIX_VERSION >= 1
if (provided == MPI_THREAD_MULTIPLE) {
int abort_signals[] = {
SIGABRT, SIGBUS, SIGILL, SIGINT, SIGQUIT, SIGSEGV, SIGTERM
};
struct sigaction handler;
handler.sa_handler = abort_mpi_on_signal;
sigemptyset(&handler.sa_mask);
handler.sa_flags = 0;
for (int i = 0; i < int ((sizeof abort_signals) / (sizeof abort_signals[0])); i++) {
int result = sigaction(abort_signals[i], &handler, NULL);
if (result == -1) {
perror("Error installing signal handler");
exit(EXIT_FAILURE);
}
}
}
#endif
MPI_Comm parent;
MPI_Comm_get_parent(&communicators[0]);
lastid += 1;
activeid = 0;
parent = communicators[activeid];
MPI_Comm_rank(parent, &rank);
atexit(onexit_mpi);
bool must_run_loop = true;
int max_call_count = 10;
header_in = new int[HEADER_SIZE];
header_out = new int[HEADER_SIZE];
new_arrays(max_call_count);
while(must_run_loop) {
//fprintf(stderr, "receiving header\\n");
if(id_to_activate >= 0 && id_to_activate != activeid){
activeid = id_to_activate;
id_to_activate = -1;
parent = communicators[activeid];
MPI_Comm_rank(parent, &rank);
}
mpi_recv_header(parent);
//fprintf(stderr, "C worker code: got header %d %d %d %d %d %d %d %d %d %d\\n", header_in[0], header_in[1], header_in[2], header_in[3], header_in[4], header_in[5], header_in[6], header_in[7], header_in[8], header_in[9]);
int call_count = header_in[HEADER_CALL_COUNT];
if (call_count > max_call_count) {
delete_arrays();
max_call_count = call_count + 255;
new_arrays(max_call_count);
}
if(header_in[HEADER_INTEGER_COUNT] > 0) {
MPI_Bcast(ints_in, header_in[HEADER_INTEGER_COUNT] , MPI_INT, 0, parent);
}
if(header_in[HEADER_LONG_COUNT] > 0) {
MPI_Bcast(longs_in, header_in[HEADER_LONG_COUNT], MPI_LONG_LONG_INT, 0, parent);
}
if(header_in[HEADER_FLOAT_COUNT] > 0) {
MPI_Bcast(floats_in, header_in[HEADER_FLOAT_COUNT], MPI_FLOAT, 0, parent);
}
if(header_in[HEADER_DOUBLE_COUNT] > 0) {
MPI_Bcast(doubles_in, header_in[HEADER_DOUBLE_COUNT], MPI_DOUBLE, 0, parent);
}
if(header_in[HEADER_BOOLEAN_COUNT] > 0) {
MPI_Bcast(booleans_in, header_in[HEADER_BOOLEAN_COUNT], MPI_C_BOOL, 0, parent);
}
if(header_in[HEADER_STRING_COUNT] > 0) {
MPI_Bcast(string_sizes_in, header_in[HEADER_STRING_COUNT], MPI_INTEGER, 0, parent);
int total_string_size = 0;
for (int i = 0; i < header_in[HEADER_STRING_COUNT];i++) {
total_string_size += string_sizes_in[i] + 1;
}
characters_in = new char[total_string_size];
MPI_Bcast(characters_in, total_string_size, MPI_CHARACTER, 0, parent);
int offset = 0;
for (int i = 0 ; i < header_in[HEADER_STRING_COUNT];i++) {
strings_in[i] = characters_in + offset;
offset += string_sizes_in[i] + 1;
}
}
header_out[HEADER_FLAGS] = 0;
header_out[HEADER_CALL_ID] = header_in[HEADER_CALL_ID];
header_out[HEADER_FUNCTION_ID] = header_in[HEADER_FUNCTION_ID];
header_out[HEADER_CALL_COUNT] = call_count;
header_out[HEADER_INTEGER_COUNT] = 0;
header_out[HEADER_LONG_COUNT] = 0;
header_out[HEADER_FLOAT_COUNT] = 0;
header_out[HEADER_DOUBLE_COUNT] = 0;
header_out[HEADER_BOOLEAN_COUNT] = 0;
header_out[HEADER_STRING_COUNT] = 0;
header_out[HEADER_UNITS_COUNT] = 0;
//fprintf(stderr, "c worker mpi: handling call\\n");
must_run_loop = handle_call();
//fprintf(stderr, "c worker mpi: call handled\\n");
MPI_Barrier(MPI_COMM_WORLD);
if(rank == 0) {
MPI_Send(header_out, HEADER_SIZE, MPI_INT, 0, 999, parent);
if(header_out[HEADER_INTEGER_COUNT] > 0) {
MPI_Send(ints_out, header_out[HEADER_INTEGER_COUNT], MPI_INT, 0, 999, parent);
}
if(header_out[HEADER_LONG_COUNT] > 0) {
MPI_Send(longs_out, header_out[HEADER_LONG_COUNT], MPI_LONG_LONG_INT, 0, 999, parent);
}
if(header_out[HEADER_FLOAT_COUNT] > 0) {
MPI_Send(floats_out, header_out[HEADER_FLOAT_COUNT], MPI_FLOAT, 0, 999, parent);
}
if(header_out[HEADER_DOUBLE_COUNT] > 0) {
MPI_Send(doubles_out, header_out[HEADER_DOUBLE_COUNT], MPI_DOUBLE, 0, 999, parent);
}
if(header_out[HEADER_BOOLEAN_COUNT] > 0) {
MPI_Send(booleans_out, header_out[HEADER_BOOLEAN_COUNT], MPI_C_BOOL, 0, 999, parent);
}
if(header_out[HEADER_STRING_COUNT] > 0) {
int offset = 0;
for( int i = 0; i < header_out[HEADER_STRING_COUNT] ; i++) {
int length = strlen(strings_out[i]);
string_sizes_out[i] = length;
offset += length + 1;
}
characters_out = new char[offset + 1];
offset = 0;
for( int i = 0; i < header_out[HEADER_STRING_COUNT] ; i++) {
strcpy(characters_out+offset, strings_out[i]);
offset += string_sizes_out[i] + 1;
}
MPI_Send(string_sizes_out, header_out[HEADER_STRING_COUNT], MPI_INTEGER, 0, 999, parent);
MPI_Send(characters_out, offset, MPI_BYTE, 0, 999, parent);
}
}
if (characters_in) {
delete[] characters_in;
characters_in = 0;
}
if (characters_out) {
delete[] characters_out;
characters_out = 0;
}
//fprintf(stderr, "call done\\n");
}
delete_arrays();
for(int i = 0; i < lastid + 1; i++) {
MPI_Comm_disconnect(&communicators[i]);
}
MPI_Finalize();
//fprintf(stderr, "mpi finalized\\n");
#else
fprintf(stderr, "mpi support not compiled into worker\\n");
exit(1);
#endif
}
void run_sockets_mpi(int argc, char *argv[], int port, char *host) {
#ifndef NOMPI
bool must_run_loop = true;
int max_call_count = 10;
struct sockaddr_in serv_addr;
struct hostent *server;
int on = 1;
int provided = 0;
int rank = -1;
mpiIntercom = false;
MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
if (rank == 0) {
//fprintf(stderr, "C worker: running in sockets+mpi mode\\n");
socketfd = socket(AF_INET, SOCK_STREAM, 0);
if (socketfd < 0) {
perror("ERROR opening socket");
//fprintf(stderr, "cannot open socket\\n");
exit(1);
}
//turn on no-delay option in tcp for huge speed improvement
setsockopt (socketfd, IPPROTO_TCP, TCP_NODELAY, &on, sizeof (on));
server = gethostbyname(host);
memset((char *) &serv_addr, '\\0', sizeof(serv_addr));
serv_addr.sin_family = AF_INET;
memcpy((char *) &serv_addr.sin_addr.s_addr, (char *) server->h_addr, server->h_length);
serv_addr.sin_port = htons(port);
if (connect(socketfd, (struct sockaddr *) &serv_addr, sizeof(serv_addr)) < 0) {
fprintf(stderr, "cannot connect socket to host %s, port %d\\n", host, port);
fprintf(stderr, "resolved IP address: %s\\n", inet_ntoa( * (struct in_addr *) server->h_addr));
perror("ERROR connecting socket");
//fprintf(stderr, "cannot connect socket\\n");
exit(1);
}
//fprintf(stderr, "sockets_mpi: finished initializing code\\n");
atexit(onexit_sockets);
}
header_in = new int[HEADER_SIZE];
header_out = new int[HEADER_SIZE];
new_arrays(max_call_count);
while(must_run_loop) {
//fprintf(stderr, "sockets_mpi: receiving header\\n");
receive_array_sockets(header_in, HEADER_SIZE * sizeof(int), socketfd, rank);
MPI_Bcast(header_in, HEADER_SIZE, MPI_INT, 0, MPI_COMM_WORLD);
//fprintf(stderr, "C sockets_mpi worker code: got header %d %d %d %d %d %d %d %d %d %d\\n", header_in[0], header_in[1], header_in[2], header_in[3], header_in[4], header_in[5], header_in[6], header_in[7], header_in[8], header_in[9]);
int call_count = header_in[HEADER_CALL_COUNT];
if (call_count > max_call_count) {
delete_arrays();
max_call_count = call_count + 255;
new_arrays(max_call_count);
}
if (header_in[HEADER_INTEGER_COUNT] > 0) {
receive_array_sockets(ints_in, header_in[HEADER_INTEGER_COUNT] * sizeof(int), socketfd, rank);
MPI_Bcast(ints_in, header_in[HEADER_INTEGER_COUNT], MPI_INTEGER, 0, MPI_COMM_WORLD);
}
if (header_in[HEADER_LONG_COUNT] > 0) {
receive_array_sockets(longs_in, header_in[HEADER_LONG_COUNT] * sizeof(long long int), socketfd, rank);
MPI_Bcast(longs_in, header_in[HEADER_LONG_COUNT], MPI_LONG_LONG_INT, 0, MPI_COMM_WORLD);
}
if(header_in[HEADER_FLOAT_COUNT] > 0) {
receive_array_sockets(floats_in, header_in[HEADER_FLOAT_COUNT] * sizeof(float), socketfd, rank);
MPI_Bcast(floats_in, header_in[HEADER_FLOAT_COUNT], MPI_FLOAT, 0, MPI_COMM_WORLD);
}
if(header_in[HEADER_DOUBLE_COUNT] > 0) {
receive_array_sockets(doubles_in, header_in[HEADER_DOUBLE_COUNT] * sizeof(double), socketfd, rank);
MPI_Bcast(doubles_in, header_in[HEADER_DOUBLE_COUNT], MPI_DOUBLE, 0, MPI_COMM_WORLD);
}
if(header_in[HEADER_BOOLEAN_COUNT] > 0) {
receive_array_sockets(booleans_in, header_in[HEADER_BOOLEAN_COUNT] * sizeof(bool), socketfd , rank);
MPI_Bcast(booleans_in, header_in[HEADER_BOOLEAN_COUNT], MPI_C_BOOL, 0, MPI_COMM_WORLD);
}
if(header_in[HEADER_STRING_COUNT] > 0) {
receive_array_sockets(string_sizes_in, header_in[HEADER_STRING_COUNT] * sizeof(int), socketfd, rank);
MPI_Bcast(string_sizes_in, header_in[HEADER_STRING_COUNT], MPI_INT, 0, MPI_COMM_WORLD);
int total_string_size = 0;
for (int i = 0; i < header_in[HEADER_STRING_COUNT];i++) {
total_string_size += string_sizes_in[i] + 1;
}
characters_in = new char[total_string_size];
receive_array_sockets(characters_in, total_string_size, socketfd, rank);
MPI_Bcast(characters_in, total_string_size, MPI_CHARACTER, 0, MPI_COMM_WORLD);
int offset = 0;
for (int i = 0 ; i < header_in[HEADER_STRING_COUNT];i++) {
strings_in[i] = characters_in + offset;
offset += string_sizes_in[i] + 1;
}
}
header_out[HEADER_FLAGS] = 0;
header_out[HEADER_CALL_ID] = header_in[HEADER_CALL_ID];
header_out[HEADER_FUNCTION_ID] = header_in[HEADER_FUNCTION_ID];
header_out[HEADER_CALL_COUNT] = call_count;
header_out[HEADER_INTEGER_COUNT] = 0;
header_out[HEADER_LONG_COUNT] = 0;
header_out[HEADER_FLOAT_COUNT] = 0;
header_out[HEADER_DOUBLE_COUNT] = 0;
header_out[HEADER_BOOLEAN_COUNT] = 0;
header_out[HEADER_STRING_COUNT] = 0;
header_out[HEADER_UNITS_COUNT] = 0;
//fprintf(stderr, "c worker sockets_mpi: handling call\\n");
must_run_loop = handle_call();
//fprintf(stderr, "c worker sockets_mpi: call handled\\n");
MPI_Barrier(MPI_COMM_WORLD);
if (rank == 0) {
send_array_sockets(header_out, HEADER_SIZE * sizeof(int), socketfd, 0);
if(header_out[HEADER_INTEGER_COUNT] > 0) {
send_array_sockets(ints_out, header_out[HEADER_INTEGER_COUNT] * sizeof(int), socketfd, 0);
}
if(header_out[HEADER_LONG_COUNT] > 0) {
send_array_sockets(longs_out, header_out[HEADER_LONG_COUNT] * sizeof(long long int), socketfd, 0);
}
if(header_out[HEADER_FLOAT_COUNT] > 0) {
send_array_sockets(floats_out, header_out[HEADER_FLOAT_COUNT] * sizeof(float), socketfd, 0);
}
if(header_out[HEADER_DOUBLE_COUNT] > 0) {
send_array_sockets(doubles_out, header_out[HEADER_DOUBLE_COUNT] * sizeof(double), socketfd, 0);
}
if(header_out[HEADER_BOOLEAN_COUNT] > 0) {
send_array_sockets(booleans_out, header_out[HEADER_BOOLEAN_COUNT] * sizeof(bool), socketfd, 0);
}
if(header_out[HEADER_STRING_COUNT] > 0) {
int offset = 0;
for( int i = 0; i < header_out[HEADER_STRING_COUNT] ; i++) {
int length = strlen(strings_out[i]);
string_sizes_out[i] = length;
offset += length + 1;
}
characters_out = new char[offset + 1];
offset = 0;
for( int i = 0; i < header_out[HEADER_STRING_COUNT] ; i++) {
strcpy(characters_out+offset, strings_out[i]);
offset += string_sizes_out[i] + 1;
}
send_array_sockets(string_sizes_out, header_out[HEADER_STRING_COUNT] * sizeof(int), socketfd, 0);
send_array_sockets(characters_out, offset * sizeof(char), socketfd, 0);
}
//fprintf(stderr, "sockets_mpicall done\\n");
}
if (characters_in) {
delete[] characters_in;
characters_in = 0;
}
if (characters_out) {
delete[] characters_out;
characters_out = 0;
}
}
delete_arrays();
if (rank == 0) {
#ifdef WIN32
closesocket(socketfd);
#else
close(socketfd);
#endif
}
MPI_Finalize();
//fprintf(stderr, "sockets_mpi done\\n");
#else
fprintf(stderr, "mpi support not compiled into worker\\n");
exit(1);
#endif
}
void run_sockets(int port, char *host) {
bool must_run_loop = true;
int max_call_count = 10;
struct sockaddr_in serv_addr;
struct hostent *server;
int on = 1;
#ifdef WIN32
WSADATA wsaData;
int iResult;
// Initialize Winsock
iResult = WSAStartup(MAKEWORD(2,2), &wsaData);
if (iResult != 0) {
printf("WSAStartup failed: %d\\n", iResult);
exit(1);
}
#endif
mpiIntercom = false;
//fprintf(stderr, "C worker: running in sockets mode\\n");
socketfd = socket(AF_INET, SOCK_STREAM, 0);
if (socketfd < 0) {
fprintf(stderr, "cannot open socket\\n");
exit(1);
}
//turn on no-delay option in tcp for huge speed improvement
setsockopt (socketfd, IPPROTO_TCP, TCP_NODELAY, (const char *)&on, sizeof (on));
server = gethostbyname(host);
memset((char *) &serv_addr, '\\0', sizeof(serv_addr));
serv_addr.sin_family = AF_INET;
memcpy((char *) &serv_addr.sin_addr.s_addr, (char *) server->h_addr, server->h_length);
serv_addr.sin_port = htons(port);
if (connect(socketfd, (struct sockaddr *) &serv_addr, sizeof(serv_addr)) < 0) {
fprintf(stderr, "cannot connect socket to host %s, port %d\\n", host, port);
fprintf(stderr, "resolved IP address: %s\\n", inet_ntoa( * (struct in_addr *) server->h_addr));
perror("ERROR connecting socket");
//fprintf(stderr, "cannot connect socket\\n");
exit(1);
}
//fprintf(stderr, "sockets: finished initializing code\\n");
atexit(onexit_sockets);
header_in = new int[HEADER_SIZE];
header_out = new int[HEADER_SIZE];
new_arrays(max_call_count);
while(must_run_loop) {
//fprintf(stderr, "sockets: receiving header\\n");
receive_array_sockets(header_in, HEADER_SIZE * sizeof(int), socketfd, 0);
//fprintf(stderr, "C sockets worker code: got header %d %d %d %d %d %d %d %d %d %d\\n", header_in[0], header_in[1], header_in[2], header_in[3], header_in[4], header_in[5], header_in[6], header_in[7], header_in[8], header_in[9]);
int call_count = header_in[HEADER_CALL_COUNT];
if (call_count > max_call_count) {
delete_arrays();
max_call_count = call_count + 255;
new_arrays(max_call_count);
}
if (header_in[HEADER_INTEGER_COUNT] > 0) {
receive_array_sockets(ints_in, header_in[HEADER_INTEGER_COUNT] * sizeof(int), socketfd, 0);
}
if (header_in[HEADER_LONG_COUNT] > 0) {
receive_array_sockets(longs_in, header_in[HEADER_LONG_COUNT] * sizeof(long long int), socketfd, 0);
}
if(header_in[HEADER_FLOAT_COUNT] > 0) {
receive_array_sockets(floats_in, header_in[HEADER_FLOAT_COUNT] * sizeof(float), socketfd, 0);
}
if(header_in[HEADER_DOUBLE_COUNT] > 0) {
receive_array_sockets(doubles_in, header_in[HEADER_DOUBLE_COUNT] * sizeof(double), socketfd, 0);
}
if(header_in[HEADER_BOOLEAN_COUNT] > 0) {
receive_array_sockets(booleans_in, header_in[HEADER_BOOLEAN_COUNT] * sizeof(bool), socketfd , 0);
}
if(header_in[HEADER_STRING_COUNT] > 0) {
receive_array_sockets(string_sizes_in, header_in[HEADER_STRING_COUNT] * sizeof(int), socketfd, 0);
int total_string_size = 0;
for (int i = 0; i < header_in[HEADER_STRING_COUNT];i++) {
total_string_size += string_sizes_in[i] + 1;
}
characters_in = new char[total_string_size];
receive_array_sockets(characters_in, total_string_size, socketfd, 0);
int offset = 0;
for (int i = 0 ; i < header_in[HEADER_STRING_COUNT];i++) {
strings_in[i] = characters_in + offset;
offset += string_sizes_in[i] + 1;
}
}
header_out[HEADER_FLAGS] = 0;
header_out[HEADER_CALL_ID] = header_in[HEADER_CALL_ID];
header_out[HEADER_FUNCTION_ID] = header_in[HEADER_FUNCTION_ID];
header_out[HEADER_CALL_COUNT] = call_count;
header_out[HEADER_INTEGER_COUNT] = 0;
header_out[HEADER_LONG_COUNT] = 0;
header_out[HEADER_FLOAT_COUNT] = 0;
header_out[HEADER_DOUBLE_COUNT] = 0;
header_out[HEADER_BOOLEAN_COUNT] = 0;
header_out[HEADER_STRING_COUNT] = 0;
header_out[HEADER_UNITS_COUNT] = 0;
//fprintf(stderr, "c worker sockets: handling call\\n");
must_run_loop = handle_call();
//fprintf(stderr, "c worker sockets: call handled\\n");
send_array_sockets(header_out, HEADER_SIZE * sizeof(int), socketfd, 0);
if(header_out[HEADER_INTEGER_COUNT] > 0) {
send_array_sockets(ints_out, header_out[HEADER_INTEGER_COUNT] * sizeof(int), socketfd, 0);
}
if(header_out[HEADER_LONG_COUNT] > 0) {
send_array_sockets(longs_out, header_out[HEADER_LONG_COUNT] * sizeof(long long int), socketfd, 0);
}
if(header_out[HEADER_FLOAT_COUNT] > 0) {
send_array_sockets(floats_out, header_out[HEADER_FLOAT_COUNT] * sizeof(float), socketfd, 0);
}
if(header_out[HEADER_DOUBLE_COUNT] > 0) {
send_array_sockets(doubles_out, header_out[HEADER_DOUBLE_COUNT] * sizeof(double), socketfd, 0);
}
if(header_out[HEADER_BOOLEAN_COUNT] > 0) {
send_array_sockets(booleans_out, header_out[HEADER_BOOLEAN_COUNT] * sizeof(bool), socketfd, 0);
}
if(header_out[HEADER_STRING_COUNT] > 0) {
int offset = 0;
for( int i = 0; i < header_out[HEADER_STRING_COUNT] ; i++) {
int length = strlen(strings_out[i]);
string_sizes_out[i] = length;
offset += length + 1;
}
characters_out = new char[offset + 1];
offset = 0;
for( int i = 0; i < header_out[HEADER_STRING_COUNT] ; i++) {
strcpy(characters_out+offset, strings_out[i]);
offset += string_sizes_out[i] + 1;
}
send_array_sockets(string_sizes_out, header_out[HEADER_STRING_COUNT] * sizeof(int), socketfd, 0);
send_array_sockets(characters_out, offset * sizeof(char), socketfd, 0);
}
if (characters_in) {
delete[] characters_in;
characters_in = 0;
}
if (characters_out) {
delete[] characters_out;
characters_out = 0;
}
//fprintf(stderr, "call done\\n");
}
delete_arrays();
#ifdef WIN32
closesocket(socketfd);
#else
close(socketfd);
#endif
//fprintf(stderr, "sockets done\\n");
}
int main(int argc, char *argv[]) {
int port;
bool use_mpi;
char *host;
//for(int i = 0 ; i < argc; i++) {
// fprintf(stderr, "argument %d is %s\\n", i, argv[i]);
//}
if (argc == 1) {
run_mpi(argc, argv);
} else if (argc == 4) {
port = atoi(argv[1]);
host = argv[2];
if (strcmp(argv[3], "true") == 0) {
use_mpi = true;
} else if (strcmp(argv[3], "false") == 0) {
use_mpi = false;
} else {
fprintf(stderr, "mpi enabled setting must be either 'true' or 'false', not %s\\n", argv[2]);
fprintf(stderr, "usage: %s [PORT HOST MPI_ENABLED]\\n", argv[0]);
exit(1);
}
if (use_mpi) {
run_sockets_mpi(argc, argv, port, host);
} else {
run_sockets(port, host);
}
} else {
fprintf(stderr, "%s need either 0 or 4 arguments, not %d\\n", argv[0], argc);
fprintf(stderr, "usage: %s [PORT HOST MPI_ENABLED]\\n", argv[0]);
exit(1);
}
return 0;
}
"""
GETSET_WORKING_DIRECTORY="""
char path_buffer[4096];
int set_working_directory(char *c) {
return chdir(c);
}
int get_working_directory(char **c) {
if(getcwd( path_buffer , sizeof(path_buffer))==NULL) {
return -1;
} else {
*c=path_buffer;
return 0;
}
}
"""
class MakeCCodeString(GenerateASourcecodeString):
    """Base class for generators that emit C source; binds the C dtype table."""
    @late
    def dtype_to_spec(self):
        # Module-level mapping from AMUSE dtype name to its C type spec
        # (type name, transfer-buffer variable names, header counter name).
        return dtype_to_spec
class GenerateACStringOfAFunctionSpecification(MakeCCodeString):
    """Emit one `case <id>: ...` block of the worker's handle_call() switch.

    The block unpacks arguments from the typed transfer buffers (ints_in,
    doubles_in, ...), calls the wrapped C function once (or once per element
    when the call is vectorized), and records the number of outputs per dtype
    in header_out.  All text is built through the `self.out` print DSL, whose
    statement order directly determines the generated C, so the sequencing
    below is significant.
    """
    @late
    def specification(self):
        # Must be assigned by the caller before start() is invoked.
        raise exceptions.AmuseException("No specification set, please set the specification first")
    def start(self):
        """Generate the full case statement into self.out; result in self._result."""
        self.specification.prepare_output_parameters()
        self.output_casestmt_start()
        self.out.indent()
        if self.specification.must_handle_array:
            # Function receives the whole array plus a length parameter:
            # single call, no loop.
            pass
        elif self.specification.can_handle_array:
            # Scalar function applied element-wise: wrap the call in a C loop.
            self.out.lf() + 'for (int i = 0 ; i < call_count; i++){'
            self.out.indent()
        self.output_copy_inout_variables()
        self.output_function_start()
        self.output_function_parameters()
        self.output_function_end()
        if self.specification.must_handle_array:
            if not self.specification.result_type is None:
                # The single call produced one result; replicate it so every
                # element of the output buffer carries the return value.
                spec = self.dtype_to_spec[self.specification.result_type]
                self.out.lf() + 'for (int i = 1 ; i < call_count; i++){'
                self.out.indent()
                self.out.lf() + spec.output_var_name + '[i]' + ' = ' + spec.output_var_name + '[0]' + ';'
                self.out.dedent()
                self.out.lf() + '}'
        elif self.specification.can_handle_array:
            # Close the element-wise loop opened above.
            self.out.dedent()
            self.out.lf() + '}'
        self.output_lines_with_number_of_outputs()
        self.output_casestmt_end()
        self.out.dedent()
        self._result = self.out.string
    def index_string(self, index, must_copy_in_to_out=False):
        """Return the C subscript expression for parameter slot `index`.

        Buffers are laid out parameter-major: slot `k` of a vectorized call
        occupies `[k * call_count .. (k+1) * call_count)`.  Scalar (non-array)
        calls index by the plain slot number.
        """
        if self.specification.must_handle_array and not must_copy_in_to_out:
            # Pass the base address of the parameter's sub-array.
            if index == 0:
                return '0'
            else:
                return '( %d * call_count)' % index
        elif self.specification.can_handle_array or (self.specification.must_handle_array and must_copy_in_to_out):
            # Per-element access inside the generated `for (i ...)` loop.
            if index == 0:
                return 'i'
            else:
                return '( %d * call_count) + i' % index
        else:
            return index
    def input_var(self, name, index):
        # IN parameters are passed by value, except for must_handle_array
        # functions which receive a pointer into the buffer.
        if self.specification.must_handle_array:
            self.output_var(name, index)
        else:
            self.out.n() + name
            self.out + '[' + self.index_string(index) + ']'
    def output_var(self, name, index):
        # OUT/INOUT parameters are always passed as a pointer into the buffer.
        self.out.n() + '&' + name
        self.out + '[' + self.index_string(index) + ']'
    def output_function_parameters(self):
        """Emit the comma-separated argument list of the generated call."""
        self.out.indent()
        first = True
        for parameter in self.specification.parameters:
            spec = self.dtype_to_spec[parameter.datatype]
            if first:
                first = False
            else:
                self.out + ' ,'
            if parameter.direction == LegacyFunctionSpecification.IN:
                self.input_var(spec.input_var_name, parameter.input_index)
            if parameter.direction == LegacyFunctionSpecification.INOUT:
                self.output_var(spec.output_var_name, parameter.output_index)
            elif parameter.direction == LegacyFunctionSpecification.OUT:
                self.output_var(spec.output_var_name, parameter.output_index)
            elif parameter.direction == LegacyFunctionSpecification.LENGTH:
                # LENGTH parameters receive the per-call element count.
                self.out.n() + 'call_count'
        self.out.dedent()
    def output_copy_inout_variables(self):
        # INOUT values arrive in the input buffer but are passed (and
        # returned) through the output buffer, so copy them across first.
        for parameter in self.specification.parameters:
            spec = self.dtype_to_spec[parameter.datatype]
            if parameter.direction == LegacyFunctionSpecification.INOUT:
                if self.specification.must_handle_array:
                    self.out.lf() + 'for (int i = 0 ; i < call_count; i++){'
                    self.out.indent()
                self.out.n() + spec.output_var_name
                self.out + '[' + self.index_string(parameter.output_index, must_copy_in_to_out=True) + ']'
                self.out + ' = '
                self.out + spec.input_var_name + '[' + self.index_string(parameter.input_index, must_copy_in_to_out=True) + ']' + ';'
                if self.specification.must_handle_array:
                    self.out.dedent()
                    self.out.lf() + '}'
    def output_lines_with_number_of_outputs(self):
        """Emit `header_out[<counter>] = N * call_count;` per output dtype."""
        dtype_to_count = {}
        for parameter in self.specification.output_parameters:
            count = dtype_to_count.get(parameter.datatype, 0)
            dtype_to_count[parameter.datatype] = count + 1
        if not self.specification.result_type is None:
            # The function's return value is shipped back as one extra output.
            count = dtype_to_count.get(self.specification.result_type, 0)
            dtype_to_count[self.specification.result_type] = count + 1
        for dtype in dtype_to_count:
            spec = self.dtype_to_spec[dtype]
            count = dtype_to_count[dtype]
            self.out.n()
            self.out + 'header_out[' + spec.counter_name
            self.out + '] = ' + count + ' * call_count;'
        pass
    def output_function_end(self):
        if len(self.specification.parameters) > 0:
            self.out.n()
        self.out + ')' + ';'
    def output_function_start(self):
        # Assign the return value (if any) into the result buffer, then open
        # the call: `<out_buf>[idx] = <name>(`.
        self.out.n()
        if not self.specification.result_type is None:
            spec = self.dtype_to_spec[self.specification.result_type]
            self.out + spec.output_var_name
            self.out + '[' + self.index_string(0) + ']' + ' = '
        self.out + self.specification.name + '('
    def output_casestmt_start(self):
        self.out + 'case ' + self.specification.id + ':'
    def output_casestmt_end(self):
        self.out.n() + 'break;'
class GenerateACHeaderDefinitionStringFromAFunctionSpecification(MakeCCodeString):
    """Emit the C prototype (for the header file) of one interface function."""
    def start(self):
        self.output_function_start()
        self.output_function_parameters()
        self.output_function_end()
        self._result = self.out.string
    def output_function_parameters(self):
        """Emit the typed parameter list of the prototype."""
        first = True
        for parameter in self.specification.parameters:
            spec = self.dtype_to_spec[parameter.datatype]
            if first:
                first = False
            else:
                self.out + ', '
            if parameter.datatype == 'string':
                # Strings are declared as char, pointer levels added below.
                self.out + 'char'
            else:
                self.out + spec.type
            self.out + ' '
            # Outputs (and all parameters of array-handling functions) are
            # passed by pointer.
            if parameter.is_output() or (parameter.is_input() and self.specification.must_handle_array):
                self.out + '*' + ' '
            if parameter.datatype == 'string':
                # Strings get one extra indirection (char * in, char ** out).
                self.out + '*' + ' '
            self.out + parameter.name
    def output_function_end(self):
        self.out + ')' + ';'
    def output_function_start(self):
        # `<result type|void> <name>(`
        self.out.n()
        if not self.specification.result_type is None:
            spec = self.dtype_to_spec[self.specification.result_type]
            self.out + spec.type
            self.out + ' '
        else:
            self.out + 'void' + ' '
        self.out + self.specification.name + '('
class GenerateACSourcecodeStringFromASpecificationClass\
        (GenerateASourcecodeStringFromASpecificationClass):
    """Assemble the complete worker_code.cc for a specification class.

    Concatenates the fixed header/footer template strings with the generated
    per-function case statements of handle_call().
    """
    @late
    def specification_class(self):
        # Must be assigned by the caller before start() is invoked.
        raise exceptions.AmuseException("No specification_class set, please set the specification_class first")
    @late
    def dtype_to_spec(self):
        # C datatype table shared by all C generators in this module.
        return dtype_to_spec
    def output_sourcecode_for_function(self):
        # Factory for the per-function case-statement generator.
        return GenerateACStringOfAFunctionSpecification()
    def start(self):
        """Emit the whole worker source into self.out; result in self._result."""
        self.out + HEADER_CODE_STRING
        self.output_local_includes()
        self.output_needs_mpi()
        self.output_code_constants()
        self.out.lf() + CONSTANTS_AND_GLOBAL_VARIABLES_STRING
        self.out.lf() + POLLING_FUNCTIONS_STRING
        self.out.lf() + GETSET_WORKING_DIRECTORY
        if self.must_generate_mpi:
            self.out.lf() + RECV_HEADER_SLEEP_STRING
        self.output_handle_call()
        self.out.lf() + FOOTER_CODE_STRING
        self._result = self.out.string
    def output_local_includes(self):
        # #include the headers declared on the specification class, if any.
        if hasattr(self.specification_class, 'include_headers'):
            for x in self.specification_class.include_headers:
                self.out.n() + '#include "' + x + '"'
        self.out.lf()
    def output_needs_mpi(self):
        # Compile-time flag telling the worker whether it must set up MPI.
        if self.needs_mpi and self.must_generate_mpi:
            self.out.lf() + 'static bool NEEDS_MPI = true;'
        else:
            self.out.lf() + 'static bool NEEDS_MPI = false;'
        self.out.lf().lf()
    def output_code_constants(self):
        # Per-dtype maxima used to size the transfer buffers.
        for dtype in self.dtype_to_spec.keys():
            dtype_spec = self.dtype_to_spec[dtype]
            maxin = self.mapping_from_dtype_to_maximum_number_of_inputvariables.get(dtype, 0)
            self.out + 'static int MAX_' + dtype_spec.input_var_name.upper() + ' = ' + maxin + ";"
            self.out.lf()
            maxout = self.mapping_from_dtype_to_maximum_number_of_outputvariables.get(dtype, 0)
            self.out + 'static int MAX_' + dtype_spec.output_var_name.upper() + ' = ' + maxout + ";"
            self.out.lf()
    def output_handle_call(self):
        """Emit handle_call(): a switch on the function id.

        Case 0 is the stop message (returns false to end the worker loop);
        unknown ids raise the ERROR_FLAG and return the message as a string.
        """
        self.out.lf().lf() + 'bool handle_call() {'
        self.out.indent()
        self.out.lf() + 'int call_count = header_in[HEADER_CALL_COUNT];'
        self.out.lf().lf() + 'switch(header_in[HEADER_FUNCTION_ID]) {'
        self.out.indent()
        self.out.lf() + 'case 0:'
        self.out.indent().lf() + 'return false;'
        self.out.lf() + 'break;'
        self.out.dedent()
        self.output_sourcecode_for_functions()
        self.out.lf() + 'default:'
        self.out.indent()
        self.out.lf() + 'header_out[HEADER_FLAGS] = header_out[HEADER_FLAGS] | ERROR_FLAG;'
        self.out.lf() + 'strings_out[0] = new char[100];'
        self.out.lf() + 'sprintf(strings_out[0], "unknown function id: %d\\n", header_in[HEADER_FUNCTION_ID]);'
        self.out.lf() + 'fprintf(stderr, "unknown function id: %d\\n", header_in[HEADER_FUNCTION_ID]);'
        self.out.lf() + 'header_out[HEADER_STRING_COUNT] = 1;'
        self.out.dedent()
        self.out.dedent().lf() + '}'
        self.out.dedent()
        self.out.indent().lf() + 'return true;'
        self.out.dedent().lf() + '}'
class GenerateACHeaderStringFromASpecificationClass\
        (GenerateASourcecodeStringFromASpecificationClass):
    """Assemble worker_code.h: C prototypes for every interface function."""
    @late
    def ignore_functions_from_specification_classes(self):
        # Mixin classes whose functions must be left out of the header.
        return []
    @late
    def underscore_functions_from_specification_classes(self):
        return []
    @late
    def dtype_to_spec(self):
        return dtype_to_spec
    @late
    def make_extern_c(self):
        # Wrap prototypes in `extern "C"` so C++ codes link against them.
        return True
    def must_include_interface_function_in_output(self, x):
        # Skip functions the framework provides internally, and any function
        # defined on an explicitly ignored specification class.
        if hasattr(x.specification,"internal_provided"):
            return False
        for cls in self.ignore_functions_from_specification_classes:
            if hasattr(cls, x.specification.name):
                return False
        return True
    def output_sourcecode_for_function(self):
        # Factory for the per-function prototype generator.
        return GenerateACHeaderDefinitionStringFromAFunctionSpecification()
    def start(self):
        self.out + '#include "stdbool.h"'
        self.out.lf()
        if self.make_extern_c:
            self.out + "#ifdef __cplusplus"
            self.out.lf() + 'extern "C" {'
            self.out.lf() + "#endif"
            self.out.lf()
        self.output_sourcecode_for_functions()
        if self.make_extern_c:
            self.out + "#ifdef __cplusplus"
            self.out.lf() + '}'
            self.out.lf() + "#endif"
            self.out.lf()
        self.out.lf()
        self._result = self.out.string
class GenerateACStubStringFromASpecificationClass\
        (GenerateASourcecodeStringFromASpecificationClass):
    """Generate a stub .c/.cc file with empty bodies for every interface
    function -- a starting point for implementing a new code."""
    @late
    def dtype_to_spec(self):
        return dtype_to_spec
    @late
    def make_extern_c(self):
        # Stubs are plain C implementations, no extern "C" wrapper by default.
        return False
    def output_sourcecode_for_function(self):
        # Per-function stub bodies come from the shared create_definition module.
        return create_definition.CreateCStub()
    def must_include_interface_function_in_output(self, x):
        # Framework-provided functions need no stub.
        return not hasattr(x.specification,"internal_provided")
    def start(self):
        self.output_local_includes()
        self.out.lf()
        if self.make_extern_c:
            self.out + 'extern "C" {'
            self.out.indent().lf()
        self.output_sourcecode_for_functions()
        if self.make_extern_c:
            self.out.dedent().lf() + '}'
        self.out.lf()
        self._result = self.out.string
    def output_local_includes(self):
        # #include the headers declared on the specification class, if any.
        self.out.n()
        if hasattr(self.specification_class, 'include_headers'):
            for x in self.specification_class.include_headers:
                self.out.n() + '#include "' + x + '"'
| 45,482
| 30.067623
| 236
|
py
|
amuse
|
amuse-main/src/amuse/rfi/tools/create_dir.py
|
import os
from amuse.support import get_amuse_root_dir
from amuse.support.core import late, print_out
from amuse.support.options import option
from amuse.support.options import OptionalAttributes
interface_file_template = """\
from amuse.community import *
# low level interface class
class {0.name_of_the_community_interface_class}({0.name_of_the_superclass_for_the_community_code_interface_class}):
{0.include_headers_or_modules}
def __init__(self, **keyword_arguments):
{0.name_of_the_superclass_for_the_community_code_interface_class}.__init__(self, name_of_the_worker="{0.name_of_the_community_code}_worker", **keyword_arguments)
# here you must specify the prototypes of the interface functions:
@legacy_function
def echo_int():
function = LegacyFunctionSpecification()
function.addParameter('int_in', dtype='int32', direction=function.IN, unit=None)
function.addParameter('int_out', dtype='int32', direction=function.OUT, unit=None)
function.result_type = 'int32'
function.can_handle_array = True
return function
# optionally, this can be shortened to:
# @remote_function(can_handle_array=True)
# def echo_int(int_in='i'):
# returns (int_out='i')
# high level interface class
class {0.name_of_the_code_interface_class}({0.name_of_the_superclass_for_the_code_interface_class}):
def __init__(self, **options):
{0.name_of_the_superclass_for_the_code_interface_class}.__init__(self, {0.name_of_the_community_interface_class}(**options), **options)
# the following alternative __init__ is appropiate for codes that use an unspecified unit system
# (ie the quantities have dimension but no definite scale)
#
# def __init__(self, unit_converter=None, **options):
# self.unit_converter=unit_converter
# {0.name_of_the_superclass_for_the_code_interface_class}.__init__(self, {0.name_of_the_community_interface_class}(**options), **options)
#
# in this case you also need to use the define_converter below
# typically the high level specification also contains the following:
# the definition of the state model of the code
def define_state(self, handler):
# for example:
# handler.set_initial_state('UNINITIALIZED')
# handler.add_transition('!UNINITIALIZED!STOPPED', 'END', 'cleanup_code')
# handler.add_transition('END', 'STOPPED', 'stop', False)
# handler.add_transition(
# 'UNINITIALIZED', 'INITIALIZED', 'initialize_code')
# handler.add_method('STOPPED', 'stop')
pass
# the definition of any properties
def define_properties(self, handler):
# handler.add_property('name_of_the_getter', public_name="name_of_the_property")
pass
# the definition of the parameters
def define_parameters(self, handler):
# handler.add_method_parameter(
# "name_of_the_getter",
# "name_of_the_setter",
# "parameter_name",
# "description",
# default_value = <default value>
# )
pass
# the definition of the code data stores, either particle sets:
def define_particle_sets(self, handler):
# handler.define_set('particles', 'index_of_the_particle')
# handler.set_new('particles', 'new_particle')
# handler.set_delete('particles', 'delete_particle')
# handler.add_setter('particles', 'set_state')
# handler.add_getter('particles', 'get_state')
# handler.add_setter('particles', 'set_mass')
# handler.add_getter('particles', 'get_mass', names=('mass',))
pass
# and/or grids:
def define_grids(self, handler):
# handler.define_grid('grid',axes_names = ["x", "y"], grid_class=StructuredGrid)
# handler.set_grid_range('grid', '_grid_range')
# handler.add_getter('grid', 'get_grid_position', names=["x", "y"])
# handler.add_getter('grid', 'get_rho', names=["density"])
# handler.add_setter('grid', 'set_rho', names=["density"])
pass
# this handles unit conversion if an (optional) unit converter is specified
# def define_converter(self, handler):
# if self.unit_converter is not None:
# handler.set_converter(
# self.unit_converter.as_converter_from_si_to_generic()
# )
"""
# Template for the generated test_<code>.py module.  Rendered with
# str.format(self): "{0.attr}" placeholders pull attribute values from a
# CreateADirectoryAndPopulateItWithFiles instance.
# Fix: emit assertEqual instead of assertEquals -- the assertEquals alias was
# deprecated since Python 3.2 and removed from unittest in Python 3.12, so
# generated tests would fail with AttributeError on modern interpreters.
test_file_template = """\
from amuse.test.amusetest import TestWithMPI

from {0.name_for_import_of_the_interface_module} import {0.name_of_the_community_interface_class}
from {0.name_for_import_of_the_interface_module} import {0.name_of_the_code_interface_class}

class {0.name_of_the_community_interface_class}Tests(TestWithMPI):
    def test1(self):
        instance = {0.name_of_the_community_interface_class}()
        result,error = instance.echo_int(12)
        self.assertEqual(error, 0)
        self.assertEqual(result, 12)
        instance.stop()
"""
makefile_template_cxx = """\
# standard amuse configuration include
# config.mk will be made after ./configure has run
ifeq ($(origin AMUSE_DIR), undefined)
AMUSE_DIR := $(shell amusifier --get-amuse-dir)
endif
-include $(AMUSE_DIR)/config.mk
MPICXX ?= mpicxx
CFLAGS += -Wall -g
CXXFLAGS += $(CFLAGS)
LDFLAGS += -lm $(MUSE_LD_FLAGS)
OBJS = {0.name_of_the_interface_code}.o
CODELIB = src/lib{0.name_of_the_community_code}.a
all: {0.name_of_the_community_code}_worker
clean:
\t$(RM) -rf __pycache__
\t$(RM) -f *.so *.o *.pyc worker_code.cc worker_code.h
\t$(RM) *~ {0.name_of_the_community_code}_worker worker_code.cc
\tmake -C src clean
distclean: clean
\tmake -C src distclean
$(CODELIB):
\tmake -C src all
worker_code.cc: {0.name_of_the_python_module}
\t$(CODE_GENERATOR) --type=c interface.py {0.name_of_the_community_interface_class} -o $@
worker_code.h: {0.name_of_the_python_module}
\t$(CODE_GENERATOR) --type=H interface.py {0.name_of_the_community_interface_class} -o $@
{0.name_of_the_community_code}_worker: worker_code.cc worker_code.h $(CODELIB) $(OBJS)
\t$(MPICXX) $(CXXFLAGS) $< $(OBJS) $(CODELIB) -o $@
.cc.o: $<
\t$(CXX) $(CXXFLAGS) -c -o $@ $<
"""
code_makefile_template_cxx = """\
CFLAGS += -Wall -g
CXXFLAGS += $(CFLAGS)
LDFLAGS += -lm $(MUSE_LD_FLAGS)
CODELIB = lib{0.name_of_the_community_code}.a
CODEOBJS = test.o
AR = ar ruv
RANLIB = ranlib
RM = rm
all: $(CODELIB)
clean:
\t$(RM) -f *.o *.a
distclean: clean
$(CODELIB): $(CODEOBJS)
\t$(RM) -f $@
\t$(AR) $@ $(CODEOBJS)
\t$(RANLIB) $@
.cc.o: $<
\t$(CXX) $(CXXFLAGS) -c -o $@ $<
"""
code_examplefile_template_cxx = """\
/*
* Example function for a code
*/
int echo(int input){
return input;
}
"""
interface_examplefile_template_cxx = """\
extern int echo(int input);
/*
* Interface code
*/
int echo_int(int input, int * output){
*output = echo(input);
return 0;
}
"""
class CreateADirectoryAndPopulateItWithFiles(OptionalAttributes):
    """Scaffold a new community-code directory.

    Creates <root>/<code>/ with __init__.py, interface.py, a test module and
    (via subclasses) Makefiles plus example sources.  All names and paths are
    lazily-computed `@late` attributes so callers can override any of them
    before calling start().  Language-agnostic base class: subclasses
    override make_makefile() and make_example_files().
    """
    @late
    def path_of_the_root_directory(self):
        # Directory in which the new code directory is created; defaults to
        # the grandparent of this module (callers normally override this).
        return os.path.dirname(os.path.dirname(__file__))
    @late
    def name_of_the_community_code(self):
        # Lower-cased class name, e.g. 'mycode' for 'MyCode'.
        return self.name_of_the_code_interface_class.lower()
    @late
    def name_of_the_python_module(self):
        return 'interface.py'
    @late
    def name_of_the_test_module(self):
        return 'test_{0}.py'.format(self.name_of_the_community_code)
    @late
    def name_of_the_interface_code(self):
        # Basename (no extension) of the generated glue-code source file.
        return 'interface'
    @late
    def name_of_the_code_interface_class(self):
        return 'MyCode'
    @late
    def name_of_the_community_interface_class(self):
        return self.name_of_the_code_interface_class + 'Interface'
    @late
    def name_of_the_code_directory(self):
        return 'src'
    @late
    def name_for_import_of_the_interface_module(self):
        # Relative import name: 'interface.py' -> '.interface'.
        return '.' + self.name_of_the_python_module[:-3]
    @late
    def path_of_the_community_code(self):
        return os.path.join(self.path_of_the_root_directory, self.name_of_the_community_code)
    @late
    def path_of_the_source_code(self):
        return os.path.join(self.path_of_the_community_code, self.name_of_the_code_directory)
    @late
    def path_of_the_init_file(self):
        return os.path.join(self.path_of_the_community_code, '__init__.py')
    @late
    def path_of_the_interface_file(self):
        return os.path.join(self.path_of_the_community_code, self.name_of_the_python_module)
    @late
    def path_of_the_test_file(self):
        return os.path.join(self.path_of_the_community_code, self.name_of_the_test_module)
    @late
    def path_of_the_makefile(self):
        return os.path.join(self.path_of_the_community_code, 'Makefile')
    @late
    def path_of_the_code_makefile(self):
        return os.path.join(self.path_of_the_source_code, 'Makefile')
    @late
    def path_of_the_code_examplefile(self):
        # Language specific: subclasses must override.
        raise NotImplementedError()
    @late
    def path_of_the_interface_examplefile(self):
        # Language specific: subclasses must override.
        raise NotImplementedError()
    @late
    def path_of_amuse(self):
        return self.amuse_root_dir
    @late
    def reference_to_amuse_path(self):
        # AMUSE root expressed relative to the generated code directory.
        return os.path.relpath(self.path_of_amuse, self.path_of_the_community_code)
    @late
    def name_of_the_superclass_for_the_community_code_interface_class(self):
        return "CodeInterface"
    @late
    def name_of_the_superclass_for_the_code_interface_class(self):
        return "InCodeComponentImplementation"
    @late
    def amuse_root_dir(self):
        return get_amuse_root_dir()
    @late
    def include_headers_or_modules(self):
        # Class-body line injected verbatim into the generated interface.py.
        return "include_headers = ['worker_code.h']"
    def start(self):
        """Entry point: create the directories, then every file."""
        self.make_directories()
        self.make_python_files()
        self.make_makefile()
        self.make_example_files()
    def make_directories(self):
        # Parent before child; os.mkdir raises if the directory exists.
        os.mkdir(self.path_of_the_community_code)
        os.mkdir(self.path_of_the_source_code)
    def make_python_files(self):
        """Write __init__.py and the rendered interface/test modules."""
        with open(self.path_of_the_init_file, "w") as f:
            f.write("# generated file")
        with open(self.path_of_the_interface_file, "w") as f:
            string = interface_file_template.format(self)
            f.write(string)
        with open(self.path_of_the_test_file, "w") as f:
            string = test_file_template.format(self)
            f.write(string)
    def make_makefile(self):
        # Overridden by language-specific subclasses.
        pass
    def make_example_files(self):
        # Overridden by language-specific subclasses.
        pass
class CreateADirectoryAndPopulateItWithFilesForACCode(CreateADirectoryAndPopulateItWithFiles):
    """Directory builder specialised for a C/C++ community code skeleton."""
    @late
    def path_of_the_code_examplefile(self):
        # The example implementation lives at src/test.cc.
        return os.path.join(self.path_of_the_source_code, 'test.cc')
    @late
    def path_of_the_interface_examplefile(self):
        # The glue code sits next to interface.py as <interface>.cc.
        return os.path.join(
            self.path_of_the_community_code,
            self.name_of_the_interface_code + '.cc',
        )
    def make_makefile(self):
        # Top-level Makefile is rendered from the C++ template.
        self._write_generated_file(
            self.path_of_the_makefile,
            makefile_template_cxx.format(self),
        )
    def make_example_files(self):
        # The library Makefile is templated; both example sources are verbatim.
        targets = (
            (self.path_of_the_code_makefile, code_makefile_template_cxx.format(self)),
            (self.path_of_the_code_examplefile, code_examplefile_template_cxx),
            (self.path_of_the_interface_examplefile, interface_examplefile_template_cxx),
        )
        for destination, content in targets:
            self._write_generated_file(destination, content)
    @staticmethod
    def _write_generated_file(destination, content):
        # Single place that writes a generated file to disk.
        with open(destination, "w") as target:
            target.write(content)
makefile_template_fortran = """\
# standard amuse configuration include
# config.mk will be made after ./configure has run
ifeq ($(origin AMUSE_DIR), undefined)
AMUSE_DIR := $(shell amusifier --get-amuse-dir)
endif
-include $(AMUSE_DIR)/config.mk
MPIFC ?= mpif90
FC = $(MPIFC)
LDFLAGS += -lm $(MUSE_LD_FLAGS)
OBJS = {0.name_of_the_interface_code}.o
CODELIB = src/lib{0.name_of_the_community_code}.a
# needed if code functions are accessed through a module
FCFLAGS+= -I$(realpath ./src)
all: {0.name_of_the_community_code}_worker
clean:
\t$(RM) -rf __pycache__
\t$(RM) -f *.mod *.so *.o *.pyc worker_code.cc worker_code.h
\t$(RM) *~ {0.name_of_the_community_code}_worker worker_code.f90
\tmake -C src clean
distclean: clean
\tmake -C src distclean
$(CODELIB):
\tmake -C src all
worker_code.f90: {0.name_of_the_python_module}
\t$(CODE_GENERATOR) --type=f90 interface.py {0.name_of_the_community_interface_class} -o $@
{0.name_of_the_community_code}_worker: worker_code.f90 $(CODELIB) $(OBJS)
\t$(MPIFC) $(FCFLAGS) $(FS_FLAGS) $< $(OBJS) $(CODELIB) $(FS_LIBS) -o $@
%.o: %.f90
\t$(FC) $(FCFLAGS) -c -o $@ $<
"""
code_makefile_template_fortran = """\
MPIFC ?= mpif90
FC = $(MPIFC)
LDFLAGS += -lm $(MUSE_LD_FLAGS)
CODELIB = lib{0.name_of_the_community_code}.a
CODEOBJS = test.o
AR = ar ruv
RANLIB = ranlib
RM = rm
all: $(CODELIB)
clean:
\t$(RM) -f *.o *.a *.mod
distclean: clean
$(CODELIB): $(CODEOBJS)
\t$(RM) -f $@
\t$(AR) $@ $(CODEOBJS)
\t$(RANLIB) $@
%.o: %.f90
\t$(FC) $(FCFLAGS) -c -o $@ $<
"""
code_examplefile_template_fortran = """\
function echo(input)
integer echo, input
echo = input
end function
"""
interface_examplefile_template_fortran = """\
module {0.name_of_the_interface_module}
contains
function echo_int(input, output)
integer :: echo
integer :: echo_int
integer :: input, output
output = echo(input)
echo_int = 0
end function
end module
"""
class CreateADirectoryAndPopulateItWithFilesForAFortranCode(CreateADirectoryAndPopulateItWithFiles):
    """Specialization that writes Fortran build and example files (.f90)."""

    @late
    def path_of_the_code_examplefile(self):
        return os.path.join(self.path_of_the_source_code, 'test.f90')

    @late
    def path_of_the_interface_examplefile(self):
        return os.path.join(self.path_of_the_community_code, self.name_of_the_interface_code + '.f90')

    @late
    def include_headers_or_modules(self):
        # Fortran interfaces are accessed through a module, not a C header.
        return 'use_modules=["{0}"]'.format(self.name_of_the_interface_module)

    @late
    def name_of_the_interface_module(self):
        return '{0}Interface'.format(self.name_of_the_community_code)

    def make_makefile(self):
        with open(self.path_of_the_makefile, "w") as f:
            string = makefile_template_fortran.format(self)
            f.write(string)

    def make_example_files(self):
        # Unlike the C++ variant, the interface example is rendered with
        # .format because it embeds the interface module name.
        with open(self.path_of_the_code_makefile, "w") as f:
            string = code_makefile_template_fortran.format(self)
            f.write(string)
        with open(self.path_of_the_code_examplefile, "w") as f:
            string = code_examplefile_template_fortran
            f.write(string)
        with open(self.path_of_the_interface_examplefile, "w") as f:
            string = interface_examplefile_template_fortran.format(self)
            f.write(string)
| 15,062
| 27.856322
| 169
|
py
|
amuse
|
amuse-main/src/amuse/rfi/tools/create_python_worker.py
|
from amuse.support.core import late, print_out
from amuse.support.options import option
from amuse.support.options import OptionalAttributes
from amuse.support import get_amuse_root_dir
import os
import inspect
import sys
class CreateAPythonWorker(OptionalAttributes):
    """Create an executable worker script for a Python community code.

    Renders ``python_code_script.template`` (located next to this module)
    with the implementation-factory and interface classes, then writes the
    result as an executable file in the current working directory.
    """

    @option(sections=['data'])
    def amuse_root_dir(self):
        return get_amuse_root_dir()

    @late
    def channel_type(self):
        return 'mpi'

    @late
    def template_dir(self):
        # The template lives alongside this module.
        return os.path.dirname(__file__)

    @late
    def worker_dir(self):
        return os.path.abspath(os.path.curdir)

    @late
    def template_string(self):
        path = self.template_dir
        path = os.path.join(path, 'python_code_script.template')
        with open(path, "r") as f:
            template_string = f.read()
        return template_string

    @late
    def worker_name(self):
        # Derive the worker name from the implementation module's filename.
        filename = os.path.basename(inspect.getfile(self.implementation_factory))
        filename = filename.split('.')[0]
        # BUGFIX: str.replace returns a new string (strings are immutable);
        # the original code discarded the result, so path separators were
        # never actually replaced. Bind the returned value.
        filename = filename.replace(os.sep, '_')
        path = os.path.join(self.worker_dir, filename)
        return path

    @late
    def output_name(self):
        executable_path = self.worker_name
        return executable_path

    @late
    def interface_class(self):
        return self.specification_class

    def new_executable_script_string(self):
        """Return the rendered worker script as a string."""
        return self.template_string.format(
            executable = sys.executable,
            syspath = ','.join(map(repr, sys.path)),
            factory_module = inspect.getmodule(self.implementation_factory).__name__,
            factory = self.implementation_factory.__name__,
            interface_module = inspect.getmodule(self.interface_class).__name__,
            interface = self.interface_class.__name__,
        )

    @property
    def result(self):
        return self.new_executable_script_string()

    def start(self):
        # Write the script and mark it executable for everyone.
        string = self.new_executable_script_string()
        with open(self.output_name, 'w') as f:
            f.write(string)
        os.chmod(self.output_name, 0o777)
| 2,216
| 26.036585
| 85
|
py
|
amuse
|
amuse-main/src/amuse/rfi/tools/create_definition.py
|
import re
from amuse.support.core import late, print_out
def strip_indent(string_with_indents):
    """Remove a leading blank (spaces-only) first line and any trailing
    whitespace from *string_with_indents*."""
    trimmed = string_with_indents.rstrip()
    return re.sub(r'^ *\n', '', trimmed)
class CodeDocStringProperty(object):
    """
    Return a docstring generated from a
    function specification
    """
    def __get__(self, instance, owner):
        # Class-level access (no instance): fall back to the owner's
        # __init__ docstring, or return the descriptor itself.
        if instance is None:
            if hasattr(owner, "__init__"):
                return owner.__init__.__doc__
            else:
                return self
        # Instance access: render the documentation for the instance's
        # function specification on the fly.
        usecase = CreateDescriptionOfAFunctionSpecification()
        usecase.specification = instance.specification
        usecase.start()
        return usecase.out.string
class CreateDescriptionOfAFunctionSpecification(object):
    """Render reST documentation for one function specification.

    Output order: the free-text description, a C and a Fortran prototype
    (as ``.. code-block::`` sections), then ``:param:``/``:type:`` fields
    for each parameter and a ``:returns:`` field when there is a result.
    """
    @late
    def out(self):
        return print_out()
    def start(self):
        # Assemble the sections in document order; the printer object
        # accumulates the text.
        self.output_function_description()
        self.out.lf()
        self.output_cfunction_definition()
        self.output_fortran_function_definition()
        self.out.lf()
        self.output_parameter_descriptions()
        self.output_parameter_returntype()
        self.out.lf()
    def output_function_description(self):
        self.output_multiline_string(self.specification.description)
        self.out.lf()
    def output_multiline_string(self, string):
        # Emit each line with an explicit linefeed between them so the
        # printer's current indentation is applied per line.
        lines = string.split('\n')
        first = True
        for line in lines:
            if first:
                first = False
            else:
                self.out.lf()
            self.out + line
    def output_cfunction_definition(self):
        # C prototype via CreateCStub; convert_datatypes is disabled so
        # the specification's own type names (int32, float64, ...) appear.
        self.out + '.. code-block:: c'
        self.out.indent()
        self.out.lf().lf()
        x = CreateCStub()
        x.convert_datatypes = False
        x.out = self.out
        x.specification = self.specification
        x.start()
        self.out.dedent()
        self.out.lf().lf()
    def output_fortran_function_definition(self):
        # Fortran prototype via CreateFortranStub (definition only).
        self.out + '.. code-block:: fortran'
        self.out.indent()
        self.out.lf().lf()
        x = CreateFortranStub()
        x.out = self.out
        x.specification = self.specification
        x.start()
        self.out.dedent()
        self.out.lf().lf()
    def output_parameter_descriptions(self):
        # One :param:/:type: pair per parameter, with the direction
        # (IN/INOUT/OUT) appended to the type line.
        for parameter in self.specification.parameters:
            self.out.lf()
            self.out + ':param ' + parameter.name + ': '
            self.out.indent()
            self.output_multiline_string(strip_indent(parameter.description))
            self.out.dedent()
            self.out.lf()
            self.out + ':type ' + parameter.name + ': '
            self.out + parameter.datatype + ', '
            self.output_parameter_direction(parameter)
    def output_parameter_direction(self, parameter):
        #self.out + '('
        if parameter.direction == self.specification.IN:
            self.out + 'IN'
        if parameter.direction == self.specification.INOUT:
            self.out + 'INOUT'
        if parameter.direction == self.specification.OUT:
            self.out + 'OUT'
        #self.out + ')'
    def output_parameter_returntype(self):
        # Subroutines (no result type) get no :returns: section.
        if self.specification.result_type is None:
            return
        self.out.lf()
        self.out + ':returns: '
        self.out.indent()
        self.output_multiline_string(strip_indent(self.specification.result_doc))
        self.out.dedent()
class CreateInterfaceDefinitionDocument(object):
    """Placeholder generator for a complete interface-definition document."""
    @late
    def out(self):
        return print_out()
    def start(self):
        # Intentionally empty: nothing is generated yet.
        pass
class CreateFortranStub(object):
    """Emit a Fortran stub for a function specification.

    By default only the declaration (signature plus parameter type
    lines) is produced; with ``output_definition_only = False`` a full
    subprogram with a dummy return value is generated. Long lines are
    wrapped with Fortran '&' continuations.
    """
    @late
    def out(self):
        return print_out()
    def start(self):
        self.output_subprogram_start()
        self.output_parameter_type_definiton_lines()
        if not self.output_definition_only:
            self.output_subprogram_content()
        self.output_subprogram_end()
    @late
    def specification_is_for_function(self):
        # A result type means Fortran 'function'; otherwise 'subroutine'.
        return not self.specification.result_type is None
    @late
    def output_definition_only(self):
        return True
    @late
    def subprogram_string(self):
        if self.specification_is_for_function:
            return 'function'
        else:
            return 'subroutine'
    @late
    def dtype_to_parameters(self):
        # Group the parameters by datatype so each type gets one (or a
        # few, when wrapped) declaration line.
        result = {}
        for parameter in self.specification.parameters:
            parameters = result.get(parameter.datatype,[])
            parameters.append(parameter)
            result[parameter.datatype] = parameters
        return result
    @late
    def dtype_to_fortrantype(self):
        return {
            'int32':'integer',
            'float64':'double precision',
            'float32':'real',
            'string':'character(len=*)',
            'bool':'logical',
        }
    def output_subprogram_start(self):
        # "function NAME(a, b, ...)" with '&' continuation when a line
        # would exceed 74 characters (Fortran line-length safety margin).
        self.out + self.subprogram_string + ' '
        self.out + self.specification.name
        self.out + '('
        self.out.indent()
        self.out.indent()
        first = True
        for parameter in self.specification.parameters:
            if first:
                first = False
            else:
                self.out + ', '
            length_of_the_argument_statement = len(parameter.name)
            new_length_of_the_line = self.out.number_of_characters_on_current_line + length_of_the_argument_statement
            if new_length_of_the_line > 74:
                self.out + ' &'
                self.out.lf()
            self.out + parameter.name
        self.out + ')'
        self.out.dedent()
        if not self.output_definition_only:
            self.out.lf() + 'implicit none'
    def output_parameter_type_definiton_lines(self):
        # One "TYPE :: a, b, ..." line per datatype; a fresh line is
        # started whenever the current one would exceed 74 characters.
        for dtype,parameters in self.dtype_to_parameters.items():
            typestring = self.dtype_to_fortrantype[dtype]
            first = True
            self.out.lf()
            self.out + typestring + ' :: '
            for parameter in parameters:
                length_of_the_argument_statement = len(parameter.name)
                new_length_of_the_line = self.out.number_of_characters_on_current_line + length_of_the_argument_statement
                if new_length_of_the_line > 74:
                    first = True
                    self.out.lf()
                    self.out + typestring + ' :: '
                if first:
                    first = False
                else:
                    self.out + ', '
                self.out + parameter.name
        self.output_function_type()
    def output_function_type(self):
        # Fortran functions also declare the type of the function name.
        if self.specification_is_for_function:
            typestring = self.dtype_to_fortrantype[self.specification.result_type]
            self.out.lf()
            self.out + typestring + ' :: ' + self.specification.name
    def output_subprogram_end(self):
        self.out.dedent()
        self.out.lf()
        self.out + 'end ' + self.subprogram_string
    def output_subprogram_content(self):
        # Dummy body: assign a zero-ish value to the function result.
        if not self.specification.result_type is None:
            self.out.lf()
            self.out + self.specification.name + '=' + self.dtype_to_returnvalue[self.specification.result_type]
    @late
    def dtype_to_returnvalue(self):
        return {
            'int32':'0',
            'float64':'0.0',
            'float32':'0.0',
            'string':'0',
            'bool':'0',
        }
class CreateCStub(object):
    """Emit a C stub for a function specification.

    With ``convert_datatypes`` (the default) specification type names
    are mapped to C types and a dummy function body is produced;
    without it only a prototype ending in ';' is emitted, keeping the
    original type names (used for documentation output).
    """
    @late
    def out(self):
        return print_out()
    def start(self):
        # Return type (or void), then the wrapped parameter list;
        # output parameters become pointers.
        if self.specification.result_type is None:
            self.out + 'void '
        else:
            typestring = self.dtype_to_ctype[self.specification.result_type]
            self.out + typestring
            self.out + ' '
        self.out + self.specification.name
        self.out + '('
        self.out.indent()
        first = True
        for parameter in self.specification.parameters:
            typestring = self.dtype_to_ctype[parameter.datatype]
            if first:
                first = False
            else:
                self.out + ', '
            # Wrap before the argument if the line would exceed 74 chars.
            length_of_the_argument_statement = len(typestring) + len(parameter.name) + 3
            new_length_of_the_line = self.out.number_of_characters_on_current_line + length_of_the_argument_statement
            if new_length_of_the_line > 74:
                self.out.lf()
            self.out + typestring
            if parameter.is_output():
                self.out + ' *'
            self.out + ' ' + parameter.name
        self.out.dedent()
        self.out + ')'
        if self.output_definition_only:
            self.out + ';'
        else:
            self.output_function_content()
        self._result = self.out.string
    @late
    def result(self):
        # Lazily generate and cache the full stub text.
        self.start()
        return self._result
    def output_function_content(self):
        # Dummy body returning a zero-ish value of the result type.
        self.out + '{'
        self.out.indent()
        if not self.specification.result_type is None:
            self.out.lf()
            self.out + 'return ' + self.dtype_to_returnvalue[self.specification.result_type] + ';'
        self.out.dedent().lf()
        self.out + '}'
    @late
    def dtype_to_parameters(self):
        # Parameters grouped by datatype (kept for parity with the
        # Fortran stub generator).
        result = {}
        for parameter in self.specification.parameters:
            parameters = result.get(parameter.datatype,[])
            parameters.append(parameter)
            result[parameter.datatype] = parameters
        return result
    @late
    def output_definition_only(self):
        return (not self.convert_datatypes)
    @late
    def convert_datatypes(self):
        return True
    @late
    def dtype_to_ctype(self):
        if self.convert_datatypes:
            return {
                'int32':'int',
                'float64':'double',
                'float32':'float',
                'string':'char *',
                'bool':'_Bool',
            }
        else:
            # Keep the specification's own type names for documentation.
            return {
                'int32':'int32',
                'float64':'float64',
                'float32':'float32',
                'string':'char *',
                'bool':'_Bool',
            }
    @late
    def dtype_to_returnvalue(self):
        return {
            'int32':'0',
            'float64':'0.0',
            'float32':'0.0',
            'string':'0',
            'bool':'0',
        }
| 10,659
| 28.447514
| 121
|
py
|
amuse
|
amuse-main/src/amuse/rfi/tools/__init__.py
| 0
| 0
| 0
|
py
|
|
amuse
|
amuse-main/src/amuse/rfi/tools/create_code.py
|
import numpy
import sys
import os
from amuse.support.core import late, print_out
from amuse.rfi.core import legacy_function
class DTypeSpec(object):
    """Bundle of the buffer/counter/type names used for one data type
    by the generated transport code."""

    def __init__(self, input_var_name, output_var_name, counter_name,
            type, mpi_type = 'UNKNOWN'):
        # Store each name verbatim; mpi_type defaults to 'UNKNOWN' for
        # specs without an explicit MPI type mapping.
        (self.input_var_name,
         self.output_var_name,
         self.counter_name,
         self.type,
         self.mpi_type) = (input_var_name, output_var_name,
                           counter_name, type, mpi_type)
dtypes = ['int32', 'int64', 'float32', 'float64', 'bool', 'string']
class GenerateASourcecodeString(object):
    """Base class for source-code string generators.

    Subclasses implement ``start`` to fill ``_result``; ``result`` is
    generated lazily on first access.
    """

    _result = None

    def __init__(self):
        pass

    @late
    def result(self):
        # Generate only once, on first access.
        if self._result is None:
            self.start()
        return self._result

    @late
    def out(self):
        return print_out()

    @late
    def must_generate_mpi(self):
        # MPI support is generated unless CFLAGS explicitly carries -DNOMPI.
        flags = os.environ.get('CFLAGS')
        if flags is None:
            return True
        return '-DNOMPI' not in flags
class GenerateASourcecodeStringFromASpecificationClass(GenerateASourcecodeString):
    """Generator base that derives its output from a specification class
    whose attributes are ``legacy_function`` objects."""
    @late
    def interface_functions(self):
        # Collect every legacy_function attribute (dunder names skipped),
        # sorted by the function specification's nspec for stable output.
        attribute_names = dir(self.specification_class)
        interface_functions = []
        for x in attribute_names:
            if x.startswith('__'):
                continue
            value = getattr(self.specification_class, x)
            if isinstance(value, legacy_function):
                interface_functions.append(value)
        interface_functions.sort(key= lambda x: x.specification.nspec)
        return interface_functions
    @late
    def mapping_from_dtype_to_maximum_number_of_inputvariables(self):
        # Per datatype: the maximum number of input parameters any single
        # interface function uses (sizes the input transfer buffers).
        result = None
        for x in self.interface_functions:
            local = {}
            for parameter in x.specification.input_parameters:
                count = local.get(parameter.datatype, 0)
                local[parameter.datatype] = count + 1
            if result is None:
                result = local
            else:
                for key, count in local.items():
                    previous_count = result.get(key, 0)
                    result[key] = max(count, previous_count)
        return result
    @late
    def mapping_from_dtype_to_maximum_number_of_outputvariables(self):
        # Like the input mapping, but the function's result value (when
        # present) also counts towards its result type.
        result = None
        for x in self.interface_functions:
            local = {}
            for parameter in x.specification.output_parameters:
                count = local.get(parameter.datatype, 0)
                local[parameter.datatype] = count + 1
            if not x.specification.result_type is None:
                count = local.get(x.specification.result_type, 0)
                local[x.specification.result_type] = count + 1
            if result is None:
                result = local
            else:
                for key, count in local.items():
                    previous_count = result.get(key, 0)
                    result[key] = max(count, previous_count)
        return result
    def must_include_interface_function_in_output(self, x):
        # Hook for subclasses to filter functions; include all by default.
        return True
    def output_sourcecode_for_functions(self):
        # Emit code for each function except id 0 and any the subclass
        # filter rejects; output_sourcecode_for_function supplies the
        # per-function generator class.
        for x in self.interface_functions:
            if x.specification.id == 0:
                continue
            if not self.must_include_interface_function_in_output(x):
                continue
            self.out.lf()
            uc = self.output_sourcecode_for_function()
            uc.specification = x.specification
            uc.out = self.out
            uc.start()
            self.out.lf()
class DTypeToSpecDictionary(object):
    """Thin mapping wrapper from a datatype name to its DTypeSpec."""

    def __init__(self, dict):
        # Copy the entries into an internal mapping of our own.
        self.mapping = {key: spec for key, spec in dict.items()}

    def __getitem__(self, datatype):
        return self.mapping[datatype]

    def __len__(self):
        return len(self.mapping)

    def values(self):
        # Return a concrete list, not a view, for callers that index it.
        return list(self.mapping.values())

    def keys(self):
        # Return a concrete list, not a view, for callers that index it.
        return list(self.mapping.keys())
| 4,264
| 29.683453
| 82
|
py
|
amuse
|
amuse-main/src/amuse/rfi/tools/create_fortran.py
|
from amuse.support.core import late
from amuse.support import exceptions
from amuse import config
from amuse.rfi.tools.create_code import GenerateASourcecodeString
from amuse.rfi.tools.create_code import GenerateASourcecodeStringFromASpecificationClass
from amuse.rfi.tools.create_code import DTypeSpec
from amuse.rfi.tools.create_code import dtypes
from amuse.rfi.tools.create_code import DTypeToSpecDictionary
from amuse.rfi.tools import create_definition
from amuse.rfi.core import LegacyFunctionSpecification
dtype_to_spec = DTypeToSpecDictionary({
'int32' : DTypeSpec('integers_in','integers_out','HEADER_INTEGER_COUNT', 'integer', 'integer'),
'int64' : DTypeSpec('longs_in', 'longs_out', 'HEADER_LONG_COUNT', 'integer*8', 'long'),
'float32' : DTypeSpec('floats_in', 'floats_out', 'HEADER_FLOAT_COUNT', 'real*4', 'float'),
'float64' : DTypeSpec('doubles_in', 'doubles_out', 'HEADER_DOUBLE_COUNT', 'real*8', 'double'),
'bool' : DTypeSpec('booleans_in', 'booleans_out', 'HEADER_BOOLEAN_COUNT', 'logical', 'boolean'),
'string' : DTypeSpec('strings_in', 'strings_out', 'HEADER_STRING_COUNT', 'integer*4', 'integer'),
})
CONSTANTS_STRING = """
integer HEADER_FLAGS, HEADER_CALL_ID, HEADER_FUNCTION_ID, HEADER_CALL_COUNT, &
HEADER_INTEGER_COUNT, HEADER_LONG_COUNT, HEADER_FLOAT_COUNT, &
HEADER_DOUBLE_COUNT, HEADER_BOOLEAN_COUNT, HEADER_STRING_COUNT, &
HEADER_SIZE, MAX_COMMUNICATORS
parameter (HEADER_FLAGS=1, HEADER_CALL_ID=2, HEADER_FUNCTION_ID=3, &
HEADER_CALL_COUNT=4, HEADER_INTEGER_COUNT=5, HEADER_LONG_COUNT=6, &
HEADER_FLOAT_COUNT=7, HEADER_DOUBLE_COUNT=8, &
HEADER_BOOLEAN_COUNT=9, HEADER_STRING_COUNT=10, &
HEADER_SIZE=11, MAX_COMMUNICATORS = 2048)
"""
ARRAY_DEFINES_STRING = """
integer*4, target :: header_in(HEADER_SIZE)
integer*4, target :: header_out(HEADER_SIZE)
integer*4, allocatable, target :: integers_in(:)
integer*4, allocatable, target :: integers_out(:)
integer*8, allocatable, target :: longs_in(:)
integer*8, allocatable, target :: longs_out(:)
real*4, allocatable, target :: floats_in(:)
real*4, allocatable, target :: floats_out(:)
real*8, allocatable, target :: doubles_in(:)
real*8, allocatable, target :: doubles_out(:)
logical*1, allocatable, target :: c_booleans_in(:)
logical*1, allocatable, target :: c_booleans_out(:)
logical, allocatable, target :: booleans_in(:)
logical, allocatable, target :: booleans_out(:)
integer*4, allocatable, target :: string_sizes_in(:)
integer*4, allocatable, target :: string_sizes_out(:)
character (len=256), allocatable, target :: strings_in(:)
character (len=256), allocatable, target :: strings_out(:)
character (len=100000) :: characters_in
character (len=100000) :: characters_out
"""
ISO_ARRAY_DEFINES_STRING = """
integer (c_int32_t), target :: header_in(HEADER_SIZE)
integer (c_int32_t), target :: header_out(HEADER_SIZE)
integer (c_int32_t), allocatable, target :: integers_in(:)
integer (c_int32_t), allocatable, target :: integers_out(:)
integer (c_int64_t), allocatable, target :: longs_in(:)
integer (c_int64_t), allocatable, target :: longs_out(:)
real (c_float), allocatable, target :: floats_in(:)
real (c_float), allocatable, target :: floats_out(:)
real (c_double), allocatable, target :: doubles_in(:)
real (c_double), allocatable, target :: doubles_out(:)
logical (c_bool), allocatable, target :: c_booleans_in(:)
logical (c_bool), allocatable, target :: c_booleans_out(:)
logical, allocatable, target :: booleans_in(:)
logical, allocatable, target :: booleans_out(:)
integer (c_int32_t), allocatable, target :: string_sizes_in(:)
integer (c_int32_t), allocatable, target :: string_sizes_out(:)
character (c_char), allocatable, target :: strings_in(:) * 256
character (c_char), allocatable, target :: strings_out(:) * 256
character (len=1000000) :: characters_in
character (len=1000000) :: characters_out
character (kind=c_char), target :: c_characters_in(1000000)
character (kind=c_char), target :: c_characters_out(1000000)
"""
MODULE_GLOBALS_STRING = """
integer, save :: polling_interval = 0
integer, save :: last_communicator_id = 0
integer, save :: communicators(MAX_COMMUNICATORS)
integer, save :: id_to_activate = -1
integer, save :: active_communicator_id = -1
"""
NOMPI_MODULE_GLOBALS_STRING = """
integer, save :: polling_interval = 0
"""
MPI_INTERNAL_FUNCTIONS_STRING = """
FUNCTION internal__open_port(outval)
USE mpi
IMPLICIT NONE
character(len=MPI_MAX_PORT_NAME+1), intent(out) :: outval
INTEGER :: internal__open_port
INTEGER :: ierror
call MPI_Open_port(MPI_INFO_NULL, outval, ierror);
internal__open_port = 0
END FUNCTION
FUNCTION internal__accept_on_port(port_identifier, comm_identifier)
USE mpi
IMPLICIT NONE
character(len=*), intent(in) :: port_identifier
INTEGER, intent(out) :: comm_identifier
INTEGER :: internal__accept_on_port
INTEGER :: ierror, rank
INTEGER :: mcommunicator, communicator
last_communicator_id = last_communicator_id + 1
IF (last_communicator_id .GE. MAX_COMMUNICATORS) THEN
last_communicator_id = last_communicator_id - 1
comm_identifier = -1
internal__accept_on_port = -1
return;
END IF
call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierror);
IF (rank .EQ. 0) THEN
call MPI_Comm_accept(port_identifier, MPI_INFO_NULL, 0, MPI_COMM_SELF, communicator, ierror)
call MPI_Intercomm_merge(communicator, .FALSE., mcommunicator, ierror)
call MPI_Intercomm_create(MPI_COMM_WORLD, 0, mcommunicator, 1, 65, communicators(last_communicator_id), ierror)
call MPI_Comm_free(mcommunicator, ierror)
call MPI_Comm_free(communicator, ierror)
ELSE
call MPI_Intercomm_create(MPI_COMM_WORLD,0, MPI_COMM_NULL, 1, 65, communicators(last_communicator_id), ierror)
END IF
comm_identifier = last_communicator_id;
internal__accept_on_port = 0
END FUNCTION
FUNCTION internal__connect_to_port(port_identifier, comm_identifier)
USE MPI
IMPLICIT NONE
character(len=*), intent(in) :: port_identifier
INTEGER, intent(out) :: comm_identifier
INTEGER :: internal__connect_to_port
INTEGER :: ierror, rank
INTEGER :: mcommunicator, communicator
last_communicator_id = last_communicator_id + 1
IF (last_communicator_id .GE. MAX_COMMUNICATORS) THEN
last_communicator_id = last_communicator_id - 1
comm_identifier = -1
internal__connect_to_port = -1
return;
END IF
call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierror);
IF (rank .EQ. 0) THEN
call MPI_Comm_connect(port_identifier, MPI_INFO_NULL, 0, MPI_COMM_SELF, communicator, ierror)
call MPI_Intercomm_merge(communicator, .TRUE., mcommunicator, ierror)
call MPI_Intercomm_create(MPI_COMM_WORLD, 0, mcommunicator, 0, 65, communicators(last_communicator_id), ierror)
call MPI_Comm_free(mcommunicator, ierror)
call MPI_Comm_free(communicator, ierror)
ELSE
call MPI_Intercomm_create(MPI_COMM_WORLD,0, MPI_COMM_NULL, 1, 65, communicators(last_communicator_id), ierror)
END IF
comm_identifier = last_communicator_id;
internal__connect_to_port = 0
END FUNCTION
FUNCTION internal__activate_communicator(comm_identifier)
USE mpi
IMPLICIT NONE
INTEGER, intent(in) :: comm_identifier
INTEGER :: internal__activate_communicator
if ((comm_identifier .LT. 0) .OR. (comm_identifier .GT. last_communicator_id)) then
internal__activate_communicator = -1
return
end if
internal__activate_communicator = 0
id_to_activate = comm_identifier
END FUNCTION
FUNCTION internal__become_code(number_of_workers, modulename, classname)
IMPLICIT NONE
character(len=*), intent(in) :: modulename, classname
integer, intent(in) :: number_of_workers
INTEGER :: internal__become_code
internal__become_code = 0
END FUNCTION
"""
NOMPI_INTERNAL_FUNCTIONS_STRING = """
FUNCTION internal__open_port(outval)
IMPLICIT NONE
character(len=*), intent(out) :: outval
INTEGER :: internal__open_port
outval = ""
internal__open_port = 0
END FUNCTION
FUNCTION internal__accept_on_port(port_identifier, comm_identifier)
IMPLICIT NONE
character(len=*), intent(in) :: port_identifier
INTEGER, intent(out) :: comm_identifier
INTEGER :: internal__accept_on_port
comm_identifier = -1;
internal__accept_on_port = 0
END FUNCTION
FUNCTION internal__connect_to_port(port_identifier, comm_identifier)
IMPLICIT NONE
character(len=*), intent(in) :: port_identifier
INTEGER, intent(out) :: comm_identifier
INTEGER :: internal__connect_to_port
comm_identifier = -1
internal__connect_to_port = 0
END FUNCTION
FUNCTION internal__activate_communicator(comm_identifier)
IMPLICIT NONE
INTEGER, intent(in) :: comm_identifier
INTEGER :: internal__activate_communicator
internal__activate_communicator = 0
END FUNCTION
FUNCTION internal__become_code(number_of_workers, modulename, classname)
IMPLICIT NONE
character(len=*), intent(in) :: modulename, classname
integer, intent(in) :: number_of_workers
INTEGER :: internal__become_code
internal__become_code = 0
END FUNCTION
"""
INTERNAL_FUNCTIONS_STRING = MPI_INTERNAL_FUNCTIONS_STRING
POLLING_FUNCTIONS_STRING = """
FUNCTION internal__get_message_polling_interval(outval)
INTEGER,intent(out) :: outval
INTEGER :: internal__get_message_polling_interval
outval = polling_interval
internal__get_message_polling_interval = 0
END FUNCTION
FUNCTION internal__set_message_polling_interval(inval)
INTEGER,intent(in) :: inval
INTEGER :: internal__set_message_polling_interval
polling_interval = inval
internal__set_message_polling_interval = 0
END FUNCTION
"""
RECV_HEADER_SLEEP_STRING = """
SUBROUTINE mpi_recv_header(parent, ioerror)
use iso_c_binding
use mpi
implicit none
integer,intent(in) :: parent
integer,intent(inout) :: ioerror
integer :: request_status(MPI_STATUS_SIZE),header_request
logical is_finished
INTERFACE
INTEGER (C_INT) FUNCTION usleep(useconds) bind(C)
!SUBROUTINE usleep(useconds) bind(C)
use iso_c_binding
implicit none
INTEGER(c_int32_t), value :: useconds
END
END INTERFACE
call MPI_Irecv(header_in, HEADER_SIZE, MPI_INTEGER, 0, 989, parent, header_request, ioerror)
if(polling_interval.GT.0) then
is_finished = .false.
call MPI_Test(header_request, is_finished, request_status, ioerror)
DO WHILE(.NOT. is_finished)
ioerror = usleep(int(polling_interval, c_int32_t))
call MPI_Test(header_request, is_finished, request_status, ioerror)
END DO
call MPI_Wait(header_request, request_status, ioerror)
else
call MPI_Wait(header_request, request_status, ioerror)
endif
END SUBROUTINE
"""
RECV_HEADER_WAIT_STRING = """
SUBROUTINE mpi_recv_header(parent, ioerror)
use mpi
implicit none
integer,intent(in) :: parent
integer,intent(inout) :: ioerror
integer :: request_status(MPI_STATUS_SIZE),header_request
call MPI_Irecv(header_in, HEADER_SIZE, MPI_INTEGER, 0, 989, parent, header_request, ioerror)
call MPI_Wait(header_request, request_status, ioerror)
END SUBROUTINE
"""
EMPTY_RUN_LOOP_MPI_STRING = """
SUBROUTINE run_loop_mpi
implicit none
END SUBROUTINE
"""
RUN_LOOP_MPI_STRING = """
SUBROUTINE run_loop_mpi
use mpi
implicit none
integer :: provided
integer :: rank, parent, ioerror, max_call_count = 255
integer :: must_run_loop, maximum_size, total_string_length
integer i, offset, call_count
call MPI_INIT_THREAD(MPI_THREAD_MULTIPLE, provided, ioerror)
ALLOCATE(integers_in(max_call_count * MAX_INTEGERS_IN))
ALLOCATE(integers_out(max_call_count * MAX_INTEGERS_OUT))
ALLOCATE(longs_in(max_call_count * MAX_LONGS_IN))
ALLOCATE(longs_out(max_call_count * MAX_LONGS_OUT))
ALLOCATE(floats_in(max_call_count * MAX_FLOATS_IN))
ALLOCATE(floats_out(max_call_count * MAX_FLOATS_OUT))
ALLOCATE(doubles_in(max_call_count * MAX_DOUBLES_IN))
ALLOCATE(doubles_out(max_call_count * MAX_DOUBLES_OUT))
ALLOCATE(c_booleans_in(max_call_count * MAX_BOOLEANS_IN))
ALLOCATE(c_booleans_out(max_call_count * MAX_BOOLEANS_OUT))
ALLOCATE(booleans_in(max_call_count * MAX_BOOLEANS_IN))
ALLOCATE(booleans_out(max_call_count * MAX_BOOLEANS_OUT))
ALLOCATE(string_sizes_in(max_call_count * MAX_STRINGS_IN))
ALLOCATE(string_sizes_out(max_call_count * MAX_STRINGS_OUT))
ALLOCATE(strings_in(max_call_count * MAX_STRINGS_IN))
!ensure there is at least one string to return an error code in
ALLOCATE(strings_out(max(1, max_call_count * MAX_STRINGS_OUT)))
call MPI_COMM_GET_PARENT(parent, ioerror)
call MPI_COMM_RANK(parent, rank, ioerror)
last_communicator_id = last_communicator_id + 1
communicators(1) = parent
active_communicator_id = 1
must_run_loop = 1
do while (must_run_loop .eq. 1)
if ((id_to_activate .GE. 0) .AND. (id_to_activate .NE. active_communicator_id)) then
active_communicator_id = id_to_activate
id_to_activate = -1
parent = communicators(active_communicator_id)
call MPI_COMM_RANK(parent, rank, ioerror)
end if
call mpi_recv_header(parent, ioerror)
!print*, 'fortran: got header ', header_in
call_count = header_in(HEADER_CALL_COUNT)
IF (call_count .gt. max_call_count) THEN
max_call_count = call_count + 255;
DEALLOCATE(integers_in)
DEALLOCATE(integers_out)
DEALLOCATE(longs_in)
DEALLOCATE(longs_out)
DEALLOCATE(floats_in)
DEALLOCATE(floats_out)
DEALLOCATE(doubles_in)
DEALLOCATE(doubles_out)
DEALLOCATE(c_booleans_in)
DEALLOCATE(c_booleans_out)
DEALLOCATE(booleans_in)
DEALLOCATE(booleans_out)
DEALLOCATE(string_sizes_in)
DEALLOCATE(string_sizes_out)
DEALLOCATE(strings_in)
DEALLOCATE(strings_out)
ALLOCATE(integers_in(max_call_count * MAX_INTEGERS_IN))
ALLOCATE(integers_out(max_call_count * MAX_INTEGERS_OUT))
ALLOCATE(longs_in(max_call_count * MAX_LONGS_IN))
ALLOCATE(longs_out(max_call_count * MAX_LONGS_OUT))
ALLOCATE(floats_in(max_call_count * MAX_FLOATS_IN))
ALLOCATE(floats_out(max_call_count * MAX_FLOATS_OUT))
ALLOCATE(doubles_in(max_call_count * MAX_DOUBLES_IN))
ALLOCATE(doubles_out(max_call_count * MAX_DOUBLES_OUT))
ALLOCATE(c_booleans_in(max_call_count * MAX_BOOLEANS_IN))
ALLOCATE(c_booleans_out(max_call_count * MAX_BOOLEANS_OUT))
ALLOCATE(booleans_in(max_call_count * MAX_BOOLEANS_IN))
ALLOCATE(booleans_out(max_call_count * MAX_BOOLEANS_OUT))
ALLOCATE(string_sizes_in(max_call_count * MAX_STRINGS_IN))
ALLOCATE(string_sizes_out(max_call_count * MAX_STRINGS_OUT))
ALLOCATE(strings_in(max_call_count * MAX_STRINGS_IN))
ALLOCATE(strings_out(max(1, max_call_count * MAX_STRINGS_OUT)))
END IF
if (header_in(HEADER_INTEGER_COUNT) .gt. 0) then
call MPI_BCast(integers_in, header_in(HEADER_INTEGER_COUNT), MPI_INTEGER, 0, parent, ioError);
end if
if (header_in(HEADER_LONG_COUNT) .gt. 0) then
call MPI_BCast(longs_in, header_in(HEADER_LONG_COUNT), MPI_INTEGER8, 0, parent, ioError);
end if
if (header_in(HEADER_FLOAT_COUNT) .gt. 0) then
call MPI_BCast(floats_in, header_in(HEADER_FLOAT_COUNT), MPI_REAL, 0, parent, ioError);
end if
if (header_in(HEADER_DOUBLE_COUNT) .gt. 0) then
call MPI_BCast(doubles_in, header_in(HEADER_DOUBLE_COUNT), MPI_REAL8, 0, parent, ioError);
end if
if (header_in(HEADER_BOOLEAN_COUNT) .gt. 0) then
! some older MPI do not define MPI_C_BOOL; this seems to work ok
! maybe booleans_in in this call should be replaced by char (more portable) or logical*1
call MPI_BCast(c_booleans_in, header_in(HEADER_BOOLEAN_COUNT), MPI_BYTE, 0, parent, ioError);
do i=1,header_in(HEADER_BOOLEAN_COUNT)
booleans_in(i)=logical(c_booleans_in(i))
enddo
end if
if (header_in(HEADER_STRING_COUNT) .gt. 0) then
strings_in = ' '
call MPI_BCast(string_sizes_in, header_in(HEADER_STRING_COUNT), MPI_INTEGER, 0, parent, ioError);
maximum_size = 0
total_string_length = 0
do i = 1, header_in(HEADER_STRING_COUNT), 1
total_string_length = total_string_length + string_sizes_in(i) + 1
if (string_sizes_in(i) .gt. maximum_size) then
maximum_size = string_sizes_in(i)
end if
end do
if(maximum_size.GT.256) then
print*, "fortran_worker reports too large string"
stop
endif
if(total_string_length.GT.1000000) then
print*, "fortran_worker reports too large string message"
stop
endif
call MPI_BCast(characters_in, total_string_length, MPI_CHARACTER, 0, parent, ioError);
offset = 1
do i = 1, header_in(HEADER_STRING_COUNT), 1
strings_in(i) = ' '
strings_in(i) = characters_in(offset : (offset + string_sizes_in(i)))
strings_in(i)((string_sizes_in(i) + 1):(string_sizes_in(i) + 1)) = ' '
offset = offset + string_sizes_in(i) + 1
!print*, 'fortran: strings_in(i) ', i, strings_in(i) , ' of length ', string_sizes_in(i), &
!' actually of size ', len_trim(strings_in(i))
end do
end if
header_out = 0
header_out(HEADER_CALL_ID) = header_in(HEADER_CALL_ID)
header_out(HEADER_FUNCTION_ID) = header_in(HEADER_FUNCTION_ID)
header_out(HEADER_CALL_COUNT) = header_in(HEADER_CALL_COUNT)
strings_out = ' '
must_run_loop = handle_call()
!print*, 'fortran: sending header ', header_out
if (rank .eq. 0 ) then
call MPI_SEND(header_out, HEADER_SIZE, MPI_INTEGER, 0, 999, parent, ioerror);
if (header_out(HEADER_INTEGER_COUNT) .gt. 0) then
call MPI_SEND(integers_out, header_out(HEADER_INTEGER_COUNT), MPI_INTEGER, 0, 999, parent, ioerror)
end if
if (header_out(HEADER_LONG_COUNT) .gt. 0) then
call MPI_SEND(longs_out, header_out(HEADER_LONG_COUNT), MPI_INTEGER8, 0, 999, parent, ioerror)
end if
if (header_out(HEADER_FLOAT_COUNT) .gt. 0) then
call MPI_SEND(floats_out, header_out(HEADER_FLOAT_COUNT), MPI_REAL, 0, 999, parent, ioerror)
end if
if (header_out(HEADER_DOUBLE_COUNT) .gt. 0) then
call MPI_SEND(doubles_out, header_out(HEADER_DOUBLE_COUNT), MPI_REAL8, 0, 999, parent, ioerror)
end if
if (header_out(HEADER_BOOLEAN_COUNT) .gt. 0) then
do i=1,header_out(HEADER_BOOLEAN_COUNT)
c_booleans_out(i)=booleans_out(i)
enddo
call MPI_SEND(c_booleans_out, header_out(HEADER_BOOLEAN_COUNT), MPI_BYTE, 0, 999, parent, ioerror)
end if
if (header_out(HEADER_STRING_COUNT) .gt. 0) then
offset = 1
do i = 1, header_out(HEADER_STRING_COUNT),1
string_sizes_out(i) = len_trim(strings_out(i))
!print*, 'fortran: sending strings, strings_out(i) ', i, strings_out(i) , ' of length ', string_sizes_out(i), &
!' actually of size ', len_trim(strings_out(i))
characters_out(offset:offset+string_sizes_out(i)) = strings_out(i)
offset = offset + string_sizes_out(i) + 1
characters_out(offset-1:offset-1) = char(0)
end do
total_string_length=offset-1
if(total_string_length.GT.1000000) then
print*, "fortran_worker reports too large string message"
stop
endif
call MPI_SEND(string_sizes_out, header_out(HEADER_STRING_COUNT), MPI_INTEGER, 0, 999, parent, ioerror)
call MPI_SEND(characters_out, offset -1, MPI_CHARACTER, 0, 999, parent, ioerror)
end if
end if
end do
DEALLOCATE(integers_in)
DEALLOCATE(integers_out)
DEALLOCATE(longs_in)
DEALLOCATE(longs_out)
DEALLOCATE(floats_in)
DEALLOCATE(floats_out)
DEALLOCATE(doubles_in)
DEALLOCATE(doubles_out)
DEALLOCATE(booleans_in)
DEALLOCATE(booleans_out)
DEALLOCATE(string_sizes_in)
DEALLOCATE(string_sizes_out)
DEALLOCATE(strings_in)
DEALLOCATE(strings_out)
do i = 1, last_communicator_id, 1
call MPI_COMM_DISCONNECT(communicators(i), ioerror);
end do
call MPI_FINALIZE(ioerror)
return
end subroutine
"""
RUN_LOOP_SOCKETS_STRING = """
SUBROUTINE run_loop_sockets
use iso_c_binding
use FortranSocketsInterface
implicit none
integer :: max_call_count = 255
integer :: must_run_loop, maximum_size, total_string_length
integer :: i, offset, call_count, port
character(len=32) :: port_string
character(kind=c_char, len=64) :: host
logical (c_bool), allocatable, target :: c_booleans_in(:)
logical (c_bool), allocatable, target :: c_booleans_out(:)
ALLOCATE(integers_in(max_call_count * MAX_INTEGERS_IN))
ALLOCATE(integers_out(max_call_count * MAX_INTEGERS_OUT))
ALLOCATE(longs_in(max_call_count * MAX_LONGS_IN))
ALLOCATE(longs_out(max_call_count * MAX_LONGS_OUT))
ALLOCATE(floats_in(max_call_count * MAX_FLOATS_IN))
ALLOCATE(floats_out(max_call_count * MAX_FLOATS_OUT))
ALLOCATE(doubles_in(max_call_count * MAX_DOUBLES_IN))
ALLOCATE(doubles_out(max_call_count * MAX_DOUBLES_OUT))
ALLOCATE(booleans_in(max_call_count * MAX_BOOLEANS_IN))
ALLOCATE(booleans_out(max_call_count * MAX_BOOLEANS_OUT))
ALLOCATE(c_booleans_in(max_call_count * MAX_BOOLEANS_IN))
ALLOCATE(c_booleans_out(max_call_count * MAX_BOOLEANS_OUT))
ALLOCATE(string_sizes_in(max_call_count * MAX_STRINGS_IN))
ALLOCATE(strings_in(max_call_count * MAX_STRINGS_IN))
!ensure there is at least one string to return an error code in
ALLOCATE(strings_out(max(1, max_call_count * MAX_STRINGS_OUT)))
ALLOCATE(string_sizes_out(max(1, max_call_count * MAX_STRINGS_OUT)))
call get_command_argument(1, port_string)
call get_command_argument(2, host)
read (port_string,*) port
!add a null character to the end of the string so c knows when the string ends
host = trim(host) // c_null_char
call forsockets_init(host, port)
must_run_loop = 1
do while (must_run_loop .eq. 1)
call receive_integers(c_loc(header_in), HEADER_SIZE)
!print*, 'fortran sockets: got header ', header_in
call_count = header_in(HEADER_CALL_COUNT)
IF (call_count .gt. max_call_count) THEN
max_call_count = call_count + 255;
DEALLOCATE(integers_in)
DEALLOCATE(integers_out)
DEALLOCATE(longs_in)
DEALLOCATE(longs_out)
DEALLOCATE(floats_in)
DEALLOCATE(floats_out)
DEALLOCATE(doubles_in)
DEALLOCATE(doubles_out)
DEALLOCATE(booleans_in)
DEALLOCATE(booleans_out)
DEALLOCATE(c_booleans_in)
DEALLOCATE(c_booleans_out)
DEALLOCATE(string_sizes_in)
DEALLOCATE(string_sizes_out)
DEALLOCATE(strings_in)
DEALLOCATE(strings_out)
ALLOCATE(integers_in(max_call_count * MAX_INTEGERS_IN))
ALLOCATE(integers_out(max_call_count * MAX_INTEGERS_OUT))
ALLOCATE(longs_in(max_call_count * MAX_LONGS_IN))
ALLOCATE(longs_out(max_call_count * MAX_LONGS_OUT))
ALLOCATE(floats_in(max_call_count * MAX_FLOATS_IN))
ALLOCATE(floats_out(max_call_count * MAX_FLOATS_OUT))
ALLOCATE(doubles_in(max_call_count * MAX_DOUBLES_IN))
ALLOCATE(doubles_out(max_call_count * MAX_DOUBLES_OUT))
ALLOCATE(booleans_in(max_call_count * MAX_BOOLEANS_IN))
ALLOCATE(booleans_out(max_call_count * MAX_BOOLEANS_OUT))
ALLOCATE(c_booleans_in(max_call_count * MAX_BOOLEANS_IN))
ALLOCATE(c_booleans_out(max_call_count * MAX_BOOLEANS_OUT))
ALLOCATE(string_sizes_in(max_call_count * MAX_STRINGS_IN))
ALLOCATE(string_sizes_out(max_call_count * MAX_STRINGS_OUT))
ALLOCATE(strings_in(max_call_count * MAX_STRINGS_IN))
ALLOCATE(strings_out(max(1, max_call_count * MAX_STRINGS_OUT)))
END IF
if (header_in(HEADER_INTEGER_COUNT) .gt. 0) then
call receive_integers(c_loc(integers_in), header_in(HEADER_INTEGER_COUNT))
end if
if (header_in(HEADER_LONG_COUNT) .gt. 0) then
call receive_longs(c_loc(longs_in), header_in(HEADER_LONG_COUNT))
end if
if (header_in(HEADER_FLOAT_COUNT) .gt. 0) then
call receive_floats(c_loc(floats_in), header_in(HEADER_FLOAT_COUNT))
end if
if (header_in(HEADER_DOUBLE_COUNT) .gt. 0) then
call receive_doubles(c_loc(doubles_in), header_in(HEADER_DOUBLE_COUNT))
end if
if (header_in(HEADER_BOOLEAN_COUNT) .gt. 0) then
call receive_booleans(c_loc(c_booleans_in), header_in(HEADER_BOOLEAN_COUNT))
do i = 1, header_in(HEADER_BOOLEAN_COUNT), 1
booleans_in(i) = logical(c_booleans_in(i))
end do
end if
if (header_in(HEADER_STRING_COUNT) .gt. 0) then
strings_in = ' '
call receive_integers(c_loc(string_sizes_in), header_in(HEADER_STRING_COUNT))
maximum_size = 0
total_string_length = 0
do i = 1, header_in(HEADER_STRING_COUNT), 1
total_string_length = total_string_length + string_sizes_in(i) + 1
if (string_sizes_in(i) .gt. maximum_size) then
maximum_size = string_sizes_in(i)
end if
end do
if(maximum_size.GT.256) then
print*, "fortran_worker reports too large string"
stop
endif
if(total_string_length.GT.1000000) then
print*, "fortran_worker reports too large string message"
stop
endif
call receive_string(c_loc(c_characters_in), total_string_length)
! this trick is necessary on older gfortran compilers (~<4.9)
! as c_loc needs character(len=1)
do i=1, total_string_length
characters_in(i:i)=c_characters_in(i)
enddo
offset = 1
do i = 1, header_in(HEADER_STRING_COUNT), 1
strings_in(i) = ' '
strings_in(i) = characters_in(offset : (offset + string_sizes_in(i)))
strings_in(i)((string_sizes_in(i) + 1):(string_sizes_in(i) + 1)) = ' '
offset = offset + string_sizes_in(i) + 1
!print*, 'fortran: strings_in(i) ', i, strings_in(i) , ' of length ', string_sizes_in(i), &
!' actually of size ', len_trim(strings_in(i))
end do
end if
header_out = 0
header_out(HEADER_CALL_ID) = header_in(HEADER_CALL_ID)
header_out(HEADER_FUNCTION_ID) = header_in(HEADER_FUNCTION_ID)
header_out(HEADER_CALL_COUNT) = header_in(HEADER_CALL_COUNT)
strings_out = ' '
must_run_loop = handle_call()
!print*, 'fortran: sending header ', header_out
call send_integers(c_loc(header_out), HEADER_SIZE)
if (header_out(HEADER_INTEGER_COUNT) .gt. 0) then
call send_integers(c_loc(integers_out), header_out(HEADER_INTEGER_COUNT))
end if
if (header_out(HEADER_LONG_COUNT) .gt. 0) then
call send_longs(c_loc(longs_out), header_out(HEADER_LONG_COUNT))
end if
if (header_out(HEADER_FLOAT_COUNT) .gt. 0) then
call send_floats(c_loc(floats_out), header_out(HEADER_FLOAT_COUNT))
end if
if (header_out(HEADER_DOUBLE_COUNT) .gt. 0) then
call send_doubles(c_loc(doubles_out), header_out(HEADER_DOUBLE_COUNT))
end if
if (header_out(HEADER_BOOLEAN_COUNT) .gt. 0) then
do i = 1, header_out(HEADER_BOOLEAN_COUNT), 1
c_booleans_out(i) = logical(booleans_out(i), c_bool)
end do
call send_booleans(c_loc(c_booleans_out), header_out(HEADER_BOOLEAN_COUNT))
end if
if (header_out(HEADER_STRING_COUNT) .gt. 0) then
offset = 1
do i = 1, header_out(HEADER_STRING_COUNT),1
string_sizes_out(i) = len_trim(strings_out(i))
!print*, 'fortran: sending strings, strings_out(i) ', i, strings_out(i) , ' of length ', string_sizes_out(i), &
!' actually of size ', len_trim(strings_out(i))
characters_out(offset:offset+string_sizes_out(i)) = strings_out(i)
offset = offset + string_sizes_out(i) + 1
characters_out(offset-1:offset-1) = char(0)
end do
total_string_length=offset-1
if(total_string_length.GT.1000000) then
print*, "fortran_worker reports too large string message"
stop
endif
do i=1, total_string_length
c_characters_out(i)=characters_out(i:i)
enddo
call send_integers(c_loc(string_sizes_out), header_out(HEADER_STRING_COUNT))
call send_string(c_loc(c_characters_out), offset-1 )
end if
end do
DEALLOCATE(integers_in)
DEALLOCATE(integers_out)
DEALLOCATE(longs_in)
DEALLOCATE(longs_out)
DEALLOCATE(floats_in)
DEALLOCATE(floats_out)
DEALLOCATE(doubles_in)
DEALLOCATE(doubles_out)
DEALLOCATE(booleans_in)
DEALLOCATE(booleans_out)
DEALLOCATE(c_booleans_in)
DEALLOCATE(c_booleans_out)
DEALLOCATE(string_sizes_in)
DEALLOCATE(string_sizes_out)
DEALLOCATE(strings_in)
DEALLOCATE(strings_out)
call forsockets_close()
return
end subroutine
"""
# Fortran template: stub run-loop emitted when the worker is built without
# socket-channel support; it only reports that the channel is unavailable.
EMPTY_RUN_LOOP_SOCKETS_STRING = """
subroutine run_loop_sockets
print*, 'fortran: sockets channel not supported in this worker'
return
end subroutine
"""
RUN_LOOP_SOCKETS_MPI_STRING = """
SUBROUTINE run_loop_sockets_mpi
use iso_c_binding
use FortranSocketsInterface
use mpi
implicit none
integer :: provided
integer :: max_call_count = 255
integer :: must_run_loop, maximum_size, total_string_length
integer :: i, offset, call_count, port, rank, ioerror
character(len=32) :: port_string
character(kind=c_char, len=64) :: host
logical (c_bool), allocatable, target :: c_booleans_in(:)
logical (c_bool), allocatable, target :: c_booleans_out(:)
ALLOCATE(integers_in(max_call_count * MAX_INTEGERS_IN))
ALLOCATE(integers_out(max_call_count * MAX_INTEGERS_OUT))
ALLOCATE(longs_in(max_call_count * MAX_LONGS_IN))
ALLOCATE(longs_out(max_call_count * MAX_LONGS_OUT))
ALLOCATE(floats_in(max_call_count * MAX_FLOATS_IN))
ALLOCATE(floats_out(max_call_count * MAX_FLOATS_OUT))
ALLOCATE(doubles_in(max_call_count * MAX_DOUBLES_IN))
ALLOCATE(doubles_out(max_call_count * MAX_DOUBLES_OUT))
ALLOCATE(booleans_in(max_call_count * MAX_BOOLEANS_IN))
ALLOCATE(booleans_out(max_call_count * MAX_BOOLEANS_OUT))
ALLOCATE(c_booleans_in(max_call_count * MAX_BOOLEANS_IN))
ALLOCATE(c_booleans_out(max_call_count * MAX_BOOLEANS_OUT))
ALLOCATE(string_sizes_in(max_call_count * MAX_STRINGS_IN))
ALLOCATE(strings_in(max_call_count * MAX_STRINGS_IN))
!ensure there is at least one string to return an error code in
ALLOCATE(strings_out(max(1, max_call_count * MAX_STRINGS_OUT)))
ALLOCATE(string_sizes_out(max(1, max_call_count * MAX_STRINGS_OUT)))
call mpi_init_thread(mpi_thread_multiple, provided, ioerror)
call mpi_comm_rank(MPI_COMM_WORLD, rank, ioerror)
if (rank .eq. 0) then
call get_command_argument(1, port_string)
call get_command_argument(2, host)
read (port_string,*) port
!add a null character to the end of the string so c knows when the string ends
host = trim(host) // c_null_char
call forsockets_init(host, port)
end if
must_run_loop = 1
do while (must_run_loop .eq. 1)
if (rank .eq. 0) then
call receive_integers(c_loc(header_in), HEADER_SIZE)
end if
call MPI_BCast(header_in, HEADER_SIZE , MPI_INTEGER, 0, MPI_COMM_WORLD, ioerror)
!print*, 'fortran sockets mpi: got header ', header_in
call_count = header_in(HEADER_CALL_COUNT)
IF (call_count .gt. max_call_count) THEN
max_call_count = call_count + 255;
DEALLOCATE(integers_in)
DEALLOCATE(integers_out)
DEALLOCATE(longs_in)
DEALLOCATE(longs_out)
DEALLOCATE(floats_in)
DEALLOCATE(floats_out)
DEALLOCATE(doubles_in)
DEALLOCATE(doubles_out)
DEALLOCATE(booleans_in)
DEALLOCATE(booleans_out)
DEALLOCATE(c_booleans_in)
DEALLOCATE(c_booleans_out)
DEALLOCATE(string_sizes_in)
DEALLOCATE(string_sizes_out)
DEALLOCATE(strings_in)
DEALLOCATE(strings_out)
ALLOCATE(integers_in(max_call_count * MAX_INTEGERS_IN))
ALLOCATE(integers_out(max_call_count * MAX_INTEGERS_OUT))
ALLOCATE(longs_in(max_call_count * MAX_LONGS_IN))
ALLOCATE(longs_out(max_call_count * MAX_LONGS_OUT))
ALLOCATE(floats_in(max_call_count * MAX_FLOATS_IN))
ALLOCATE(floats_out(max_call_count * MAX_FLOATS_OUT))
ALLOCATE(doubles_in(max_call_count * MAX_DOUBLES_IN))
ALLOCATE(doubles_out(max_call_count * MAX_DOUBLES_OUT))
ALLOCATE(booleans_in(max_call_count * MAX_BOOLEANS_IN))
ALLOCATE(booleans_out(max_call_count * MAX_BOOLEANS_OUT))
ALLOCATE(c_booleans_in(max_call_count * MAX_BOOLEANS_IN))
ALLOCATE(c_booleans_out(max_call_count * MAX_BOOLEANS_OUT))
ALLOCATE(string_sizes_in(max_call_count * MAX_STRINGS_IN))
ALLOCATE(string_sizes_out(max_call_count * MAX_STRINGS_OUT))
ALLOCATE(strings_in(max_call_count * MAX_STRINGS_IN))
ALLOCATE(strings_out(max(1, max_call_count * MAX_STRINGS_OUT)))
END IF
if (header_in(HEADER_INTEGER_COUNT) .gt. 0) then
if (rank .eq. 0) then
call receive_integers(c_loc(integers_in), header_in(HEADER_INTEGER_COUNT))
end if
call MPI_BCast(integers_in, header_in(HEADER_INTEGER_COUNT), MPI_INTEGER, 0, MPI_COMM_WORLD, ioError);
end if
if (header_in(HEADER_LONG_COUNT) .gt. 0) then
if (rank .eq. 0) then
call receive_longs(c_loc(longs_in), header_in(HEADER_LONG_COUNT))
end if
call MPI_BCast(longs_in, header_in(HEADER_LONG_COUNT), MPI_INTEGER8, 0, MPI_COMM_WORLD, ioError);
end if
if (header_in(HEADER_FLOAT_COUNT) .gt. 0) then
if (rank .eq. 0) then
call receive_floats(c_loc(floats_in), header_in(HEADER_FLOAT_COUNT))
end if
call MPI_BCast(floats_in, header_in(HEADER_FLOAT_COUNT), MPI_REAL, 0, MPI_COMM_WORLD, ioerror)
end if
if (header_in(HEADER_DOUBLE_COUNT) .gt. 0) then
if (rank .eq. 0) then
call receive_doubles(c_loc(doubles_in), header_in(HEADER_DOUBLE_COUNT))
end if
call MPI_BCast(doubles_in, header_in(HEADER_DOUBLE_COUNT), MPI_REAL8, 0, MPI_COMM_WORLD, ioerror)
end if
if (header_in(HEADER_BOOLEAN_COUNT) .gt. 0) then
if (rank .eq. 0) then
call receive_booleans(c_loc(c_booleans_in), header_in(HEADER_BOOLEAN_COUNT))
do i = 1, header_in(HEADER_BOOLEAN_COUNT), 1
booleans_in(i) = logical(c_booleans_in(i))
end do
end if
call MPI_BCast(booleans_in, header_in(HEADER_BOOLEAN_COUNT), MPI_LOGICAL, 0, MPI_COMM_WORLD, ioerror)
end if
if (header_in(HEADER_STRING_COUNT) .gt. 0) then
strings_in = ' '
if (rank .eq. 0) then
call receive_integers(c_loc(string_sizes_in), header_in(HEADER_STRING_COUNT))
end if
call MPI_BCast(string_sizes_in, header_in(HEADER_STRING_COUNT), MPI_INTEGER, 0, MPI_COMM_WORLD, ioError);
maximum_size = 0
total_string_length = 0
do i = 1, header_in(HEADER_STRING_COUNT), 1
total_string_length = total_string_length + string_sizes_in(i) + 1
if (string_sizes_in(i) .gt. maximum_size) then
maximum_size = string_sizes_in(i)
end if
end do
if(maximum_size.GT.256) then
print*, "fortran_worker reports too large string"
stop
endif
if(total_string_length.GT.1000000) then
print*, "fortran_worker reports too large string message"
stop
endif
if (rank .eq. 0) then
call receive_string(c_loc(c_characters_in), total_string_length)
endif
do i=1, total_string_length
characters_in(i:i)=c_characters_in(i)
enddo
call MPI_BCast(characters_in, total_string_length, MPI_CHARACTER, 0, MPI_COMM_WORLD, ioError);
offset = 1
do i = 1, header_in(HEADER_STRING_COUNT), 1
strings_in(i) = ' '
strings_in(i) = characters_in(offset : (offset + string_sizes_in(i)))
strings_in(i)((string_sizes_in(i) + 1):(string_sizes_in(i) + 1)) = ' '
offset = offset + string_sizes_in(i) + 1
!print*, 'fortran: strings_in(i) ', i, strings_in(i) , ' of length ', string_sizes_in(i), &
!' actually of size ', len_trim(strings_in(i))
end do
end if
header_out = 0
header_out(HEADER_CALL_ID) = header_in(HEADER_CALL_ID)
header_out(HEADER_FUNCTION_ID) = header_in(HEADER_FUNCTION_ID)
header_out(HEADER_CALL_COUNT) = header_in(HEADER_CALL_COUNT)
strings_out = ' '
must_run_loop = handle_call()
call MPI_Barrier(MPI_COMM_WORLD, ioerror)
if (rank .eq. 0) then
!print*, 'fortran: sending header ', header_out
call send_integers(c_loc(header_out), HEADER_SIZE)
if (header_out(HEADER_INTEGER_COUNT) .gt. 0) then
call send_integers(c_loc(integers_out), header_out(HEADER_INTEGER_COUNT))
end if
if (header_out(HEADER_LONG_COUNT) .gt. 0) then
call send_longs(c_loc(longs_out), header_out(HEADER_LONG_COUNT))
end if
if (header_out(HEADER_FLOAT_COUNT) .gt. 0) then
call send_floats(c_loc(floats_out), header_out(HEADER_FLOAT_COUNT))
end if
if (header_out(HEADER_DOUBLE_COUNT) .gt. 0) then
call send_doubles(c_loc(doubles_out), header_out(HEADER_DOUBLE_COUNT))
end if
if (header_out(HEADER_BOOLEAN_COUNT) .gt. 0) then
do i = 1, header_out(HEADER_BOOLEAN_COUNT), 1
c_booleans_out(i) = logical(booleans_out(i), c_bool)
!print*, 'fortran sockets mpi: sending boolean', booleans_out(i) , i, ' send as ', c_booleans_out(i)
end do
call send_booleans(c_loc(c_booleans_out), header_out(HEADER_BOOLEAN_COUNT))
end if
if (header_out(HEADER_STRING_COUNT) .gt. 0) then
offset = 1
do i = 1, header_out(HEADER_STRING_COUNT),1
string_sizes_out(i) = len_trim(strings_out(i))
!print*, 'fortran: sending strings, strings_out(i) ', i, strings_out(i) , ' of length ', string_sizes_out(i), &
!' actually of size ', len_trim(strings_out(i))
characters_out(offset:offset+string_sizes_out(i)) = strings_out(i)
offset = offset + string_sizes_out(i) + 1
characters_out(offset-1:offset-1) = char(0)
end do
total_string_length=offset-1
if(total_string_length.GT.1000000) then
print*, "fortran_Worker reports too large string message"
stop
endif
do i=1, total_string_length
c_characters_out(i)=characters_out(i:i)
enddo
call send_integers(c_loc(string_sizes_out), header_out(HEADER_STRING_COUNT))
call send_string(c_loc(c_characters_out), offset-1 )
end if
end if
end do
DEALLOCATE(integers_in)
DEALLOCATE(integers_out)
DEALLOCATE(longs_in)
DEALLOCATE(longs_out)
DEALLOCATE(floats_in)
DEALLOCATE(floats_out)
DEALLOCATE(doubles_in)
DEALLOCATE(doubles_out)
DEALLOCATE(booleans_in)
DEALLOCATE(booleans_out)
DEALLOCATE(string_sizes_in)
DEALLOCATE(string_sizes_out)
DEALLOCATE(strings_in)
DEALLOCATE(strings_out)
if (rank .eq. 0) then
call forsockets_close()
end if
call MPI_FINALIZE(ioerror)
return
end subroutine
"""
# Fortran template: stub emitted when the combined sockets+MPI run-loop is
# not available in this worker build; it only reports the missing channel.
EMPTY_RUN_LOOP_SOCKETS_MPI_STRING = """
subroutine run_loop_sockets_mpi
print*, 'fortran: sockets channel not supported in this worker'
return
end subroutine
"""
MAIN_STRING = """
integer :: count
logical :: use_mpi
character(len=32) :: use_mpi_string
count = command_argument_count()
use_mpi = NEEDS_MPI
if (count .eq. 0) then
call run_loop_mpi()
else if (count .eq. 3) then
call get_command_argument(3, use_mpi_string)
if (use_mpi_string .eq. 'true') then
use_mpi = .true.
else if (use_mpi_string .eq. 'false') then
use_mpi = .false.
else
print*, 'fortran worker: need either true or false as mpi enable arguments, not', use_mpi_string
stop
end if
if (use_mpi) then
call run_loop_sockets_mpi()
else
call run_loop_sockets()
end if
else
print*, 'fortran worker: need either 0 or 3 arguments, not', count
stop
end if
"""
# Fortran template: chdir/getcwd wrappers.  The '{0}' slot is filled via
# str.format() with either '' or ' use ifport' (Intel Fortran needs the
# IFPORT module for chdir/getcwd) — see the .format() calls in
# GenerateAFortranSourcecodeStringFromASpecificationClass.start().
GETSET_WORKING_DIRECTORY="""
function set_working_directory(directory) result(ret)
{0}
integer :: ret
character(*), intent(in) :: directory
ret = chdir(directory)
end function
function get_working_directory(directory) result(ret)
{0}
integer :: ret
character(*), intent(out) :: directory
ret = getcwd(directory)
end function
"""
class GenerateAFortranStringOfAFunctionSpecification(GenerateASourcecodeString):
    """Generate one Fortran CASE branch of the worker's handle_call()
    dispatch for a single remote-function specification: build the
    argument list from the typed *_in message buffers, invoke the target
    function, copy INOUT values into the *_out buffers and record the
    per-type output counts in header_out.
    """

    # Fixed per-string slot width assumed by the (currently disabled)
    # fixed-width character-buffer code paths below.
    MAX_STRING_LEN = 256

    @late
    def specification(self):
        # The LegacyFunctionSpecification to generate code for; must be
        # assigned by the caller before start() is invoked.
        raise exceptions.AmuseException("No specification set, please set the specification first")

    @late
    def underscore_functions_from_specification_classes(self):
        # Specification classes whose functions need a trailing '_' appended
        # to the generated call name (compiler name-mangling); default none.
        return []

    @late
    def dtype_to_spec(self):
        return dtype_to_spec

    def index_string(self, index, must_copy_in_to_out = False):
        """Return the Fortran index expression for argument slot *index*
        in a message buffer.

        must_handle_array functions receive the whole call_count-sized slab
        at once, so the expression addresses the slab start; for
        can_handle_array (or when copying IN slots to OUT slots) the loop
        variable ``i`` selects one element per iteration.  In the plain
        scalar case the 1-based index is returned as an int (the output
        stream renders ints as text).
        """
        if self.specification.must_handle_array and not must_copy_in_to_out:
            if index == 0:
                return '1'
            else:
                return '( %d * call_count) + 1' % (index)
        elif self.specification.can_handle_array or (self.specification.must_handle_array and must_copy_in_to_out):
            if index == 0:
                return 'i'
            else:
                if index == -1:
                    return "i - 1"
                else:
                    return '( %d * call_count) + i' % index
        else:
            return index + 1

    def start(self):
        """Emit the complete CASE branch and store it in self._result."""
        self.specification.prepare_output_parameters()
        self.output_casestmt_start()
        self.out.indent()
        #self.output_lines_before_with_clear_out_variables()
        #self.output_lines_before_with_clear_input_variables()
        if self.specification.must_handle_array:
            # The target function loops over all call_count elements itself.
            pass
        elif self.specification.can_handle_array:
            # Wrap the scalar call in an explicit per-element loop.
            self.out.lf() + 'do i = 1, call_count, 1'
            self.out.indent()
        #self.output_lines_before_with_inout_variables()
        self.output_function_start()
        self.output_function_parameters()
        self.output_function_end()
        self.output_lines_with_inout_variables()
        if self.specification.must_handle_array:
            if not self.specification.result_type is None:
                # Replicate the single function result into every element slot.
                spec = self.dtype_to_spec[self.specification.result_type]
                self.out.lf() + 'DO i = 2, call_count'
                self.out.indent()
                self.out.lf() + spec.output_var_name + '(i)' + ' = ' + spec.output_var_name + '(1)'
                self.out.dedent()
                self.out.lf() + 'END DO'
        elif self.specification.can_handle_array:
            self.out.dedent()
            self.out.lf() + 'end do'
        self.output_lines_with_number_of_outputs()
        self.output_casestmt_end()
        self.out.dedent()
        self._result = self.out.string

    def output_function_parameters(self):
        """Emit the actual-argument list, one buffer reference per
        parameter, with Fortran '&' line continuations."""
        self.out.indent()
        first = True
        for parameter in self.specification.parameters:
            spec = self.dtype_to_spec[parameter.datatype]
            if first:
                first = False
                self.out + ' &'
            else:
                self.out + ' ,&'
            if parameter.direction == LegacyFunctionSpecification.IN:
                # (dead code for fixed-width input_characters slicing removed)
                if parameter.datatype == 'string':
                    self.out.n() + 'strings_in(' + self.index_string(parameter.input_index) + ')'
                else:
                    self.out.n() + spec.input_var_name
                    self.out + '(' + self.index_string(parameter.input_index) + ')'
            if parameter.direction == LegacyFunctionSpecification.INOUT:
                # INOUT arguments are passed via the input buffer; the result
                # is copied to the output buffer afterwards by
                # output_lines_with_inout_variables().
                # (dead code for fixed-width output_characters slicing removed)
                self.out.n() + spec.input_var_name
                self.out + '(' + self.index_string(parameter.input_index) + ')'
            elif parameter.direction == LegacyFunctionSpecification.OUT:
                # (dead code for fixed-width output_characters slicing removed)
                self.out.n() + spec.output_var_name
                self.out + '(' + self.index_string(parameter.output_index) + ')'
            elif parameter.direction == LegacyFunctionSpecification.LENGTH:
                # LENGTH parameters receive the number of elements in the call.
                self.out.n() + 'call_count'
        self.out.dedent()

    def output_lines_with_inout_variables(self):
        """Copy every INOUT parameter from its input slot to its output
        slot (element-wise for must_handle_array functions)."""
        for parameter in self.specification.parameters:
            spec = self.dtype_to_spec[parameter.datatype]
            if parameter.direction == LegacyFunctionSpecification.INOUT:
                if self.specification.must_handle_array:
                    self.out.lf() + 'DO i = 1, call_count'
                    self.out.indent()
                self.out.n() + spec.output_var_name
                self.out + '(' + self.index_string(parameter.output_index, must_copy_in_to_out = True) + ')'
                self.out + ' = '
                self.out + spec.input_var_name + '(' + self.index_string(parameter.input_index, must_copy_in_to_out = True) + ')'
                if self.specification.must_handle_array:
                    self.out.dedent()
                    self.out.lf() + 'END DO'

    def output_lines_before_with_clear_out_variables(self):
        # Currently unused (call site commented out in start()): blanks the
        # fixed-width output character buffer once if any string is output.
        for parameter in self.specification.parameters:
            spec = self.dtype_to_spec[parameter.datatype]
            if parameter.is_output():
                if parameter.datatype == 'string':
                    self.out.lf() + 'output_characters = "x"'
                    return

    def output_lines_before_with_clear_input_variables(self):
        # Currently unused (call site commented out in start()): blanks the
        # fixed-width input character buffer once if any string is input.
        for parameter in self.specification.parameters:
            spec = self.dtype_to_spec[parameter.datatype]
            if parameter.is_input():
                if parameter.datatype == 'string':
                    self.out.lf() + 'input_characters = "x"'
                    return

    def output_lines_before_with_inout_variables(self):
        # Currently unused (call site commented out in start()): copies
        # variable-length string data into fixed-width per-slot buffers.
        for parameter in self.specification.parameters:
            spec = self.dtype_to_spec[parameter.datatype]
            if parameter.direction == LegacyFunctionSpecification.IN:
                if parameter.datatype == 'string':
                    self.out.n() + 'input_characters('
                    self.out + '( (' + self.index_string(parameter.input_index) + ')* ' + self.MAX_STRING_LEN + ')'
                    self.out + ':' + '(((' + self.index_string(parameter.input_index) + ')+1) * ' + self.MAX_STRING_LEN + ' - 1)'
                    self.out + ') = &'
                    self.out.lf()
                    self.out + 'characters('
                    self.out + 'get_offset(' + self.index_string(parameter.input_index) + ' - 1 , ' + spec.input_var_name + ')'
                    self.out + ':' + spec.input_var_name + '(' + self.index_string(parameter.input_index) + ')'
                    self.out + ')'
            if parameter.direction == LegacyFunctionSpecification.INOUT:
                if parameter.datatype == 'string':
                    self.out.n() + 'output_characters('
                    self.out + '( (' + self.index_string(parameter.output_index) + ')* ' + self.MAX_STRING_LEN + ')'
                    self.out + ':' + '(((' + self.index_string(parameter.output_index) + ')+1) * ' + self.MAX_STRING_LEN + ' - 1)'
                    self.out + ') = &'
                    self.out.lf()
                    self.out + 'characters('
                    self.out + 'get_offset(' + self.index_string(parameter.input_index) + ' - 1 , ' + spec.input_var_name + ')'
                    self.out + ':' + spec.input_var_name + '(' + self.index_string(parameter.input_index) + ')'
                    self.out + ')'

    def output_lines_with_number_of_outputs(self):
        """Emit the header_out count assignments: per datatype, the number
        of output parameters (plus the function result) times call_count."""
        dtype_to_count = {}
        for parameter in self.specification.output_parameters:
            count = dtype_to_count.get(parameter.datatype, 0)
            dtype_to_count[parameter.datatype] = count + 1
        if not self.specification.result_type is None:
            count = dtype_to_count.get(self.specification.result_type, 0)
            dtype_to_count[self.specification.result_type] = count + 1
        for dtype in dtype_to_count:
            spec = self.dtype_to_spec[dtype]
            count = dtype_to_count[dtype]
            self.out.n() + 'header_out(' + spec.counter_name + ') = ' + count + ' * call_count'
        pass

    def output_function_end(self):
        # Close the argument list opened by output_function_start().
        self.out + ' &'
        self.out.n() + ')'

    def output_function_start(self):
        # Emit either 'result_slot = name(' or 'CALL name(' depending on
        # whether the function returns a value.
        self.out.n()
        if not self.specification.result_type is None:
            spec = self.dtype_to_spec[self.specification.result_type]
            # (dead code for fixed-width output_characters result slot removed)
            self.out + spec.output_var_name
            self.out + '(' + self.index_string(0) + ')' + ' = '
        else:
            self.out + 'CALL '
        self.out + self.specification.name
        if self.must_add_underscore_to_function(self.specification):
            self.out + '_'
        self.out + '('

    def output_casestmt_start(self):
        # Dispatch on the specification's numeric function id.
        self.out + 'CASE(' + self.specification.id + ')'

    def output_casestmt_end(self):
        self.out.n()

    def must_add_underscore_to_function(self, x):
        """True when any registered specification class defines *x* and the
        generated call name therefore needs a trailing underscore."""
        for cls in self.underscore_functions_from_specification_classes:
            if hasattr(cls, x.name):
                return True
        return False
class GenerateAFortranSourcecodeStringFromASpecificationClass(GenerateASourcecodeStringFromASpecificationClass):
    """Assemble the complete Fortran worker program for a specification
    class: constants, message-buffer globals, the MPI and/or sockets
    run-loops (taken from the module-level template strings) and the
    handle_call() dispatch function with one CASE branch per function.
    """

    # Mirrors the per-string slot width used by the branch generator.
    MAX_STRING_LEN = 256

    @late
    def dtype_to_spec(self):
        return dtype_to_spec

    @late
    def number_of_types(self):
        return len(self.dtype_to_spec)

    @late
    def length_of_the_header(self):
        # Two bookkeeping fields plus one count slot per datatype.
        return 2 + self.number_of_types

    @late
    def underscore_functions_from_specification_classes(self):
        return []

    def output_sourcecode_for_function(self):
        """Factory for the per-function CASE-branch generator."""
        result = GenerateAFortranStringOfAFunctionSpecification()
        result.underscore_functions_from_specification_classes = self.underscore_functions_from_specification_classes
        return result

    def output_needs_mpi(self):
        # Emit the NEEDS_MPI parameter that MAIN_STRING consults when
        # choosing the default channel for a 3-argument invocation.
        self.out.lf() + 'logical NEEDS_MPI'
        if (hasattr(self, 'needs_mpi') and self.needs_mpi) and self.must_generate_mpi:
            self.out.lf() + 'parameter (NEEDS_MPI=.true.)'
        else:
            self.out.lf() + 'parameter (NEEDS_MPI=.false.)'
        self.out.lf().lf()

    def start(self):
        """Emit the full worker program and store it in self._result."""
        self.use_iso_c_bindings = config.compilers.fc_iso_c_bindings
        # chdir/getcwd wrappers; ifort needs IFPORT for these intrinsics.
        self.out + GETSET_WORKING_DIRECTORY.format("" if not config.compilers.ifort_version else " use ifport")
        self.out + 'program amuse_worker_program'
        self.out.indent()
        self.output_modules()
        if self.use_iso_c_bindings:
            self.out.n() + 'use iso_c_binding'
        self.out.n() + 'implicit none'
        self.out.n() + CONSTANTS_STRING
        self.output_needs_mpi()
        self.output_maximum_constants()
        if self.must_generate_mpi:
            self.out.lf().lf() + MODULE_GLOBALS_STRING
        else:
            self.out.lf().lf() + NOMPI_MODULE_GLOBALS_STRING
        if self.use_iso_c_bindings:
            self.out.n() + ISO_ARRAY_DEFINES_STRING
        else:
            self.out.n() + ARRAY_DEFINES_STRING
        self.out.lf().lf() + MAIN_STRING
        self.out.lf().lf() + 'CONTAINS'
        self.out + POLLING_FUNCTIONS_STRING
        self.out + GETSET_WORKING_DIRECTORY.format("" if not config.compilers.ifort_version else " use ifport")
        if self.must_generate_mpi:
            self.out + INTERNAL_FUNCTIONS_STRING
            if self.use_iso_c_bindings:
                self.out + RECV_HEADER_SLEEP_STRING
            else:
                self.out + RECV_HEADER_WAIT_STRING
            self.out + RUN_LOOP_MPI_STRING
        else:
            self.out + NOMPI_INTERNAL_FUNCTIONS_STRING
            self.out + EMPTY_RUN_LOOP_MPI_STRING
        # The sockets run-loops need iso_c_binding; fall back to stubs otherwise.
        if self.use_iso_c_bindings:
            self.out.n() + RUN_LOOP_SOCKETS_STRING
            if self.must_generate_mpi:
                self.out.n() + RUN_LOOP_SOCKETS_MPI_STRING
            else:
                self.out.n() + EMPTY_RUN_LOOP_SOCKETS_MPI_STRING
        else:
            self.out.n() + EMPTY_RUN_LOOP_SOCKETS_STRING
            self.out.n() + EMPTY_RUN_LOOP_SOCKETS_MPI_STRING
        self.output_handle_call()
        self.out.dedent()
        self.out.n() + 'end program amuse_worker_program'
        self._result = self.out.string

    def output_mpi_include(self):
        self.out.n() + "USE mpi"

    def output_modules(self):
        # Emit a 'use' line for every module the interface declares.
        self.out.n()
        if hasattr(self.specification_class, 'use_modules'):
            for x in self.specification_class.use_modules:
                self.out.n() + 'use ' + x

    def must_include_declaration_of_function(self, x):
        # Functions marked internal_provided already exist in the templates.
        if hasattr(x.specification, "internal_provided"):
            return False
        return True

    def output_declarations_for_the_functions(self):
        # Without use_modules there are no interface declarations available,
        # so declare the result type of each external function explicitly.
        if not hasattr(self.specification_class, 'use_modules'):
            for x in self.interface_functions:
                if not self.must_include_declaration_of_function(x):
                    continue
                specification = x.specification
                if specification.id == 0:
                    continue
                if specification.result_type is None:
                    continue
                if specification.result_type == 'string':
                    type = 'character(len=255)'
                else:
                    spec = self.dtype_to_spec[specification.result_type]
                    type = spec.type
                self.out.lf() + type + ' :: ' + specification.name
                if self.must_add_underscore_to_function(x):
                    self.out + '_'

    def must_add_underscore_to_function(self, x):
        """True when any registered specification class defines this
        function and its call name therefore needs a trailing underscore."""
        for cls in self.underscore_functions_from_specification_classes:
            if hasattr(cls, x.specification.name):
                return True
        return False

    def output_handle_call(self):
        """Emit handle_call(): dispatch on HEADER_FUNCTION_ID; function id 0
        terminates the run-loop (returns 0), unknown ids report an error
        string and set flag bit 256 in header_out."""
        self.out.lf() + 'integer function handle_call()'
        self.out.indent().n()
        self.out.lf() + 'implicit none'
        self.output_declarations_for_the_functions()
        self.out.lf() + 'integer i, call_count'
        self.out.lf() + 'call_count = header_in(HEADER_CALL_COUNT)'
        self.out.lf() + 'handle_call = 1'
        self.out.lf() + 'SELECT CASE (header_in(HEADER_FUNCTION_ID))'
        self.out.indent().n()
        self.out.lf() + 'CASE(0)'
        self.out.indent().lf() + 'handle_call = 0'
        self.out.dedent()
        self.output_sourcecode_for_functions()
        self.out.lf() + 'CASE DEFAULT'
        self.out.indent()
        self.out.lf() + 'header_out(HEADER_STRING_COUNT) = 1'
        self.out.lf() + 'header_out(HEADER_FLAGS) = IOR(header_out(HEADER_FLAGS), 256) '
        self.out.lf() + "strings_out(1) = 'error, illegal function id'"
        self.out.dedent()
        self.out.dedent().n() + 'END SELECT'
        self.out.n() + 'return'
        self.out.dedent()
        self.out.n() + 'end function'

    def output_maximum_constants(self):
        # Emit MAX_*_IN/MAX_*_OUT parameters: the per-call maximum number of
        # arguments of each datatype, used to size the message buffers.
        self.out.lf() + 'integer MAX_INTEGERS_IN, MAX_INTEGERS_OUT, MAX_LONGS_IN, MAX_LONGS_OUT, &'
        self.out.lf() + 'MAX_FLOATS_IN, MAX_FLOATS_OUT, MAX_DOUBLES_IN,MAX_DOUBLES_OUT, &'
        self.out.lf() + 'MAX_BOOLEANS_IN,MAX_BOOLEANS_OUT, MAX_STRINGS_IN, MAX_STRINGS_OUT'
        self.out.lf()
        for dtype in self.dtype_to_spec.keys():
            dtype_spec = self.dtype_to_spec[dtype]
            maximum = self.mapping_from_dtype_to_maximum_number_of_inputvariables.get(dtype, 0)
            self.out.n() + 'parameter (MAX_' + dtype_spec.input_var_name.upper() + '=' + maximum + ')'
            maximum = self.mapping_from_dtype_to_maximum_number_of_outputvariables.get(dtype, 0)
            self.out.n() + 'parameter (MAX_' + dtype_spec.output_var_name.upper() + '=' + maximum + ')'
class GenerateAFortranStubStringFromASpecificationClass\
        (GenerateASourcecodeStringFromASpecificationClass):
    """Generate a Fortran stub implementation file for a specification
    class: one empty subroutine/function body per interface function,
    optionally wrapped in the module named by use_modules[0]."""

    @late
    def dtype_to_spec(self):
        return dtype_to_spec

    @late
    def ignore_functions_from_specification_classes(self):
        # Specification classes whose functions should get no stub at all.
        return []

    @late
    def underscore_functions_from_specification_classes(self):
        return []

    def output_sourcecode_for_function(self):
        """Factory for the per-function stub generator (full bodies,
        not just definitions)."""
        result = create_definition.CreateFortranStub()
        result.output_definition_only = False
        return result

    def start(self):
        """Emit the stub source and store it in self._result."""
        # When use_modules is declared, wrap the stubs in the first listed
        # module; output_modules(1) emits 'use' lines for the remaining ones.
        if hasattr(self.specification_class, 'use_modules'):
            self.out.lf() + 'module {0}'.format(self.specification_class.use_modules[0])
            self.out.indent()
        self.output_modules(1)
        if hasattr(self.specification_class, 'use_modules'):
            self.out.lf() + "contains"
        self.out.lf()
        self.output_sourcecode_for_functions()
        self.out.lf()
        if hasattr(self.specification_class, 'use_modules'):
            self.out.dedent()
            self.out.lf() + "end module"
        self.out.lf()
        self._result = self.out.string

    def must_include_interface_function_in_output(self, x):
        # Skip internally-provided functions and any function defined on an
        # ignore-listed specification class.
        if hasattr(x.specification, "internal_provided"):
            return False
        for cls in self.ignore_functions_from_specification_classes:
            if hasattr(cls, x.specification.name):
                return False
        return True

    def output_modules(self, skip=0):
        # Emit 'use' lines for the declared modules, skipping the first
        # *skip* entries (already handled by the module wrapper).
        self.out.n()
        if hasattr(self.specification_class, 'use_modules'):
            for x in self.specification_class.use_modules[skip:]:
                self.out.n() + 'use ' + x
| 64,699
| 37.488995
| 132
|
py
|
amuse
|
amuse-main/src/amuse/units/optparse.py
|
import optparse
import textwrap
from amuse.units import quantities
def check_builtin_unit(option, opt, value):
    """Convert an option's raw string with optparse's builtin converter for
    ``option.type`` and, when the option carries a unit, wrap the result in
    an AMUSE quantity.

    :argument option: the Option being processed (provides type and unit)
    :argument opt: the option string, used only for error reporting
    :argument value: the raw command-line string to convert
    :raises optparse.OptionValueError: if the string cannot be converted
    """
    converter, type_label = optparse._builtin_cvt[option.type]
    try:
        number = converter(value)
        if option.unit is None:
            return number
        return quantities.new_quantity(number, option.unit)
    except ValueError:
        raise optparse.OptionValueError(
            "option %s: invalid %s value: %r" % (opt, type_label, value))
class Option(optparse.Option):
    """optparse.Option variant whose numeric types can carry an AMUSE unit.

    Adds a ``unit`` attribute to the standard option attributes; numeric
    values are checked by check_builtin_unit, which attaches the unit to
    the converted value.
    """

    TYPE_CHECKER = {
        "int": check_builtin_unit,
        "long": check_builtin_unit,
        "float": check_builtin_unit,
        "complex": check_builtin_unit,
        "choice": optparse.check_choice,
    }

    ATTRS = optparse.Option.ATTRS + ['unit', ]

    def convert_value(self, opt, value):
        # Options that take no value pass None straight through.
        if value is None:
            return None
        if self.nargs == 1:
            return self.check_value(opt, value)
        return tuple(self.check_value(opt, part) for part in value)
class IndentedHelpFormatter(optparse.IndentedHelpFormatter):
    """Help formatter that expands a ``%unit`` placeholder in an option's
    help text with the string form of the option's unit.
    """

    def __init__(self,
                 indent_increment=2,
                 max_help_position=24,
                 width=None,
                 short_first=1):
        optparse.IndentedHelpFormatter.__init__(
            self, indent_increment, max_help_position, width, short_first)
        # Placeholder token replaced by expand_unit().
        self.unit_tag = "%unit"

    def format_option(self, option):
        # NOTE(review): this method appears adapted from the stdlib
        # optparse.HelpFormatter.format_option, with expand_unit() inserted
        # after expand_default() — confirm against the stdlib source.
        #
        # The help for each option consists of two parts:
        #   * the opt strings and metavars
        #     eg. ("-x", or "-fFILENAME, --file=FILENAME")
        #   * the user-supplied help string
        #     eg. ("turn on expert mode", "read data from FILENAME")
        #
        # If possible, we write both of these on the same line:
        #   -x      turn on expert mode
        #
        # But if the opt string list is too long, we put the help
        # string on a second line, indented to the same column it would
        # start in if it fit on the first line.
        #   -fFILENAME, --file=FILENAME
        #           read data from FILENAME
        result = []
        opts = self.option_strings[option]
        opt_width = self.help_position - self.current_indent - 2
        if len(opts) > opt_width:
            opts = "%*s%s\n" % (self.current_indent, "", opts)
            indent_first = self.help_position
        else:  # start help on same line as opts
            opts = "%*s%-*s  " % (self.current_indent, "", opt_width, opts)
            indent_first = 0
        result.append(opts)
        if option.help:
            help_text = self.expand_default(option)
            # Substitute the %unit tag before wrapping the text.
            help_text = self.expand_unit(option, help_text)
            help_lines = textwrap.wrap(help_text, self.help_width)
            result.append("%*s%s\n" % (indent_first, "", help_lines[0]))
            result.extend(["%*s%s\n" % (self.help_position, "", line)
                           for line in help_lines[1:]])
        elif opts[-1] != "\n":
            result.append("\n")
        return "".join(result)

    def expand_unit(self, option, help_text):
        """Replace the unit tag in *help_text* with str(option.unit).

        Returns the text unchanged when there is no parser, no tag, or the
        option has no unit.
        """
        if self.parser is None or not self.unit_tag:
            return help_text
        if option.unit is None:
            return help_text
        unit = option.unit
        return help_text.replace(self.unit_tag, str(unit))
class OptionParser(optparse.OptionParser):
    """optparse.OptionParser that supports unit-carrying options.

    Uses the unit-aware Option class and IndentedHelpFormatter by default,
    and converts plain-number defaults of unit-carrying options into
    quantities.
    """

    def __init__(self,
                 usage=None,
                 option_list=None,
                 option_class=Option,
                 version=None,
                 conflict_handler="error",
                 description=None,
                 formatter=None,
                 add_help_option=True,
                 prog=None,
                 epilog=None):
        # Default to the unit-aware formatter so "%unit" in help strings
        # is expanded.
        if formatter is None:
            formatter = IndentedHelpFormatter()
        optparse.OptionParser.__init__(
            self, usage,
            option_list, option_class,
            version, conflict_handler,
            description, formatter,
            add_help_option, prog, epilog
        )

    def get_default_values(self):
        """Return the parser defaults as an optparse.Values object.

        String defaults are run through the option's value checker (so they
        get converted and unit-tagged); bare-number defaults of options
        that declare a unit are wrapped into quantities.
        """
        if not self.process_default_values:
            # Old, pre-Optik 1.5 behaviour.
            return optparse.Values(self.defaults)

        defaults = self.defaults.copy()
        for option in self._get_all_options():
            default = defaults.get(option.dest)
            if isinstance(default, str):
                opt_str = option.get_opt_string()
                defaults[option.dest] = option.check_value(opt_str, default)
            elif option.unit is not None and not quantities.is_quantity(default):
                # Idiom fix: was `not option.unit is None`.
                defaults[option.dest] = quantities.new_quantity(default, option.unit)
        return optparse.Values(defaults)
| 4,718
| 32.468085
| 85
|
py
|
amuse
|
amuse-main/src/amuse/units/core.py
|
from amuse.support.core import late
from amuse.support import exceptions
import numpy
from amuse.support.core import memoize
from amuse.support.core import MultitonMetaClass
class system(object):
    """A named system of units (e.g. SI, cgs) holding its base units.

    Every instance registers itself in the class-level ALL mapping so it
    can later be found by name with system.get().
    """

    # Global name -> system registry, shared by all instances.
    ALL = {}

    def __init__(self, name):
        self.name = name
        self.bases = []
        self.mapping_from_base_name_to_base = {}
        self.ALL[self.name] = self
        self.index = len(self.ALL)

    def __str__(self):
        return self.name

    def reference_string(self):
        # A Python expression that re-resolves this system by name.
        return "{0}.get({1!r})".format('system', self.name)

    def add_base(self, unit):
        """Register *unit* as a base unit of this system and assign its index."""
        position = len(self.bases)
        unit.system = self
        unit.index = position
        self.bases.append(unit)
        self.mapping_from_base_name_to_base[unit.quantity] = unit

    def base(self, name):
        """Return the base unit for the base quantity called *name*."""
        return self.mapping_from_base_name_to_base[name]

    @classmethod
    def get(cls, name):
        """Look up a system by name, loading the standard unit modules on a miss."""
        try:
            return cls.ALL[name]
        except KeyError:
            # Importing these modules registers their systems; then retry.
            from amuse.units import nbody_system
            from amuse.units import si
            return cls.ALL[name]

    def __reduce__(self):
        # Pickle by name only; unpickling re-resolves via the registry.
        return (get_system_with_name, (self.name,))
class unit(object):
    """
    Abstract base class for unit objects.

    Two classes of units are defined:

    base units
        The base units in a given system of units. For SI, these
        are meter, kilogram, second, ampere, kelvin, mole and
        candele. See the si module :mod:`amuse.units.si`

    derived units
        Derived units are created by dividing or multiplying
        with a number or with another unit. For example,
        to get a velocity unit we can devine vel = 1000 * m / s

    Units can also be named, by creating a named unit.
    """
    # High priority makes numpy defer mixed ndarray/unit operations to the
    # unit's reflected operators instead of broadcasting over the array.
    __array_priority__ = 100

    def __mul__(self, other):
        if isinstance(other, unit):
            return mul_unit(self, other)
        else:
            return other * self
            # return factor_unit(other, self)

    def __truediv__(self, other):
        if isinstance(other, unit):
            return div_unit(self, other)
        else:
            return (1.0 / other) * self
            # return factor_unit(1.0 / other, self)

    def __rmul__(self, other):
        # 1 * unit is the unit itself; any other number wraps it in a factor.
        if other == 1:
            return self
        else:
            return factor_unit(other, self)

    def __ror__(self, value):
        """Create a new Quantity object.

        :argument value: numeric value of the quantity, can be
            a number or a sequence (list or ndarray)
        :returns: new ScalarQuantity or VectorQuantity object
            with this unit

        Examples

        >>> from amuse.units import units
        >>> 100 | units.kg
        quantity<100 kg>
        """
        return self.new_quantity(value)

    def __rtruediv__(self, other):
        # number / unit  ->  number * unit**-1
        return factor_unit(other, pow_unit(-1, self))

    def __div__(self, other):
        # Python 2 compatibility; delegates to __truediv__.
        return self.__truediv__(other)

    def __rdiv__(self, other):
        return self.__rtruediv__(other)

    def __pow__(self, other):
        if other == 1:
            return self
        else:
            return pow_unit(other, self)

    def __call__(self, x):
        # unit(x) is equivalent to x | unit.
        return self.new_quantity(x)

    def __eq__(self, other):
        if self is other:
            return True
        elif isinstance(other, unit):
            return self.base == other.base and self.factor == other.factor
        else:
            return False

    def __ne__(self, other):
        if isinstance(other, unit):
            if (isinstance(self, base_unit) and isinstance(other, base_unit)) or \
                    isinstance(self, nonnumeric_unit) or isinstance(other, nonnumeric_unit):
                # Defer to the other operand (or Python's identity fallback).
                return NotImplemented
            # Bug fix: __ne__ must be the logical negation of __eq__.  The
            # previous expression used 'and', so two units with the same base
            # but different factors compared neither equal nor unequal.
            return not (self.base == other.base and self.factor == other.factor)
        else:
            return True

    def __hash__(self):
        return self._hash

    @late
    def _hash(self):
        # Identity-based hash, computed once and cached by @late.
        return hash(id(self))

    @property
    def dtype(self):
        # Numeric units have no fixed dtype; subclasses may override.
        return None

    @property
    def number(self):
        return 1.0

    @property
    def unit(self):
        # A unit is its own unit, mirroring the Quantity interface.
        return self

    def is_zero(self):
        return False

    def iskey(self):
        return False

    def new_quantity(self, value):
        """Create a new Quantity object.

        :argument value: numeric value of the quantity, can be
            a number or a sequence (list or ndarray)
        :returns: new ScalarQuantity or VectorQuantity object
            with this unit
        """
        from amuse.units import quantities
        return quantities.new_quantity(value, self)

    def to_simple_form(self):
        """Convert unit to a form with only one factor and powers

        :result: Unit with only a factor and power terms

        >>> from amuse.units import units
        >>> N = (units.m * units.kg) / (units.s * units.s)
        >>> N
        unit<m * kg / (s * s)>
        >>> J = N * units.m
        >>> J
        unit<m * kg / (s * s) * m>
        >>> J.to_simple_form()
        unit<m**2 * kg * s**-2>
        """
        if not self.base:
            return none_unit('none', 'none') * self.factor

        result = self.factor
        for n, base in self.base:
            if n == 1:
                if result == 1:
                    result = base
                else:
                    result = result * base
            else:
                result = result * (base ** n)
        return result

    def to_reduced_form(self):
        """Convert unit to a reduced (simpler) form
        """
        if not self.base:
            return none_unit('none', 'none') * self.factor

        total_factor = 1
        combined_unit = None
        for factor, power, unit in self.get_parts_with_power():
            total_factor *= factor
            if power == 0:
                pass
            else:
                if combined_unit is None:
                    combined_unit = unit ** power
                else:
                    combined_unit = combined_unit * (unit ** power)
        if total_factor == 1:
            return combined_unit
        else:
            return factor_unit(total_factor, combined_unit)

    def to_factor_and_reduced_form(self):
        """Convert unit to a reduced (simpler) form, returning the
        accumulated numeric factor and the unit separately.
        """
        if not self.base:
            return none_unit('none', 'none') * self.factor

        total_factor = 1
        combined_unit = None
        for factor, power, unit in self.get_parts_with_power():
            total_factor *= factor
            if power == 0:
                pass
            else:
                if combined_unit is None:
                    combined_unit = unit ** power
                else:
                    combined_unit = combined_unit * (unit ** power)
        return total_factor, combined_unit

    def are_bases_equal(self, other):
        """True when both units contain the same base units with the same
        powers (order-independent)."""
        if len(self.base) != len(other.base):
            return False
        for n1, unit1 in sorted(self.base, key=lambda x: x[1].index):
            found = False
            for n2, unit2 in other.base:
                if unit1 == unit2:
                    if not n2 == n1:
                        return False
                    found = True
                    break
            if not found:
                return False
        return True

    def _compare_bases(self, other, eps=None):
        # Like are_bases_equal, but assumes positionally aligned bases and
        # tolerates a relative difference of eps in the powers.
        if len(self.base) != len(other.base):
            return False
        if eps is None:
            eps = numpy.finfo(numpy.double).eps
        for (n1, unit1), (n2, unit2) in zip(self.base, other.base):
            if not unit1 == unit2:
                return False
            if n1 == n2:
                continue
            else:
                if abs(n1 - n2) < eps:
                    continue
                if abs(n2) > abs(n1):
                    relativeError = abs((n1 - n2) * 1.0 / n2)
                else:
                    relativeError = abs((n1 - n2) * 1.0 / n1)
                if relativeError <= eps:
                    continue
                else:
                    return False
        return True

    @memoize
    def conversion_factor_from(self, x):
        """Return the numeric factor converting a value in unit *x* to this
        unit.

        :raises IncompatibleUnitsException: when the bases differ.
        """
        if x.base is None:
            return self.factor * 1.0
        elif self._compare_bases(x):
            this_factor = self.factor * 1.0
            other_factor = x.factor
            # Returns exactly 1 when the factors are equal (avoids rounding
            # noise from the division), otherwise the ratio.
            return 1 * (this_factor == other_factor) or this_factor / other_factor
        else:
            raise IncompatibleUnitsException(x, self)

    def in_(self, x):
        """Express this quantity in the given unit

        :argument unit: The unit to express this quantity in
        :result: A Quantity object

        Examples

        >>> from amuse.units import units
        >>> l = 1 | units.AU
        >>> l.in_(units.km)
        quantity<149597870.691 km>
        """
        return self.as_quantity_in(x)

    def as_quantity_in(self, unit):
        """Express this unit as a quantity in the given unit

        :argument unit: The unit to express this unit in
        :result: A Quantity object

        Examples

        >>> from amuse.units import units
        >>> ton = 1000 * units.kg
        >>> ton.as_quantity_in(units.kg)
        quantity<1000.0 kg>
        """
        from amuse.units import quantities
        if isinstance(unit, quantities.Quantity):
            raise exceptions.AmuseException("Cannot expres a unit in a quantity")
        factor = self.conversion_factor_from(unit)
        return quantities.new_quantity(factor, unit)

    def value_in(self, unit):
        """
        Return a numeric value of this unit in the given unit.
        Works only when the units are compatible, i.e. from
        tonnage to kg's.

        A number is returned without any unit information.

        :argument unit: wanted unit of the value
        :returns: number in the given unit

        >>> from amuse.units import units
        >>> x = units.km
        >>> x.value_in(units.m)
        1000.0
        """
        return self.conversion_factor_from(unit)

    def __repr__(self):
        return 'unit<' + str(self) + '>'

    def combine_bases(self, base1, base2):
        """Align two base tuples by base-unit index, returning a list of
        (power1, power2, base_unit) triples; missing entries get power 0.

        :raises AmuseException: when the same slot holds base units from
            different unit systems.
        """
        indexed1 = [None] * 7
        for n1, unit1 in base1:
            indexed1[unit1.index] = (n1, unit1)

        indexed2 = [None] * 7
        for n2, unit2 in base2:
            indexed2[unit2.index] = (n2, unit2)

        result = []
        for sub1, sub2 in zip(indexed1, indexed2):
            if sub1 is not None:
                if sub2 is not None:
                    if sub1[1] == sub2[1]:
                        result.append((sub1[0], sub2[0], sub1[1]))
                    else:
                        raise exceptions.AmuseException("Cannot combine units from "
                            "different systems: {0} and {1}".format(sub1[1], sub2[1]))
                else:
                    result.append((sub1[0], 0, sub1[1]))
            elif sub2 is not None:
                result.append((0, sub2[0], sub2[1]))
        return result

    def has_same_base_as(self, other):
        """Determine if the base of other is the same as the
        base of self.

        :argument other: unit to compare base to
        :result: True, if bases are compatiple.

        >>> from amuse.units import units
        >>> mps = units.m / units.s
        >>> kph = units.km / units.hour
        >>> mps.has_same_base_as(kph)
        True
        >>> mps.has_same_base_as(units.km)
        False
        """
        return other.base == self.base

    def base_unit(self):
        """Return this unit stripped of its numeric factor (product of its
        base units raised to their powers)."""
        if not self.base:
            return none_unit('none', 'none')

        unit = 1
        for n, base in self.base:
            if n == 1:
                unit = unit * base
            else:
                unit = unit * (base ** n)
        return unit

    def is_non_numeric(self):
        return False

    def is_generic(self):
        return False

    def is_none(self):
        return False

    def get_parts_with_power(self):
        """
        The parts of this units as a list of tuple with factor, power and unit
        """
        return ((1.0, 1, self),)

    def convert_result_value(self, method, definition, value):
        return self.new_quantity(value)

    def convert_argument_value(self, method, definition, value):
        return value.value_in(self)

    def append_result_value(self, method, definition, value, result):
        result.append(self.convert_result_value(method, definition, value))

    def to_array_of_floats(self):
        """Represent a unit as an array of 9 64-bit floats: element 0 is the
        factor, element 1 the unit-system index, elements 2..8 the power of
        each base unit.

        Cannot be used for non numeric units
        """
        result = numpy.zeros(9, dtype=numpy.float64)
        if not self.base:
            return result

        result[0] = self.factor
        for n, base in self.base:
            result[base.index + 2] = n
            result[1] = base.system.index
        return result

    def describe_array_of_floats(self):
        """Create a human readable description of the array of floats
        """
        if not self.base:
            return 'not a numerical unit'

        parts = ['factor']
        parts.extend(['-'] * 8)
        for n, base in self.base:
            if n != 0:
                parts[base.index + 2] = str(base)
            else:
                parts[base.index + 2] = '-'
            parts[1] = str(base.system)
        return ', '.join(parts)

    @property
    def base_system(self):
        """The unit system all base units belong to.

        :raises Exception: when base units from different systems are mixed.
        """
        base = self.base
        system = self.base[0][1].system
        for b in base:
            if system != b[1].system:
                raise Exception("inconsistent unit found")
        return self.base[0][1].system
class base_unit(unit):
    """
    base_unit objects are orthogonal, indivisable units
    of a sytem of units.

    A system of units contains a set of base units

    :argument quantity: name of the base quantity, for example *length*
    :argument name: name of the unit, for example *meter*
    :argument symbol: symbol of the unit, for example *m*
    :argument system: system of units object

    >>> cgs = system("cgs")
    >>> cm = base_unit("length", "centimetre", "cm", cgs)
    >>> cm
    unit<cm>
    """

    def __init__(self, quantity, name, symbol, system):
        self.quantity = quantity
        self.name = name
        self.symbol = symbol
        self.system = system
        # Registration also assigns self.index within the system.
        system.add_base(self)

    def __str__(self):
        return self.symbol

    def __hash__(self):
        return self._hash

    @property
    def factor(self):
        """
        The multiplication factor of a unit.
        For example, factor is 1000 for km.
        """
        return 1

    @late
    def base(self):
        """
        The base represented as a list of tuples.
        Each tuple consists of an power and a unit.
        """
        return ((1, self),)

    def reference_string(self):
        # Expression that re-resolves this base unit via its system.
        return '{0}.base({1!r})'.format(self.system.reference_string(), self.quantity)

    def __reduce__(self):
        # Pickle by (system, quantity-name) pair.
        return (get_base_unit_with_name, (self.system, self.quantity,))

    def __eq__(self, other):
        # Two distinct base_unit instances return NotImplemented from both
        # sides, so Python falls back to identity: base units are equal only
        # when they are the same object.
        if self is other:
            return True
        elif isinstance(other, base_unit):
            return NotImplemented
        else:
            return False
class no_system(object):
    """Name registry for units that do not belong to any unit system
    (none_unit, nonnumeric_unit and friends)."""

    # Shared name -> unit mapping.
    ALL = {}

    @classmethod
    def set(cls, unit):
        """Register *unit* under its name."""
        key = unit.name
        cls.ALL[key] = unit

    @classmethod
    def get(cls, name):
        """Return the previously registered unit called *name*."""
        return cls.ALL[name]
class none_unit(unit, metaclass=MultitonMetaClass):
    """Dimensionless unit outside any unit system: factor 1 and an empty
    base.  The Multiton metaclass makes construction with equal arguments
    return the same instance.
    """

    def __init__(self, name, symbol):
        self.name = name
        self.symbol = symbol
        no_system.set(self)

    def __str__(self):
        return self.symbol

    def reference_string(self):
        return 'no_system.get({0!r})'.format(self.name)

    @late
    def factor(self):
        return 1

    @late
    def base(self):
        # Empty base: carries no dimensions.
        return ()

    def get_parts_with_power(self):
        return ()

    def is_none(self):
        return True
class zero_unit(none_unit):
    """Unit of the zero quantity: compatible with every other unit.  Its
    base is None (not the empty tuple), which conversion code treats as a
    wildcard."""

    def __init__(self):
        none_unit.__init__(self, 'zero', 'zero')

    def __str__(self):
        return self.symbol

    def is_zero(self):
        return True

    @late
    def base(self):
        # None (not ()) signals "any base" to conversion_factor_from.
        return None

    def get_parts_with_power(self):
        return None

    def conversion_factor_from(self, x):
        # Zero converts trivially: 1 against another zero-like unit,
        # otherwise the other unit's own factor.
        if x.base is None:
            return 1.0
        else:
            return x.factor
class key_unit(none_unit):
    """Unit for particle-key attributes: dimensionless, stored as uint64."""

    def iskey(self):
        return True

    @property
    def dtype(self):
        return numpy.dtype('uint64')
class nonnumeric_unit(unit):
    """
    nonnumeric_unit objects are indivisable units
    not connected to any system of units.

    nonnumeric_units cannot be used to
    derive new units from.

    nonnumeric_units have no physical meaning.
    """

    def __init__(self, name, symbol):
        self.name = name
        self.symbol = symbol
        no_system.set(self)

    def __str__(self):
        return self.symbol

    def reference_string(self):
        return 'no_system.get({0!r})'.format(self.name)

    # All arithmetic except the identity (multiplying / dividing / raising
    # by 1) is forbidden: these units cannot be composed.
    def __mul__(self, other):
        if other == 1:
            return self
        raise exceptions.AmuseException("Cannot derive other units from a non numeric unit")

    def __truediv__(self, other):
        raise exceptions.AmuseException("Cannot derive other units from a non numeric unit")

    def __rmul__(self, other):
        if other == 1:
            return self
        raise exceptions.AmuseException("Cannot derive other units from a non numeric unit")

    def __rtruediv__(self, other):
        if other == 1:
            return self
        raise exceptions.AmuseException("Cannot derive other units from a non numeric unit")

    def __pow__(self, other):
        if other == 1:
            return self
        raise exceptions.AmuseException("Cannot derive other units from a non numeric unit")

    def __div__(self, other):
        return self.__truediv__(other)

    def __rdiv__(self, other):
        return self.__rtruediv__(other)

    def is_non_numeric(self):
        return True

    @property
    def factor(self):
        return 1

    @property
    def base(self):
        return ((1, self),)

    def value_to_string(self, value):
        # Subclasses override to render a value of this unit as text.
        return None

    def is_valid_value(self, value):
        # Subclasses override to validate a raw value.
        return False

    def __eq__(self, other):
        # Like base_unit: distinct instances fall back to identity
        # comparison via the mutual NotImplemented.
        if self is other:
            return True
        elif isinstance(other, nonnumeric_unit):
            return NotImplemented
        else:
            return False
class string_unit(nonnumeric_unit):
    """
    String unit objects define quantities with a string value.
    These have no physical meaning, but are needed for some
    legacy codes. For example the path of a file.
    """

    def __init__(self, name, symbol):
        nonnumeric_unit.__init__(self, name, symbol)

    def value_to_string(self, value):
        # None renders as the empty string.
        return '' if value is None else value

    def is_valid_value(self, value):
        return value is None or isinstance(value, str)

    @property
    def dtype(self):
        # Fixed-width 256-byte string storage.
        return numpy.dtype('S256')
class enumeration_unit(nonnumeric_unit):
    """
    Enumeration unit objects define a fixed set of quantities.

    A quantity with a enumeration_unit can only have a
    value defined in the set of values of the enumeration_unit.

    :argument possible_values: A sequence or iterable with all
        the possible values. If None the possible values are
        integers ranging from 0 to the length of the
        names_for_values argument
    :argument names_for_values: A sequence of strings defining a
        display name for each value. If None the names are the
        string vales of the values in the possible_values arguments

    Examples

    >>> my_unit = enumeration_unit('my_unit','my_unit', [1,2,5], ["star","gas","planet"])
    >>> 2 | my_unit
    quantity<2 - gas>
    >>> list(my_unit.quantities())
    [quantity<1 - star>, quantity<2 - gas>, quantity<5 - planet>]
    >>> 3 | my_unit
    Traceback (most recent call last):
        ...
    AmuseException: <3> is not a valid value for unit<my_unit>

    Or, with default values:

    >>> my_unit = enumeration_unit('my_unit','my_unit', None, ["star","gas","planet"])
    >>> 2 | my_unit
    quantity<2 - planet>
    >>> list(my_unit.quantities())
    [quantity<0 - star>, quantity<1 - gas>, quantity<2 - planet>]
    """
    # Global name -> enumeration_unit registry (used for pickling).
    DEFINED = {}

    def __init__(self, name, symbol, possible_values=None, names_for_values=None):
        nonnumeric_unit.__init__(self, name, symbol)

        self.possible_values = self._initial_list_of_possible_values(possible_values, names_for_values)
        self.names_for_values = self._initial_names_for_values(possible_values, names_for_values)
        if not len(self.possible_values) == len(self.names_for_values):
            raise exceptions.AmuseException("Must provide equal lenght list for values({0}) and names({1})".format(len(self.possible_values), len(self.names_for_values)))
        self.mapping_from_values_to_names = self._initial_mapping_from_values_to_names()
        self.DEFINED[name] = self

    def _initial_list_of_possible_values(self, possible_values, names_for_values):
        # Default to 0..len(names)-1 when only names are given.
        if possible_values is None:
            if names_for_values is None:
                raise exceptions.AmuseException("Must provide a list of values and / or a list of names for each value")
            return list(range(len(names_for_values)))
        else:
            return list(possible_values)

    def _initial_mapping_from_values_to_names(self):
        result = {}
        for value, name in zip(self.possible_values, self.names_for_values):
            result[value] = name
        return result

    def _initial_names_for_values(self, possible_values, names_for_values):
        # Default to str(value) when only values are given.
        if names_for_values is None:
            if possible_values is None:
                raise exceptions.AmuseException("Must provide a list of values and / or a list of names for each value")
            return [str(x) for x in possible_values]
        else:
            return list(names_for_values)

    def __hash__(self):
        return self._hash

    def is_valid_value(self, value):
        return value in self.mapping_from_values_to_names

    def value_to_string(self, value):
        return self.mapping_from_values_to_names[value]

    def quantities(self):
        """Yield every possible value as a quantity of this unit."""
        for x in self.possible_values:
            yield x | self

    def __call__(self, string):
        """Return the quantity whose display name is *string*.

        :raises AmuseException: when the name is not in this enumeration.
        """
        # Bug fix: list.index raises ValueError for unknown names (so the
        # AmuseException below was unreachable), and the old `index > 0`
        # test wrongly rejected the first name (index 0).
        try:
            index = self.names_for_values.index(string)
        except ValueError:
            raise exceptions.AmuseException("{0} is not a valid name for {1} enumeration type".format(string, self.name))
        return self.possible_values[index] | self

    @property
    def dtype(self):
        return numpy.dtype('int32')

    @classmethod
    def get(cls, name):
        """Look up an enumeration by name, loading the standard unit
        modules on a miss (importing them defines their enumerations)."""
        try:
            return cls.DEFINED[name]
        except KeyError:
            from amuse.units import nbody_system
            from amuse.units import si
            return cls.DEFINED[name]

    def __reduce__(self):
        # Pickle by name; unpickling re-resolves via the registry.
        return (get_enumeration_unit_with_name, (self.name,))
class named_unit(unit):
    """
    A named_unit object defines an alias for another
    unit. When printing a named_unit, the symbol
    is shown and not the unit parts. For all other
    operations the named_units works exactly like
    the aliased unit.

    :argument name: Long name or description of the unit
    :argument symbol: Short name to show when printing units
        or quantities
    :argument unit: The unit to alias

    >>> from amuse.units import si
    >>> 60.0 * si.s
    unit<60.0 * s>
    >>> minute = named_unit("minute","min", 60*si.s)
    >>> minute
    unit<min>
    >>> (20.0 | (60.0 * si.s)).as_quantity_in(minute)
    quantity<20.0 min>
    """

    def __init__(self, name, symbol, unit):
        self.name = name
        self.symbol = symbol
        self.local_unit = unit

    def __str__(self):
        # Only the alias symbol is shown, not the aliased unit's parts.
        return self.symbol

    def reference_string(self):
        return self.to_simple_form().reference_string()

    @late
    def factor(self):
        # Delegates to the aliased unit.
        return self.local_unit.factor

    @late
    def base(self):
        return self.local_unit.base

    def is_none(self):
        return self.local_unit.is_none()
class derived_unit(unit, metaclass=MultitonMetaClass):
    """
    Abstract base class of derived units. New units
    can be derived from base_units. Each operation on
    a unit creates a new derived_unit.
    """
    # The Multiton metaclass ensures identical constructor arguments yield
    # the same instance.
    pass
class factor_unit(derived_unit):
    """
    A factor_unit object defines a unit multiplied by
    a number. Do not call this method directly,
    factor_unit objects are supposed to be created by
    multiplying a number with a unit.

    :argument unit: The unit to derive from.
    :argument factor: The multiplication factor.

    >>> from amuse.units import si
    >>> minute = 60.0 * si.s
    >>> minute.as_quantity_in(si.s)
    quantity<60.0 s>
    >>> hour = 60.0 * minute
    >>> hour
    unit<60.0 * 60.0 * s>
    >>> hour.as_quantity_in(si.s)
    quantity<3600.0 s>
    """

    def __init__(self, factor, unit, name=None, symbol=None):
        self.name = name
        self.symbol = symbol
        self.local_factor = factor
        self.local_unit = unit

    def __str__(self):
        # Without a symbol: "<factor> * <unit>"; with one: "<symbol><unit>"
        # (e.g. a prefix like "k" in front of "m").
        if self.symbol is None:
            return str(self.local_factor) + ' * ' + str(self.local_unit)
        return self.symbol + str(self.local_unit)

    def reference_string(self):
        return '(' + str(self.local_factor) + ' * ' + self.local_unit.reference_string() + ')'

    @late
    def factor(self):
        return self.local_factor * self.local_unit.factor

    @late
    def base(self):
        return self.local_unit.base

    def get_parts_with_power(self):
        # Fold the local factor into the first part of the wrapped unit.
        local_unit_parts = self.local_unit.get_parts_with_power()
        result = []
        is_first = True
        for factor, power, unit in local_unit_parts:
            if is_first:
                factor *= self.local_factor
                is_first = False
            result.append((factor, power, unit))
        return result
class mul_unit(derived_unit):
    """
    A mul_unit object defines a unit multiplied by
    another unit. Do not call this method directly,
    mul_unit objects are supposed to be created by
    multiplying units.

    :argument left_hand: Left hand side of the multiplication.
    :argument right_hand: Right hand side of the multiplication.

    >>> from amuse.units import si
    >>> area = si.m * si.m
    >>> area
    unit<m * m>
    >>> hectare = (100 * si.m) * (100 * si.m)
    >>> hectare.as_quantity_in(area)
    quantity<10000.0 m * m>
    """

    def __init__(self, left_hand, right_hand):
        self.left_hand = left_hand
        self.right_hand = right_hand

    def __str__(self):
        return str(self.left_hand) + ' * ' + str(self.right_hand)

    def reference_string(self):
        return '(' + self.left_hand.reference_string() + ' * ' + self.right_hand.reference_string() + ')'

    @late
    def factor(self):
        return self.left_hand.factor * self.right_hand.factor

    @late
    def base(self):
        # Add the powers of aligned base units; drop entries that cancel.
        return tuple(
            [
                x
                for x in [
                    (x[0] + x[1], x[2])
                    for x in self.combine_bases(self.left_hand.base, self.right_hand.base)
                ]
                if x[0] != 0
            ]
        )

    def get_parts_with_power(self):
        # Merge parts referring to the identical unit object by multiplying
        # factors and adding powers; unmatched right-hand parts are appended.
        lhs_parts = list(self.left_hand.get_parts_with_power())
        rhs_parts = list(self.right_hand.get_parts_with_power())

        result = []
        for lhs_factor, lhs_power, lhs_unit in lhs_parts:
            rhs_index = 0
            found_match = False
            for rhs_factor, rhs_power, rhs_unit in rhs_parts:
                if lhs_unit is rhs_unit:
                    result.append((lhs_factor * rhs_factor, lhs_power + rhs_power, lhs_unit,))
                    found_match = True
                    del rhs_parts[rhs_index]
                    break
                rhs_index += 1
            if not found_match:
                result.append((lhs_factor, lhs_power, lhs_unit,))
        for rhs_factor, rhs_power, rhs_unit in rhs_parts:
            result.append((rhs_factor, rhs_power, rhs_unit,))

        return result
class pow_unit(derived_unit):
    """
    A pow_unit object defines a unit as
    another unit to a specified power.

    Do not call this method directly,
    pow_unit objects are supposed to be created by
    taking powers of units.

    :argument power: Power of the unit
    :argument unit: The unit to derive from

    >>> from amuse.units import si
    >>> area = si.m**2
    >>> area
    unit<m**2>
    >>> area.as_quantity_in(si.m * si.m)
    quantity<1 m * m>
    >>> hectare = (100 * si.m) ** 2
    >>> hectare.as_quantity_in(area)
    quantity<10000.0 m**2>
    """

    def __init__(self, power, unit):
        self.power = power
        self.local_unit = unit

    def __str__(self):
        # Parenthesize derived units so precedence reads correctly.
        if isinstance(self.local_unit, derived_unit):
            return '(' + str(self.local_unit) + ')**' + str(self.power)
        else:
            return str(self.local_unit) + '**' + str(self.power)

    def reference_string(self):
        return '(' + self.local_unit.reference_string() + '**' + str(self.power) + ')'

    @late
    def base(self):
        # Multiply each base power by this power; drop entries that cancel.
        return tuple(
            [
                x
                for x in [
                    (x[0] * self.power, x[1])
                    for x in self.local_unit.base
                ]
                if x[0] != 0
            ]
        )

    @late
    def factor(self):
        return self.local_unit.factor ** self.power

    def get_parts_with_power(self):
        # Factors are raised to the power, part powers are multiplied by it.
        result = []
        for factor, power, unit in self.local_unit.get_parts_with_power():
            result.append((factor ** self.power, power * self.power, unit,))
        return result
class div_unit(derived_unit):
    """
    A div_unit object defines a unit divided by
    another unit. Do not call this method directly,
    div_unit objects are supposed to be created by
    dividing units.

    :argument left_hand: Left hand side of the division.
    :argument right_hand: Right hand side of the division.

    >>> from amuse.units import si
    >>> speed = si.m / si.s
    >>> speed
    unit<m / s>
    >>> speed_with_powers = si.m * si.s ** -1
    >>> speed.as_quantity_in(speed_with_powers)
    quantity<1 m * s**-1>
    """

    def __init__(self, left_hand, right_hand):
        self.left_hand = left_hand
        self.right_hand = right_hand

    def __str__(self):
        # Parenthesize a derived divisor so precedence reads correctly.
        if isinstance(self.right_hand, derived_unit):
            return str(self.left_hand) + ' / (' + str(self.right_hand) + ')'
        else:
            return str(self.left_hand) + ' / ' + str(self.right_hand) + ''

    def reference_string(self):
        return '(' + self.left_hand.reference_string() + '/' + self.right_hand.reference_string() + ')'

    @late
    def factor(self):
        # * 1.0 forces true division even for integer factors.
        return self.left_hand.factor * 1.0 / self.right_hand.factor

    @late
    def base(self):
        # Subtract the divisor's base powers; drop entries that cancel.
        return tuple(
            [
                x
                for x in [
                    (x[0] - x[1], x[2])
                    for x in self.combine_bases(self.left_hand.base, self.right_hand.base)
                ]
                if x[0] != 0
            ]
        )

    def get_parts_with_power(self):
        # Like mul_unit, but matched parts divide factors and subtract
        # powers; unmatched divisor parts are appended inverted.
        lhs_parts = list(self.left_hand.get_parts_with_power())
        rhs_parts = list(self.right_hand.get_parts_with_power())

        result = []
        for lhs_factor, lhs_power, lhs_unit in lhs_parts:
            rhs_index = 0
            found_match = False
            for rhs_factor, rhs_power, rhs_unit in rhs_parts:
                if lhs_unit is rhs_unit:
                    result.append((lhs_factor / rhs_factor, lhs_power - rhs_power, lhs_unit,))
                    found_match = True
                    del rhs_parts[rhs_index]
                    break
                rhs_index += 1
            if not found_match:
                result.append((lhs_factor, lhs_power, lhs_unit,))
        for rhs_factor, rhs_power, rhs_unit in rhs_parts:
            result.append((1.0 / rhs_factor, -rhs_power, rhs_unit,))

        return result
class UnitException(exceptions.AmuseException):
    """Generic unit-related error."""
    formatstring = "Unit exception: {0}"
class IncompatibleUnitsException(exceptions.AmuseException):
    """Raised when converting between units whose bases differ."""
    formatstring = "Cannot express {1} in {0}, the units do not have the same bases"

    def __init__(self, *arguments):
        # NOTE(review): bypasses AmuseException.__init__ on purpose (calls
        # Exception.__init__ directly) and stores the raw arguments for the
        # format string — confirm against AmuseException's implementation.
        Exception.__init__(self)
        self.arguments = arguments
def get_system_with_name(name):
    """Module-level pickle helper: resolve a unit system by name."""
    return system.get(name)
def get_enumeration_unit_with_name(name):
    """Module-level pickle helper: resolve an enumeration unit by name."""
    return enumeration_unit.get(name)
def get_base_unit_with_name(system, name):
    """Module-level pickle helper: resolve a base unit by quantity name
    within the given system."""
    return system.base(name)
class UnitWithSpecificDtype(named_unit):
    """A named alias of *unit* that pins the storage dtype of its values.

    The symbol is "<unit>_<dtype>".
    """

    def __init__(self, unit, dtype):
        self.specific_dtype = dtype
        symbol = str(unit) + "_" + str(dtype)
        named_unit.__init__(self, symbol, symbol, unit)

    @property
    def dtype(self):
        return self.specific_dtype
@memoize
def unit_with_specific_dtype(unit, dtype):
    """Memoized factory for dtype-pinned units.

    Returns *unit* unchanged when either argument is None, so repeated
    calls with the same (unit, dtype) pair share one alias instance.
    """
    if unit is None or dtype is None:
        return unit
    return UnitWithSpecificDtype(unit, dtype)
| 33,642
| 27.106099
| 170
|
py
|
amuse
|
amuse-main/src/amuse/units/constants.py
|
#This is an auto generated file, do not change manually. Instead if you want to add constants
#or change them, change the nist.txt file and run nist.py
import numpy
from amuse.units.si import *
from amuse.units.derivedsi import *
#BASE UNITS***********************************************
X220X_lattice_spacing_of_silicon = 1.920155762e-10 | m
alpha_particle_mass = 6.6446562e-27 | kg
alpha_particle_mass_energy_equivalent = 5.97191917e-10 | J
Angstrom_star = 1.00001498e-10 | m
u = 1.660538782e-27 | kg
atomic_mass_constant_energy_equivalent = 1.49241783e-10 | J
atomic_mass_unit_hyphen_hertz_relationship = 2.2523427369e+23 | Hz
atomic_mass_unit_hyphen_joule_relationship = 1.49241783e-10 | J
atomic_mass_unit_hyphen_kelvin_relationship = 1.0809527e+13 | K
atomic_mass_unit_hyphen_kilogram_relationship = 1.660538782e-27 | kg
atomic_unit_of_charge = 1.602176487e-19 | C
atomic_unit_of_current = 0.00662361763 | A
atomic_unit_of_electric_potential = 27.21138386 | V
atomic_unit_of_energy = 4.35974394e-18 | J
atomic_unit_of_force = 8.23872206e-08 | N
atomic_unit_of_length = 5.2917720859e-11 | m
atomic_unit_of_mag_flux_density = 235051.7382 | T
atomic_unit_of_mass = 9.10938215e-31 | kg
atomic_unit_of_time = 2.4188843265e-17 | s
Bohr_radius = 5.2917720859e-11 | m
characteristic_impedance_of_vacuum = 376.730313461 | ohm
classical_electron_radius = 2.8179402894e-15 | m
Compton_wavelength = 2.4263102175e-12 | m
Compton_wavelength_over_2_pi = 3.8615926459e-13 | m
conductance_quantum = 7.7480917004e-05 | S
conventional_value_of_von_Klitzing_constant = 25812.807 | ohm
Cu_x_unit = 1.00207699e-13 | m
deuteron_mass = 3.3435832e-27 | kg
deuteron_mass_energy_equivalent = 3.00506272e-10 | J
deuteron_rms_charge_radius = 2.1402e-15 | m
electron_mass = 9.10938215e-31 | kg
electron_mass_energy_equivalent = 8.18710438e-14 | J
electron_volt = 1.602176487e-19 | J
electron_volt_hyphen_hertz_relationship = 2.417989454e+14 | Hz
electron_volt_hyphen_joule_relationship = 1.602176487e-19 | J
electron_volt_hyphen_kelvin_relationship = 11604.505 | K
electron_volt_hyphen_kilogram_relationship = 1.782661758e-36 | kg
elementary_charge = 1.602176487e-19 | C
Hartree_energy = 4.35974394e-18 | J
hartree_hyphen_hertz_relationship = 6.57968392072e+15 | Hz
hartree_hyphen_joule_relationship = 4.35974394e-18 | J
hartree_hyphen_kelvin_relationship = 315774.65 | K
hartree_hyphen_kilogram_relationship = 4.85086934e-35 | kg
helion_mass = 5.00641192e-27 | kg
helion_mass_energy_equivalent = 4.49953864e-10 | J
hertz_hyphen_joule_relationship = 6.62606896e-34 | J
hertz_hyphen_kelvin_relationship = 4.7992374e-11 | K
hertz_hyphen_kilogram_relationship = 7.372496e-51 | kg
inverse_meter_hyphen_hertz_relationship = 299792458.0 | Hz
inverse_meter_hyphen_joule_relationship = 1.986445501e-25 | J
inverse_meter_hyphen_kelvin_relationship = 0.014387752 | K
inverse_meter_hyphen_kilogram_relationship = 2.2102187e-42 | kg
inverse_of_conductance_quantum = 12906.4037787 | ohm
joule_hyphen_hertz_relationship = 1.50919045e+33 | Hz
joule_hyphen_kelvin_relationship = 7.242963e+22 | K
joule_hyphen_kilogram_relationship = 1.112650056e-17 | kg
kelvin_hyphen_hertz_relationship = 20836644000.0 | Hz
kelvin_hyphen_joule_relationship = 1.3806504e-23 | J
kelvin_hyphen_kilogram_relationship = 1.5361807e-40 | kg
kilogram_hyphen_hertz_relationship = 1.356392733e+50 | Hz
kilogram_hyphen_joule_relationship = 8.987551787e+16 | J
kilogram_hyphen_kelvin_relationship = 6.509651e+39 | K
lattice_parameter_of_silicon = 5.43102064e-10 | m
mag_flux_quantum = 2.067833667e-15 | Wb
Mo_x_unit = 1.00209955e-13 | m
muon_Compton_wavelength = 1.173444104e-14 | m
muon_Compton_wavelength_over_2_pi = 1.867594295e-15 | m
muon_mass = 1.8835313e-28 | kg
muon_mass_energy_equivalent = 1.69283351e-11 | J
natural_unit_of_energy = 8.18710438e-14 | J
natural_unit_of_length = 3.8615926459e-13 | m
natural_unit_of_mass = 9.10938215e-31 | kg
natural_unit_of_time = 1.288088657e-21 | s
neutron_Compton_wavelength = 1.3195908951e-15 | m
neutron_Compton_wavelength_over_2_pi = 2.1001941382e-16 | m
neutron_mass = 1.674927211e-27 | kg
neutron_mass_energy_equivalent = 1.505349505e-10 | J
Planck_length = 1.616252e-35 | m
Planck_mass = 2.17644e-08 | kg
Planck_temperature = 1.416785e+32 | K
Planck_time = 5.39124e-44 | s
proton_Compton_wavelength = 1.3214098446e-15 | m
proton_Compton_wavelength_over_2_pi = 2.1030890861e-16 | m
proton_mass = 1.672621637e-27 | kg
proton_mass_energy_equivalent = 1.503277359e-10 | J
proton_rms_charge_radius = 8.768e-16 | m
Rydberg_constant_times_c_in_Hz = 3.28984196036e+15 | Hz
Rydberg_constant_times_hc_in_J = 2.17987197e-18 | J
standard_atmosphere = 101325.0 | Pa
tau_Compton_wavelength = 6.9772e-16 | m
tau_Compton_wavelength_over_2_pi = 1.11046e-16 | m
tau_mass = 3.16777e-27 | kg
tau_mass_energy_equivalent = 2.84705e-10 | J
triton_mass = 5.00735588e-27 | kg
triton_mass_energy_equivalent = 4.50038703e-10 | J
unified_atomic_mass_unit = 1.660538782e-27 | kg
von_Klitzing_constant = 25812.807557 | ohm
#DERIVED UNITS***********************************************
alpha_particle_molar_mass = 0.00400150617913 | kg*mol**-1
atomic_mass_unit_hyphen_inverse_meter_relationship = 7.513006671e+14 | m**-1
atomic_unit_of_1st_hyperpolarizablity = 3.206361533e-53 | C**3*m**3*J**-2
atomic_unit_of_2nd_hyperpolarizablity = 6.23538095e-65 | C**4*m**4*J**-3
atomic_unit_of_action = 1.054571628e-34 | J*s
atomic_unit_of_charge_density = 1.0812023e+12 | C*m**-3
atomic_unit_of_electric_dipole_mom = 8.47835281e-30 | C*m
atomic_unit_of_electric_field = 5.14220632e+11 | V*m**-1
atomic_unit_of_electric_field_gradient = 9.71736166e+21 | V*m**-2
atomic_unit_of_electric_polarizablity = 1.6487772536e-41 | C**2*m**2*J**-1
atomic_unit_of_electric_quadrupole_mom = 4.48655107e-40 | C*m**2
atomic_unit_of_mag_dipole_mom = 1.85480183e-23 | J*T**-1
atomic_unit_of_magnetizability = 7.891036433e-29 | J*T**-2
atomic_unit_of_momentum = 1.992851565e-24 | kg*m*s**-1
atomic_unit_of_permittivity = 1.112650056e-10 | F*m**-1
atomic_unit_of_velocity = 2187691.2541 | m*s**-1
Avogadro_constant = 6.02214179e+23 | mol**-1
Bohr_magneton = 9.27400915e-24 | J*T**-1
Bohr_magneton_in_Hz_div_T = 13996246040.0 | Hz*T**-1
Bohr_magneton_in_inverse_meters_per_tesla = 46.6864515 | m**-1*T**-1
Bohr_magneton_in_K_div_T = 0.6717131 | K*T**-1
kB = 1.3806504e-23 | J*K**-1
Boltzmann_constant_in_Hz_div_K = 20836644000.0 | Hz*K**-1
Boltzmann_constant_in_inverse_meters_per_kelvin = 69.50356 | m**-1*K**-1
conventional_value_of_Josephson_constant = 4.835979e+14 | Hz*V**-1
deuteron_mag_mom = 4.33073465e-27 | J*T**-1
deuteron_molar_mass = 0.00201355321272 | kg*mol**-1
electric_constant = 8.854187817e-12 | F*m**-1
electron_charge_to_mass_quotient = -1.75882015e+11 | C*kg**-1
electron_gyromag_ratio = 1.76085977e+11 | s**-1*T**-1
electron_gyromag_ratio_over_2_pi = 28024.95364 | MHz*T**-1
electron_mag_mom = -9.28476377e-24 | J*T**-1
electron_molar_mass = 5.4857990943e-07 | kg*mol**-1
electron_volt_hyphen_inverse_meter_relationship = 806554.465 | m**-1
elementary_charge_over_h = 2.417989454e+14 | A*J**-1
Faraday_constant = 96485.3399 | C*mol**-1
first_radiation_constant = 3.74177118e-16 | W*m**2
first_radiation_constant_for_spectral_radiance = 1.191042759e-16 | W*m**2*sr**-1
hartree_hyphen_inverse_meter_relationship = 21947463.137 | m**-1
helion_molar_mass = 0.0030149322473 | kg*mol**-1
hertz_hyphen_inverse_meter_relationship = 3.335640951e-09 | m**-1
Josephson_constant = 4.83597891e+14 | Hz*V**-1
joule_hyphen_inverse_meter_relationship = 5.03411747e+24 | m**-1
kelvin_hyphen_inverse_meter_relationship = 69.50356 | m**-1
kilogram_hyphen_inverse_meter_relationship = 4.52443915e+41 | m**-1
Loschmidt_constant_X27315_K_and__101325_kPaX = 2.6867774e+25 | m**-3
mag_constant = 1.2566370614e-06 | N*A**-2
molar_gas_constant = 8.314472 | J*mol**-1*K**-1
molar_mass_constant = 0.001 | kg*mol**-1
molar_mass_of_carbon_hyphen_12 = 0.012 | kg*mol**-1
molar_Planck_constant = 3.9903126821e-10 | J*s*mol**-1
molar_Planck_constant_times_c = 0.11962656472 | J*m*mol**-1
molar_volume_of_ideal_gas_X27315_K_and__100_kPaX = 0.022710981 | m**3*mol**-1
molar_volume_of_ideal_gas_X27315_K_and__101325_kPaX = 0.022413996 | m**3*mol**-1
molar_volume_of_silicon = 1.20588349e-05 | m**3*mol**-1
muon_mag_mom = -4.49044786e-26 | J*T**-1
muon_molar_mass = 0.0001134289256 | kg*mol**-1
natural_unit_of_action = 1.054571628e-34 | J*s
natural_unit_of_momentum = 2.73092406e-22 | kg*m*s**-1
natural_unit_of_velocity = 299792458.0 | m*s**-1
neutron_gyromag_ratio = 183247185.0 | s**-1*T**-1
neutron_gyromag_ratio_over_2_pi = 29.1646954 | MHz*T**-1
neutron_mag_mom = -9.6623641e-27 | J*T**-1
neutron_molar_mass = 0.00100866491597 | kg*mol**-1
G = 6.67428e-11 | m**3*kg**-1*s**-2
nuclear_magneton = 5.05078324e-27 | J*T**-1
nuclear_magneton_in_inverse_meters_per_tesla = 0.02542623616 | m**-1*T**-1
nuclear_magneton_in_K_div_T = 0.00036582637 | K*T**-1
nuclear_magneton_in_MHz_div_T = 7.62259384 | MHz*T**-1
h = 6.62606896e-34 | J*s
Planck_constant_over_2_pi = 1.054571628e-34 | J*s
proton_charge_to_mass_quotient = 95788339.2 | C*kg**-1
proton_gyromag_ratio = 267522209.9 | s**-1*T**-1
proton_gyromag_ratio_over_2_pi = 42.5774821 | MHz*T**-1
proton_mag_mom = 1.410606662e-26 | J*T**-1
proton_molar_mass = 0.00100727646677 | kg*mol**-1
quantum_of_circulation = 0.00036369475199 | m**2*s**-1
quantum_of_circulation_times_2 = 0.000727389504 | m**2*s**-1
Rydberg_constant = 10973731.5685 | m**-1
second_radiation_constant = 0.014387752 | m*K
shielded_helion_gyromag_ratio = 203789473.0 | s**-1*T**-1
shielded_helion_gyromag_ratio_over_2_pi = 32.43410198 | MHz*T**-1
shielded_helion_mag_mom = -1.074552982e-26 | J*T**-1
shielded_proton_gyromag_ratio = 267515336.2 | s**-1*T**-1
shielded_proton_gyromag_ratio_over_2_pi = 42.5763881 | MHz*T**-1
shielded_proton_mag_mom = 1.410570419e-26 | J*T**-1
c = 299792458.0 | m*s**-1
standard_acceleration_of_gravity = 9.80665 | m*s**-2
Stefan_hyphen_Boltzmann_constant = 5.6704e-08 | W*m**-2*K**-4
tau_molar_mass = 0.00190768 | kg*mol**-1
Thomson_cross_section = 6.652458558e-29 | m**2
triton_mag_mom = 1.504609361e-26 | J*T**-1
triton_molar_mass = 0.0030155007134 | kg*mol**-1
Wien_frequency_displacement_law_constant = 58789330000.0 | Hz*K**-1
Wien_wavelength_displacement_law_constant = 0.0028977685 | m*K
#RATIOS ***********************************************
alpha_particle_hyphen_electron_mass_ratio = 7294.2995365 | none
alpha_particle_hyphen_proton_mass_ratio = 3.97259968951 | none
deuteron_hyphen_electron_mag_mom_ratio = -0.0004664345537 | none
deuteron_hyphen_electron_mass_ratio = 3670.4829654 | none
deuteron_g_factor = 0.8574382308 | none
deuteron_mag_mom_to_Bohr_magneton_ratio = 0.0004669754556 | none
deuteron_mag_mom_to_nuclear_magneton_ratio = 0.8574382308 | none
deuteron_hyphen_neutron_mag_mom_ratio = -0.44820652 | none
deuteron_hyphen_proton_mag_mom_ratio = 0.307012207 | none
deuteron_hyphen_proton_mass_ratio = 1.99900750108 | none
electron_hyphen_deuteron_mag_mom_ratio = -2143.923498 | none
electron_hyphen_deuteron_mass_ratio = 0.00027244371093 | none
electron_g_factor = -2.00231930436 | none
electron_mag_mom_anomaly = 0.00115965218111 | none
electron_mag_mom_to_Bohr_magneton_ratio = -1.00115965218 | none
electron_mag_mom_to_nuclear_magneton_ratio = -1838.28197092 | none
electron_hyphen_muon_mag_mom_ratio = 206.7669877 | none
electron_hyphen_muon_mass_ratio = 0.00483633171 | none
electron_hyphen_neutron_mag_mom_ratio = 960.9205 | none
electron_hyphen_neutron_mass_ratio = 0.00054386734459 | none
electron_hyphen_proton_mag_mom_ratio = -658.2106848 | none
electron_hyphen_proton_mass_ratio = 0.00054461702177 | none
electron_hyphen_tau_mass_ratio = 0.000287564 | none
electron_to_alpha_particle_mass_ratio = 0.00013709335557 | none
electron_to_shielded_helion_mag_mom_ratio = 864.058257 | none
electron_to_shielded_proton_mag_mom_ratio = -658.2275971 | none
fine_hyphen_structure_constant = 0.0072973525376 | none
helion_hyphen_electron_mass_ratio = 5495.8852765 | none
helion_hyphen_proton_mass_ratio = 2.9931526713 | none
inverse_fine_hyphen_structure_constant = 137.035999679 | none
muon_hyphen_electron_mass_ratio = 206.7682823 | none
muon_g_factor = -2.0023318414 | none
muon_mag_mom_anomaly = 0.00116592069 | none
muon_mag_mom_to_Bohr_magneton_ratio = -0.00484197049 | none
muon_mag_mom_to_nuclear_magneton_ratio = -8.89059705 | none
muon_hyphen_neutron_mass_ratio = 0.1124545167 | none
muon_hyphen_proton_mag_mom_ratio = -3.183345137 | none
muon_hyphen_proton_mass_ratio = 0.1126095261 | none
muon_hyphen_tau_mass_ratio = 0.0594592 | none
neutron_hyphen_electron_mag_mom_ratio = 0.00104066882 | none
neutron_hyphen_electron_mass_ratio = 1838.6836605 | none
neutron_g_factor = -3.82608545 | none
neutron_mag_mom_to_Bohr_magneton_ratio = -0.00104187563 | none
neutron_mag_mom_to_nuclear_magneton_ratio = -1.91304273 | none
neutron_hyphen_muon_mass_ratio = 8.89248409 | none
neutron_hyphen_proton_mag_mom_ratio = -0.68497934 | none
neutron_hyphen_proton_mass_ratio = 1.00137841918 | none
neutron_hyphen_tau_mass_ratio = 0.52874 | none
neutron_to_shielded_proton_mag_mom_ratio = -0.68499694 | none
proton_hyphen_electron_mass_ratio = 1836.15267247 | none
proton_g_factor = 5.585694713 | none
proton_mag_mom_to_Bohr_magneton_ratio = 0.001521032209 | none
proton_mag_mom_to_nuclear_magneton_ratio = 2.792847356 | none
proton_mag_shielding_correction = 2.5694e-05 | none
proton_hyphen_muon_mass_ratio = 8.88024339 | none
proton_hyphen_neutron_mag_mom_ratio = -1.45989806 | none
proton_hyphen_neutron_mass_ratio = 0.99862347824 | none
proton_hyphen_tau_mass_ratio = 0.528012 | none
Sackur_hyphen_Tetrode_constant_X1_K_and__100_kPaX = -1.1517047 | none
Sackur_hyphen_Tetrode_constant_X1_K_and__101325_kPaX = -1.1648677 | none
shielded_helion_mag_mom_to_Bohr_magneton_ratio = -0.001158671471 | none
shielded_helion_mag_mom_to_nuclear_magneton_ratio = -2.127497718 | none
shielded_helion_to_proton_mag_mom_ratio = -0.761766558 | none
shielded_helion_to_shielded_proton_mag_mom_ratio = -0.7617861313 | none
shielded_proton_mag_mom_to_Bohr_magneton_ratio = 0.001520993128 | none
shielded_proton_mag_mom_to_nuclear_magneton_ratio = 2.792775598 | none
tau_hyphen_electron_mass_ratio = 3477.48 | none
tau_hyphen_muon_mass_ratio = 16.8183 | none
tau_hyphen_neutron_mass_ratio = 1.89129 | none
tau_hyphen_proton_mass_ratio = 1.8939 | none
triton_hyphen_electron_mag_mom_ratio = -0.001620514423 | none
triton_hyphen_electron_mass_ratio = 5496.9215269 | none
triton_g_factor = 5.957924896 | none
triton_mag_mom_to_Bohr_magneton_ratio = 0.001622393657 | none
triton_mag_mom_to_nuclear_magneton_ratio = 2.978962448 | none
triton_hyphen_neutron_mag_mom_ratio = -1.55718553 | none
triton_hyphen_proton_mag_mom_ratio = 1.066639908 | none
triton_hyphen_proton_mass_ratio = 2.9937170309 | none
weak_mixing_angle = 0.22255 | none
#DERIVED CONSTANTS***********************************************
pi = numpy.pi  # plain (dimensionless) float, not an angle quantity
hbar = h / (2.0 * numpy.pi)  # reduced Planck constant
four_pi_stefan_boltzmann = 4.0 * numpy.pi * Stefan_hyphen_Boltzmann_constant
# magnetic constant; 4*pi*1e-7 is the exact pre-2019-SI definition
mu0 = 4 * numpy.pi * 1.e-7 | N/A**2
eps0 = mu0**-1 * c**-2  # electric constant, 1/(mu0*c^2)
sidereal_day = 86164.100352 | s
#machine constants
eps = numpy.finfo(numpy.double).eps  # double-precision machine epsilon
precision = int(numpy.log10(2/eps))  # usable decimal digits for doubles
#DROPPED UNITS***********************************************
"""alpha_particle_mass_energy_equivalent_in_MeV = 3727.379109 | MeV
alpha_particle_mass_in_u = 4.00150617913 | u
atomic_mass_constant_energy_equivalent_in_MeV = 931.494028 | MeV
atomic_mass_unit_hyphen_electron_volt_relationship = 931494028.0 | eV
atomic_mass_unit_hyphen_hartree_relationship = 34231777.149 | E_h
Bohr_magneton_in_eV_div_T = 5.7883817555e-05 | eV*T**-1
kBeV = 8.617343e-05 | eV*K**-1
deuteron_mass_energy_equivalent_in_MeV = 1875.612793 | MeV
deuteron_mass_in_u = 2.01355321272 | u
electron_mass_energy_equivalent_in_MeV = 0.51099891 | MeV
electron_mass_in_u = 0.00054857990943 | u
electron_volt_hyphen_atomic_mass_unit_relationship = 1.073544188e-09 | u
electron_volt_hyphen_hartree_relationship = 0.0367493254 | E_h
Faraday_constant_for_conventional_electric_current = 96485.3401 | C_90*mol**-1
Fermi_coupling_constant = 1.16637e-05 | GeV**-2
hartree_hyphen_atomic_mass_unit_relationship = 2.9212622986e-08 | u
hartree_hyphen_electron_volt_relationship = 27.21138386 | eV
Hartree_energy_in_eV = 27.21138386 | eV
helion_mass_energy_equivalent_in_MeV = 2808.391383 | MeV
helion_mass_in_u = 3.0149322473 | u
hertz_hyphen_atomic_mass_unit_relationship = 4.4398216294e-24 | u
hertz_hyphen_electron_volt_relationship = 4.13566733e-15 | eV
hertz_hyphen_hartree_relationship = 1.51982984601e-16 | E_h
inverse_meter_hyphen_atomic_mass_unit_relationship = 1.3310250394e-15 | u
inverse_meter_hyphen_electron_volt_relationship = 1.239841875e-06 | eV
inverse_meter_hyphen_hartree_relationship = 4.55633525276e-08 | E_h
joule_hyphen_atomic_mass_unit_relationship = 6700536410.0 | u
joule_hyphen_electron_volt_relationship = 6.24150965e+18 | eV
joule_hyphen_hartree_relationship = 2.29371269e+17 | E_h
kelvin_hyphen_atomic_mass_unit_relationship = 9.251098e-14 | u
kelvin_hyphen_electron_volt_relationship = 8.617343e-05 | eV
kelvin_hyphen_hartree_relationship = 3.1668153e-06 | E_h
kilogram_hyphen_atomic_mass_unit_relationship = 6.02214179e+26 | u
kilogram_hyphen_electron_volt_relationship = 5.60958912e+35 | eV
kilogram_hyphen_hartree_relationship = 2.06148616e+34 | E_h
muon_mass_energy_equivalent_in_MeV = 105.6583668 | MeV
muon_mass_in_u = 0.1134289256 | u
natural_unit_of_action_in_eV_s = 6.58211899e-16 | eV*s
natural_unit_of_energy_in_MeV = 0.51099891 | MeV
natural_unit_of_momentum_in_MeV_div_c = 0.51099891 | MeV/c
neutron_mass_energy_equivalent_in_MeV = 939.565346 | MeV
neutron_mass_in_u = 1.00866491597 | u
Newtonian_constant_of_gravitation_over_h_hyphen_bar_c = 6.70881e-39 | (GeV/c**2)**-2
nuclear_magneton_in_eV_div_T = 3.1524512326e-08 | eV*T**-1
heV = 4.13566733e-15 | eV*s
Planck_constant_over_2_pi_in_eV_s = 6.58211899e-16 | eV*s
Planck_constant_over_2_pi_times_c_in_MeV_fm = 197.3269631 | MeV*fm
Planck_mass_energy_equivalent_in_GeV = 1.220892e+19 | GeV
proton_mass_energy_equivalent_in_MeV = 938.272013 | MeV
proton_mass_in_u = 1.00727646677 | u
Rydberg_constant_times_hc_in_eV = 13.60569193 | eV
tau_mass_energy_equivalent_in_MeV = 1776.99 | MeV
tau_mass_in_u = 1.90768 | u
triton_mass_energy_equivalent_in_MeV = 2808.920906 | MeV
triton_mass_in_u = 3.0155007134 | u
"""
| 18,324
| 51.357143
| 93
|
py
|
amuse
|
amuse-main/src/amuse/units/trigo.py
|
import numpy
from amuse.units import quantities
from amuse.units.units import rad,deg,rev,pi
#trigonometric convenience functions which are "unit aware"
# forward trig: the 1.* multiplication appears to coerce an angle quantity
# into a form numpy's ufuncs accept (NOTE(review): confirm against
# quantities' __mul__ semantics)
sin=lambda x: numpy.sin(1.*x)
cos=lambda x: numpy.cos(1.*x)
tan=lambda x: numpy.tan(1.*x)
# inverse trig: numpy returns plain radians, so tag the result with rad
arcsin=lambda x: numpy.arcsin(x) | rad
arccos=lambda x: numpy.arccos(x) | rad
arctan=lambda x: numpy.arctan(x) | rad
arctan2=lambda x,y: numpy.arctan2(x,y) | rad
def to_rad(angle):
    """Return ``angle`` as a quantity expressed in radians."""
    return quantities.as_quantity_in(angle,rad)
def to_deg(angle):
    """Return ``angle`` as a quantity expressed in degrees."""
    return quantities.as_quantity_in(angle,deg)
def to_rev(angle):
    """Return ``angle`` as a quantity expressed in revolutions."""
    return quantities.as_quantity_in(angle,rev)
def in_rad(angle):
    """Return the plain numerical value of ``angle`` in radians."""
    return quantities.value_in(angle,rad)
def in_deg(angle):
    """Return the plain numerical value of ``angle`` in degrees."""
    return quantities.value_in(angle,deg)
def in_rev(angle):
    """Return the plain numerical value of ``angle`` in revolutions."""
    return quantities.value_in(angle,rev)
| 780
| 27.925926
| 59
|
py
|
amuse
|
amuse-main/src/amuse/units/nbody_system.py
|
"""
The n-body unit system knows the three base quantities in the
International System of Quantities, I.S.Q. and defines
the gravitational constant to be 1:
G = 1 | (length**3) / (mass * (time**2))
+-------------------+-----------------------------------+-----------------+
|Base quantity |Name in generic unit |Name in S.I. unit|
+-------------------+-----------------------------------+-----------------+
|length |generic_system.length |units.m |
+-------------------+-----------------------------------+-----------------+
|time |generic_system.time |units.s |
+-------------------+-----------------------------------+-----------------+
|mass |generic_system.mass |units.kg |
+-------------------+-----------------------------------+-----------------+
Derived quantities
~~~~~~~~~~~~~~~~~~
+------------------+--------------------------------+
|acceleration |length / (time ** 2) |
+------------------+--------------------------------+
|potential |(length ** 2) / (time ** 2) |
+------------------+--------------------------------+
|energy |mass * potential |
+------------------+--------------------------------+
|specific_energy |potential |
+------------------+--------------------------------+
|speed |length / time |
+------------------+--------------------------------+
|volume |(length ** 3) |
+------------------+--------------------------------+
|density |mass / volume |
+------------------+--------------------------------+
|momentum_density |density * speed |
+------------------+--------------------------------+
|energy_density |density * specific_energy |
+------------------+--------------------------------+
"""
from amuse.units import units
from amuse.units import core
from amuse.units import constants
from amuse.units import generic_unit_converter
from amuse.units import generic_unit_system
from amuse.units.quantities import new_quantity
from amuse.support import exceptions
import numpy
"""
"""
class nbody_unit(core.base_unit):
    """A base unit of the dimensionless n-body system.

    Mirrors the quantity, name and symbol of the corresponding S.I. base
    unit while identifying itself as generic.
    """

    def __init__(self, unit_in_si, system):
        core.base_unit.__init__(
            self, unit_in_si.quantity, unit_in_si.name, unit_in_si.symbol, system
        )
        self.unit_in_si = unit_in_si

    def __str__(self):
        return 'nbody ' + self.unit_in_si.quantity

    def is_generic(self):
        return True
# re-export the three generic base units under their n-body names
length = generic_unit_system.length
time = generic_unit_system.time
mass = generic_unit_system.mass
# derived n-body units, built from the three bases
acceleration = length / (time ** 2)
potential = (length ** 2) / (time ** 2)
energy = mass * potential
specific_energy = potential
speed = length / time
volume = (length ** 3)
density = mass / volume
pressure = mass / length / (time ** 2)
momentum_density = density * speed
energy_density = density * specific_energy
# in n-body units the gravitational constant is unity by definition
G = 1. | (length**3) / (mass * (time**2))
def is_nbody_unit(unit):
    """True when any base component of ``unit`` is a generic (n-body) unit."""
    return any(component.is_generic() for _, component in unit.base)
class SiToNBodyConverter(object):
    """Adapter that fixes the conversion direction of an ``nbody_to_si``
    converter as source = S.I., target = n-body.

    Values that do not carry a numeric unit pass through untouched.
    """

    def __init__(self, nbody_to_si):
        self.nbody_to_si = nbody_to_si

    def _has_numeric_unit(self, quantity):
        # only quantities carrying a numeric unit are convertible
        return hasattr(quantity, 'unit') and not quantity.unit.is_non_numeric()

    def from_source_to_target(self, quantity):
        if self._has_numeric_unit(quantity):
            return self.nbody_to_si.to_nbody(quantity)
        return quantity

    def from_target_to_source(self, quantity):
        if self._has_numeric_unit(quantity):
            return self.nbody_to_si.to_si(quantity)
        return quantity
class nbody_to_si(generic_unit_converter.ConvertBetweenGenericAndSiUnits):
    """Converter between n-body (G = 1) units and S.I. units.

    The scaling is fixed by the gravitational constant plus the two
    independent quantities handed to the constructor (e.g. a total mass
    and a length scale).
    """
    def __init__(self, value1, value2):
        generic_unit_converter.ConvertBetweenGenericAndSiUnits.__init__(self, constants.G, value1, value2)
    def to_nbody(self, value):
        """Express ``value`` in n-body (generic) units."""
        return self.to_generic(value)
    def _old_unit_to_unit_in_nbody(self, unit):
        # Legacy helper: rebuild ``unit`` from its base decomposition,
        # substituting the n-body counterpart for every convertible base
        # and folding the S.I. factors into one leading factor.
        # (Fixed: dropped the unused ``nbody_units_in_si`` local and
        # renamed the loop variable, which shadowed the ``unit`` parameter.)
        base = unit.base
        factor = unit.factor
        new_unit = 1
        for n, base_unit in base:
            # NOTE(review): find_nbody_unit_for comes from the converter
            # base class; from its use here it yields an (nbody, si) pair,
            # with ``None`` for bases that have no n-body counterpart.
            unit_in_nbody, unit_in_si = self.find_nbody_unit_for(base_unit)
            if unit_in_si is not None:
                factor = factor / (unit_in_si.factor ** n)
                new_unit *= (unit_in_nbody.base[0][1] ** n)
            else:
                new_unit *= (base_unit ** n)
        return factor * new_unit
    def as_converter_from_si_to_nbody(self):
        return self.as_converter_from_si_to_generic()
    def as_converter_from_nbody_to_si(self):
        return self.as_converter_from_generic_to_si()
    def as_converter_from_si_to_generic(self):
        # n-body specialization: wrap self in the direction-fixing adapter
        return SiToNBodyConverter(self)
| 4,912
| 34.345324
| 105
|
py
|
amuse
|
amuse-main/src/amuse/units/units.py
|
import numpy
from amuse.units.si import (
named, s, m, kg, none, core,
)
from amuse.units.derivedsi import (
km, V, W, J, Pa, rad,
)
from amuse.units.si import *
from amuse.units.derivedsi import *
from amuse.units import constants
from . import quantities
# misc every day
minute = named('minute', 'min', 60.0 * s)
hour = named('hour', 'hr', 60.0 * minute)
day = named('day', 'day', 24.0 * hour)
yr = named('year', 'yr', 365.242199 * day)
julianyr = named('julian yr', 'julianyr', 365.25 * day)
ms = named('meter per seconds', 'ms', m / s)
kms = named('kilometer per seconds', 'kms', km / s)
# units based on measured quantities
e = named('electron charge', 'e', constants.elementary_charge.as_unit())
eV = named('electron volt', 'eV', e*V)
MeV = named('mega electron volt', 'MeV', 1e6*eV)
GeV = named('giga electron volt', 'GeV', 1e9*eV)
E_h = named('hartree energy', 'E_h', constants.Hartree_energy.as_unit())
amu = named('atomic mass unit', 'amu', constants.u.as_unit())
Ry = named('rydberg unit', 'Ry', (
constants.Rydberg_constant * constants.h * constants.c
).as_quantity_in(eV).as_unit())
# astronomical units
angstrom = named('angstrom', 'angstrom', 1e-10*m)
AU = named('astronomical unit', 'AU', 149597870691.0 * m)
au = named('astronomical unit', 'au', 149597870691.0 * m)
AUd = named('AU per day', 'AUd', 149597870691.0 * m / day)
parsec = named('parsec', 'parsec', AU / numpy.tan(numpy.pi/(180*60*60)))
kpc = named('kilo parsec', 'kpc', 10**3 * parsec)
Mpc = named('mega parsec', 'Mpc', 10**6 * parsec)
Gpc = named('giga parsec', 'Gpc', 10**9 * parsec)
lightyear = named('light year', 'ly', 9460730472580.8 * km)
# lightyear = named('light year', 'ly', c*julianyr)
LSun = named('solar luminosity', 'LSun', 3.839e26 * W)
MSun = named('solar mass', 'MSun', 1.98892e30 * kg)
MJupiter = named('jupiter mass', 'MJupiter', 1.8987e27 * kg)
MEarth = named('earth mass', 'MEarth', 5.9722e24 * kg)
RSun = named('solar radius', 'RSun', 6.955e8 * m)
RJupiter = named('jupiter radius', 'RJupiter', 71492. * km)
REarth = named('earth radius', 'REarth', 6371.0088 * km) # IUGG mean radius
kyr = named('kilo year', 'kyr', 1000 * yr)
myr = named('million year', 'Myr', 1000000 * yr)
Myr = myr
gyr = named('giga (billion) year', 'Gyr', 1000000000 * yr)
Gyr = gyr
pc = parsec
# cgs units
g = named('gram', 'g', 1e-3 * kg)
cm = named('centimeter', 'cm', 0.01*m)
erg = named('erg', 'erg', 1e-7 * J)
barye = named('barye', 'Ba', 0.1*Pa)
# imperial distance units
inch = named('inch', 'in', 0.0254 * m)
foot = named('foot', 'ft', 0.3048 * m)
mile = named('mile', 'mi', 1609.344 * m)
percent = named('percent', '%', 0.01 * none)
metallicity = core.none_unit('metallicity', 'metallicity')
string = core.string_unit('string', 'string')
stellar_type = core.enumeration_unit(
'stellar_type',
'stellar_type',
None,
[
"deeply or fully convective low mass MS star", # 0
"Main Sequence star", # 1
"Hertzsprung Gap", # 2
"First Giant Branch", # 3
"Core Helium Burning", # 4
"First Asymptotic Giant Branch", # 5
"Second Asymptotic Giant Branch", # 6
"Main Sequence Naked Helium star", # 7
"Hertzsprung Gap Naked Helium star", # 8
"Giant Branch Naked Helium star", # 9
"Helium White Dwarf", # 10
"Carbon/Oxygen White Dwarf", # 11
"Oxygen/Neon White Dwarf", # 12
"Neutron Star", # 13
"Black Hole", # 14
"Massless Supernova", # 15
"Unknown stellar type", # 16
"Pre-main-sequence Star", # 17
"Planet", # 18
"Brown Dwarf", # 19
]
)
# special unit for keys of particles
object_key = core.key_unit('object_key', 'key')
# angles
# rad=named('radian','rad',m/m) (defined in derivedsi.py)
pi = numpy.pi | rad  # angle quantity; shadows any plain-float pi on star-import
rev = named('revolutions', 'rev', (2*numpy.pi) * rad)
deg = named('degree', 'deg', (numpy.pi/180) * rad)
arcmin = named('arcminutes', 'arcmin', (1./60) * deg)
arcsec = named('arcseconds', 'arcsec', (1./3600) * deg)
| 4,019
| 34.892857
| 77
|
py
|
amuse
|
amuse-main/src/amuse/units/generic_unit_system.py
|
"""
The generic unit system knows the seven base quantities in the
International System of Quantities, I.S.Q.
+-------------------+-----------------------------------+-----------------+
|Base quantity |Name in generic unit |Name in S.I. unit|
+-------------------+-----------------------------------+-----------------+
|length |generic_system.length |units.m |
+-------------------+-----------------------------------+-----------------+
|time |generic_system.time |units.s |
+-------------------+-----------------------------------+-----------------+
|mass |generic_system.mass |units.kg |
+-------------------+-----------------------------------+-----------------+
|current |generic_system.current |units.A |
+-------------------+-----------------------------------+-----------------+
|temperature |generic_system.temperature |units.K |
+-------------------+-----------------------------------+-----------------+
|amount of substance|generic_system.amount_of_substance |units.mol |
+-------------------+-----------------------------------+-----------------+
|luminous intensity |generic_system.luminous_intensity |units.cd |
+-------------------+-----------------------------------+-----------------+
"""
from amuse.units import units
from amuse.units import core
class generic_unit(core.base_unit):
    """A base unit of the generic system, mirroring an S.I. base unit.

    Shares the quantity, name and symbol of the wrapped S.I. unit and
    identifies itself as generic.
    """

    def __init__(self, unit_in_si, system):
        core.base_unit.__init__(
            self, unit_in_si.quantity, unit_in_si.name, unit_in_si.symbol, system
        )
        self.unit_in_si = unit_in_si

    def __str__(self):
        return self.unit_in_si.quantity

    def is_generic(self):
        return True
generic_system = core.system('generic')
# generic base units, one per S.I. base unit
# NOTE(review): the module docstring advertises seven base quantities,
# but no generic unit is defined for amount of substance (mol) here.
length = generic_unit(units.m, generic_system)
time = generic_unit(units.s, generic_system)
mass = generic_unit(units.kg, generic_system)
current = generic_unit(units.A, generic_system)
temperature = generic_unit(units.K, generic_system)
luminous_intensity = generic_unit(units.cd, generic_system)
# derived generic units
acceleration = length / (time ** 2)
force = mass*acceleration
potential = (length ** 2) / (time ** 2)
energy = mass * potential
specific_energy = potential
speed = length / time
volume = (length ** 3)
density = mass / volume
momentum_density = density * speed
energy_density = density * specific_energy
charge = current * time
pressure = mass / length / (time ** 2)
def is_generic_unit(unit):
    """True when any base component of ``unit`` belongs to a generic system."""
    return any(component.is_generic() for _, component in unit.base)
def generic_to_si(*arg):
    """Create a generic<->S.I. converter from the given defining quantities."""
    # imported locally (NOTE(review): likely to avoid an import cycle)
    from amuse.units import generic_unit_converter
    return generic_unit_converter.ConvertBetweenGenericAndSiUnits(*arg)
| 2,792
| 38.9
| 102
|
py
|
amuse
|
amuse-main/src/amuse/units/quantities.py
|
import numpy
import operator
from math import sqrt
from amuse.support import exceptions
from amuse.support import console
from amuse.support.core import late
from amuse.support.core import compare_version_strings
from amuse.units import core
from amuse.units.si import none
from amuse.units.core import zero_unit
# optional astropy interoperability: only a feature flag is set here
try:
    import astropy.units
    import amuse.units.si
    HAS_ASTROPY = True
except ImportError:
    HAS_ASTROPY = False
class Quantity:
"""
A Quantity objects represents a scalar or vector with a
specific unit. Quantity is an abstract base class
for VectorQuantity and ScalarQuantity.
Quantities should be constructed using *or-operator* ("|"),
*new_quantity* or *unit.new_quantity*.
Quantities emulate numeric types.
Examples
>>> from amuse.units import units
>>> 100 | units.m
quantity<100 m>
>>> (100 | units.m) + (1 | units.km)
quantity<1100.0 m>
Quantities can be tested
>>> from amuse.units import units
>>> x = 100 | units.m
>>> x.is_quantity()
True
>>> x.is_scalar()
True
>>> x.is_vector()
False
>>> v = [100, 200, 300] | units.g
>>> v.is_quantity()
True
>>> v.is_scalar()
False
>>> v.is_vector()
True
Quantities can be converted to numbers
>>> from amuse.units import units
>>> x = 1000.0 | units.m
>>> x.value_in(units.m)
1000.0
>>> x.value_in(units.km)
1.0
>>> x.value_in(units.g) # but only if the units are compatible!
Traceback (most recent call last):
File "<stdin>", line 1, in ?
IncompatibleUnitsException: Cannot express m in g, the units do not have the same bases
"""
__slots__ = ['unit']
__array_priority__ = 101
    def __init__(self, unit):
        """Attach ``unit`` to this quantity; subclasses supply the number."""
        self.unit = unit
    def __str__(self):
        # formatting is delegated to the active printing strategy
        return console.current_printing_strategy.quantity_to_string(self)
    def is_quantity(self):
        """
        True for all quantities, scalar and vector alike.
        """
        return True
    def is_scalar(self):
        """
        True for scalar quantities; False here, overridden by subclasses.
        """
        return False
    def is_vector(self):
        """
        True for vector quantities; False here, overridden by subclasses.
        """
        return False
    def __repr__(self):
        # e.g. quantity<100 m>
        return 'quantity<'+str(self)+'>'
    def __add__(self, other):
        """Add two quantities; the result carries this quantity's unit."""
        if self.unit.is_zero():
            # a zero-unit operand adopts the other operand's unit outright
            other=to_quantity(other)
            return new_quantity(other.number, other.unit)
        else:
            other = to_quantity(other)
            # scale the other operand's number into self.unit before adding
            factor = other.unit.conversion_factor_from(self.unit)
            return new_quantity(self.number + factor*other.number, self.unit)
    __radd__ = __add__  # addition is symmetric; reuse for the reflected form
    def __sub__(self, other):
        """Subtract ``other`` (converted into this unit) from this quantity."""
        if self.unit.is_zero():
            # zero-unit minuend: the result is just the negated subtrahend
            return -other
        else:
            other_in_my_units = to_quantity(other).as_quantity_in(self.unit)
            return new_quantity(self.number - other_in_my_units.number, self.unit)
def __rsub__(self, other):
if self.unit.is_zero():
return new_quantity(other.number, other.unit)
other_in_my_units = to_quantity(other).as_quantity_in(self.unit)
return new_quantity(other_in_my_units.number - self.number, self.unit)
def __mul__(self, other):
other = to_quantity(other)
return new_quantity_nonone(self.number * other.number, (self.unit * other.unit).to_simple_form())
__rmul__ = __mul__
def __pow__(self, other):
return new_quantity(self.number ** other, self.unit ** other)
def __truediv__(self, other):
other = to_quantity(other)
return new_quantity_nonone(operator.__truediv__(self.number,other.number), (self.unit / other.unit).to_simple_form())
def __rtruediv__(self, other):
return new_quantity_nonone(operator.__truediv__(other,self.number), (1.0 / self.unit).to_simple_form())
def __floordiv__(self, other):
other = to_quantity(other)
return new_quantity_nonone(operator.__floordiv__(self.number,other.number), (self.unit / other.unit).to_simple_form())
def __rfloordiv__(self, other):
return new_quantity_nonone(operator.__floordiv__(other,self.number), (1.0 / self.unit).to_simple_form())
def __div__(self, other):
other = to_quantity(other)
return new_quantity_nonone(self.number/other.number, (self.unit / other.unit).to_simple_form())
def __rdiv__(self, other):
return new_quantity_nonone(other/self.number, (1.0 / self.unit).to_simple_form())
def __mod__(self, other):
other_in_my_units = to_quantity(other).as_quantity_in(self.unit)
return new_quantity_nonone(numpy.mod(self.number , other_in_my_units.number), self.unit)
def __rmod__(self, other):
other_in_my_units = to_quantity(other).as_quantity_in(self.unit)
return new_quantity_nonone(numpy.mod(other_in_my_units.number , self.number), self.unit)
def in_base(self):
unit=self.unit.base_unit()
return self.as_quantity_in(unit)
def sqrt(self):
"""Calculate the square root of each component
>>> from amuse.units import units
>>> s1 = 144.0 | units.m**2
>>> s1.sqrt()
quantity<12.0 m>
>>> v1 = [16.0, 25.0, 36.0] | units.kg
>>> v1.sqrt()
quantity<[4.0, 5.0, 6.0] kg**0.5>
"""
return new_quantity(numpy.sqrt(self.number), (self.unit ** 0.5).to_simple_form())
def as_quantity_in(self, another_unit):
"""
Reproduce quantity in another unit.
The new unit must have the same basic si quantities.
:argument another_unit: unit to convert quantity to
:returns: quantity converted to new unit
"""
if isinstance(another_unit, Quantity):
raise exceptions.AmuseException("Cannot expres a unit in a quantity")
factor = self.unit.conversion_factor_from(another_unit)
return new_quantity(self.number * factor, another_unit)
in_ = as_quantity_in
def as_string_in(self, another_unit):
"""
Create a string representing the quantity in another unit.
The new unit must have the same basic si quantities.
:argument another_unit: unit to convert quantity to
:returns: string representing quantity converted to new unit
"""
return console.DefaultPrintingStrategy().quantity_to_string(self.as_quantity_in(another_unit))
def value_in(self, unit):
"""
Return a numeric value (for scalars) or array (for vectors)
in the given unit.
A number is returned without any unit information. Use this
function only to transfer values to other libraries that have
no support for quantities (for example plotting).
:argument unit: wanted unit of the value
:returns: number in the given unit
>>> from amuse.units import units
>>> x = 10 | units.km
>>> x.value_in(units.m)
10000.0
"""
value_of_unit_in_another_unit = self.unit.value_in(unit)
return self.number * value_of_unit_in_another_unit
def __abs__(self):
"""
Return the absolute value of this quantity
>>> from amuse.units import units
>>> x = -10 | units.km
>>> print abs(x)
10 km
"""
return new_quantity(abs(self.number), self.unit)
def __neg__(self):
"""
Unary minus.
>>> from amuse.units import units
>>> x = -10 | units.km
>>> print -x
10 km
"""
return new_quantity(-self.number, self.unit)
def __lt__(self, other):
return self.value_in(self.unit) < to_quantity(other).value_in(self.unit)
def __gt__(self, other):
return self.value_in(self.unit) > to_quantity(other).value_in(self.unit)
def __eq__(self, other):
return self.value_in(self.unit) == to_quantity(other).value_in(self.unit)
def __ne__(self, other):
return self.value_in(self.unit) != to_quantity(other).value_in(self.unit)
def __le__(self, other):
return self.value_in(self.unit) <= to_quantity(other).value_in(self.unit)
def __ge__(self, other):
return self.value_in(self.unit) >= to_quantity(other).value_in(self.unit)
if HAS_ASTROPY:
def as_astropy_quantity(self):
return to_astropy(self)
class ScalarQuantity(Quantity):
    """
    A ScalarQuantity object represents a physical scalar
    quantity.
    """
    __slots__ = ['number']

    def __init__(self, number, unit):
        # Quantity.__init__(self, unit)
        # commented out super call, this speeds thing up
        self.unit = unit
        if isinstance(number, str):
            try:
                number = float(number)
            except ValueError:  # needed to handle interfaces
                pass
        if unit.dtype is None:
            self.number = number
        else:
            # Coerce the number to the unit's dtype (numpy dtype or callable).
            if isinstance(unit.dtype, numpy.dtype):
                self.number = unit.dtype.type(number)
            else:
                self.number = unit.dtype(number)

    def is_scalar(self):
        return True

    def as_vector_with_length(self, length):
        # Broadcast this scalar into a vector of the given length.
        return VectorQuantity(numpy.ones(length, dtype=self.unit.dtype) * self.number, self.unit)

    def reshape(self, shape):
        # A scalar can only be reshaped into a length-1 vector.
        if shape == -1 or (len(shape) == 1 and shape[0] == 1):
            return VectorQuantity([self.number], self.unit)
        else:
            raise exceptions.AmuseException("Cannot reshape a scalar to vector of shape '{0}'".format(shape))

    def __getitem__(self, index):
        # Only index 0 is valid, mirroring a 0-d value.
        if index == 0:
            return self
        else:
            raise exceptions.AmuseException("ScalarQuantity does not support indexing")

    def copy(self):
        return new_quantity(self.number, self.unit)

    def to_unit(self):
        # Fold the numeric value into the unit's factor, returning a unit.
        in_base = self.in_base()
        return in_base.number * in_base.unit

    def __getstate__(self):
        return (self.unit, self.number)

    def round(self, decimals=0):
        return new_quantity(numpy.round(self.number, decimals), self.unit)

    def new_zeros_array(self, length):
        array = numpy.zeros(length, dtype=self.unit.dtype)
        return new_quantity(array, self.unit)

    def __setstate__(self, x):
        self.unit = x[0]
        self.number = x[1]

    # Reductions on a scalar are identity operations; signatures mirror the
    # VectorQuantity equivalents so both types can be used interchangeably.
    def sum(self, axis=None, dtype=None, out=None):
        return self

    def cumsum(self, axis=None, dtype=None, out=None):
        return self

    def prod(self, axis=None, dtype=None):
        return self

    def min(self, axis=None):
        return self

    def max(self, axis=None):
        return self
    amin = min
    amax = max

    def sorted(self):
        return self

    def as_unit(self):
        return self.number * self.unit
class _flatiter_wrapper(object):
    """
    Quantity-aware wrapper around ``numpy.flatiter``: iterating, indexing
    and assigning through it preserves the owning quantity's unit.
    Returned by ``VectorQuantity.flat``.
    """
    def __init__(self, quantity):
        self.flat = quantity.number.flat
        self.quantity = quantity

    def __iter__(self):
        return self

    def __next__(self):
        return new_quantity(next(self.flat), self.quantity.unit)

    def __getitem__(self, x):
        return new_quantity(self.flat[x], self.quantity.unit)

    def __setitem__(self, index, x):
        # Incoming value is converted to the owner's unit before storing.
        return self.flat.__setitem__(index, x.value_in(self.quantity.unit))

    @property
    def base(self):
        # Mirrors numpy.flatiter.base: the object being iterated.
        return self.quantity

    @property
    def index(self):
        return self.flat.index

    @property
    def coords(self):
        return self.flat.coords

    @property
    def unit(self):
        return self.quantity.unit

    @property
    def number(self):
        return self.flat

    def copy(self):
        # Materializes the iterator into a new 1-d VectorQuantity.
        return new_quantity(self.flat.copy(), self.quantity.unit)

    def is_quantity(self):
        return True

    def value_in(self, unit):
        return self.copy().value_in(unit)

    def as_quantity_in(self, unit):
        return self.copy().as_quantity_in(unit)
    # todo: add as required
class VectorQuantity(Quantity):
    """
    A VectorQuantity object represents a physical vector
    quantity.
    >>> from amuse.units import units
    >>> v1 = [0.0, 1.0, 2.0] | units.kg
    >>> v2 = [2.0, 4.0, 6.0] | units.kg
    >>> v1 + v2
    quantity<[2.0, 5.0, 8.0] kg>
    >>> len(v1)
    3
    """
    __slots__ = ['_number']

    def __init__(self, array, unit):
        Quantity.__init__(self, unit)
        if unit is None:
            self._number = numpy.array((), dtype='float64')
        else:
            self._number = numpy.asarray(array, dtype=unit.dtype)

    @classmethod
    def new_from_scalar_quantities(cls, *values):
        # All values must be convertible to the unit of the first one.
        unit = to_quantity(values[0]).unit
        try:
            array = [value_in(x, unit) for x in values]
        except core.IncompatibleUnitsException:
            raise exceptions.AmuseException("not all values have conforming units")
        return cls(array, unit)

    @classmethod
    def new_from_array(cls, array):
        shape = array.shape
        vector = cls.new_from_scalar_quantities(*array.flat)
        return vector.reshape(shape)

    def aszeros(self):
        return new_quantity(numpy.zeros(self.shape, dtype=self.number.dtype), self.unit)

    def new_zeros_array(self, length):
        array = numpy.zeros(length, dtype=self.unit.dtype)
        return type(self)(array, self.unit)

    @classmethod
    def zeros(cls, length, unit):
        array = numpy.zeros(length, dtype=unit.dtype)
        return cls(array, unit)

    @classmethod
    def arange(cls, begin, end, step):
        return arange(begin, end, step)

    @property
    def shape(self):
        return self.number.shape

    @property
    def dtype(self):
        return self.number.dtype

    def flatten(self):
        return new_quantity(self.number.flatten(), self.unit)

    @property
    def flat(self):
        return _flatiter_wrapper(self)

    def is_vector(self):
        return True

    def as_vector_with_length(self, length):
        if len(self) == length:
            return self.copy()
        if len(self) == 1:
            # Broadcast a single element to the requested length.
            return self.new_from_scalar_quantities(*[self[0]]*length)
        raise exceptions.AmuseException("as_vector_with_length only valid for same length or 1")

    def as_vector_quantity(self):
        return self

    def __len__(self):
        return len(self._number)

    def split(self, indices_or_sections, axis=0):
        parts = numpy.split(self.number, indices_or_sections, axis)
        return [VectorQuantity(x, self.unit) for x in parts]

    def array_split(self, indices_or_sections, axis=0):
        parts = numpy.array_split(self.number, indices_or_sections, axis)
        return [VectorQuantity(x, self.unit) for x in parts]

    def sum(self, axis=None, dtype=None, out=None):
        """Calculate the sum of the vector components
        >>> from amuse.units import units
        >>> v1 = [0.0, 1.0, 2.0] | units.kg
        >>> v1.sum()
        quantity<3.0 kg>
        """
        return new_quantity(self.number.sum(axis, dtype, out), self.unit)

    def cumsum(self, axis=None, dtype=None, out=None):
        """ Calculate the cumulative sum of the elements along a given axis. """
        return new_quantity(numpy.cumsum(self.number, axis, dtype, out), self.unit)

    def prod(self, axis=None, dtype=None):
        """Calculate the product of the vector components
        >>> from amuse.units import units
        >>> v1 = [1.0, 2.0, 3.0] | units.m
        >>> v1.prod()
        quantity<6.0 m**3>
        >>> v1 = [[2.0, 3.0], [2.0, 4.0], [5.0,3.0] ] | units.m
        >>> v1.prod()
        quantity<720.0 m**6>
        >>> v1.prod(0)
        quantity<[20.0, 36.0] m**3>
        >>> v1.prod(1)
        quantity<[6.0, 8.0, 15.0] m**2>
        >>> v1 = [[[2.0, 3.0], [2.0, 4.0]],[[5.0, 2.0], [3.0, 4.0]]] | units.m
        >>> v1.prod() # doctest:+ELLIPSIS
        quantity<5760.0 m**8...>
        >>> v1.prod(0)
        quantity<[[10.0, 6.0], [6.0, 16.0]] m**2>
        >>> v1.prod(1)
        quantity<[[4.0, 12.0], [15.0, 8.0]] m**2>
        >>> v1.prod(2)
        quantity<[[6.0, 8.0], [10.0, 12.0]] m**2>
        """
        # The unit's power grows with the number of multiplied elements.
        if axis is None:
            return new_quantity_nonone(self.number.prod(axis, dtype), self.unit ** numpy.prod(self.number.shape))
        else:
            return new_quantity_nonone(self.number.prod(axis, dtype), self.unit ** self.number.shape[axis])

    def inner(self, other):
        """Calculate the inner product of self with other.
        >>> from amuse.units import units
        >>> v1 = [1.0, 2.0, 3.0] | units.m
        >>> v1.inner(v1)
        quantity<14.0 m**2>
        """
        other = to_quantity(other)
        return new_quantity_nonone(numpy.inner(self._number, other._number), (self.unit * other.unit).to_simple_form())

    def length_squared(self):
        """Calculate the squared length of the vector.
        >>> from amuse.units import units
        >>> v1 = [2.0, 3.0, 4.0] | units.m
        >>> v1.length_squared()
        quantity<29.0 m**2>
        """
        return (self * self).sum()

    def length(self):
        """Calculate the length of the vector.
        >>> from amuse.units import units
        >>> v1 = [0.0, 3.0, 4.0] | units.m
        >>> v1.length()
        quantity<5.0 m>
        """
        return self.length_squared().sqrt()

    def lengths(self):
        """Calculate the length of the vectors in this vector.
        >>> from amuse.units import units
        >>> v1 = [[0.0, 3.0, 4.0],[2.0 , 2.0 , 1.0]] | units.m
        >>> v1.lengths()
        quantity<[5.0, 3.0] m>
        """
        return self.lengths_squared().sqrt()

    def lengths_squared(self):
        """Calculate the length of the vectors in this vector
        >>> from amuse.units import units
        >>> v1 = [[0.0, 3.0, 4.0],[4.0, 2.0, 1.0]] | units.m
        >>> v1.lengths_squared()
        quantity<[25.0, 21.0] m**2>
        """
        # Sum of squares over the last axis: one squared length per row.
        return (self.unit**2).new_quantity((self.number * self.number).sum(self.number.ndim - 1))

    def __getitem__(self, index):
        """Return the "index" component as a quantity.
        :argument index: index of the component, valid values
        for 3 dimensional vectors are: ``[0,1,2]``
        :returns: quantity with the same units
        >>> from amuse.units import si
        >>> vector = [0.0, 1.0, 2.0] | si.kg
        >>> print(vector[1])
        1.0 kg
        >>> print(vector[0:2])
        [0.0, 1.0] kg
        >>> print(vector[[0,2,]])
        [0.0, 2.0] kg
        """
        return new_quantity(self._number[index], self.unit)

    def take(self, indices):
        return VectorQuantity(self._number.take(indices), self.unit)

    def put(self, indices, vector):
        try:
            if self.unit.is_zero():
                # Adopt the incoming unit when we have none yet.
                self.unit = vector.unit
            self._number.put(indices, vector.value_in(self.unit))
        except AttributeError:
            if not is_quantity(vector):
                raise ValueError("Tried to put a non quantity value in a quantity")
            raise

    def __setitem__(self, index, quantity):
        """Update the "index" component to the specified quantity.
        :argument index: index of the component, valid values
        for 3 dimensional vectors are: ``[0,1,2]``
        :quantity: quantity to set, will be converted to
        the unit of this vector
        >>> from amuse.units import si
        >>> vector = [0.0, 1.0, 2.0] | si.kg
        >>> g = si.kg / 1000
        >>> vector[1] = 3500 | g
        >>> print(vector)
        [0.0, 3.5, 2.0] kg
        """
        quantity = as_vector_quantity(quantity)
        if self.unit.is_zero():
            self.unit = quantity.unit
        if (
            isinstance(quantity, VectorQuantity)
        ):
            if len(quantity) == 1:
                self._number[index] = quantity[0].value_in(self.unit)
            else:
                self._number[index] = quantity[:].value_in(self.unit)
        else:
            self._number[index] = quantity.value_in(self.unit)

    @property
    def number(self):
        return self._number

    @property
    def x(self):
        """The x axis component of a 3 dimensional vector.
        This is equavalent to the first component of vector.
        :returns: x axis component as a quantity
        >>> from amuse.units import si
        >>> vector = [1.0, 2.0, 3.0] | si.kg
        >>> print(vector.x)
        1.0 kg
        """
        return new_quantity(self.number[numpy.newaxis, ..., 0][0], self.unit)

    @property
    def y(self):
        """The y axis component of a 3 dimensional vector.
        This is equavalent to the second component of vector.
        :returns: y axis component as a quantity
        >>> from amuse.units import si
        >>> vector = [1.0, 2.0, 3.0] | si.kg
        >>> print(vector.y)
        2.0 kg
        """
        return new_quantity(self.number[numpy.newaxis, ..., 1][0], self.unit)

    @property
    def z(self):
        """The z axis component of a 3 dimensional vector.
        This is equavalent to the third component of vector.
        :returns: z axis component as a quantity
        >>> from amuse.units import si
        >>> vector = [1.0, 2.0, 3.0] | si.kg
        >>> print(vector.z)
        3.0 kg
        """
        return new_quantity(self.number[numpy.newaxis, ..., 2][0], self.unit)

    def indices(self):
        # BUGFIX: was ``for x in len(self._number)`` which raised TypeError
        # (iterating over an int); iterate over the index range instead.
        for x in range(len(self._number)):
            yield x

    def copy(self):
        return new_quantity(self.number.copy(), self.unit)

    def norm_squared(self):
        return self.length_squared()

    def norm(self):
        return self.length()

    def append(self, scalar_quantity):
        """
        Append a scalar quantity to this vector.
        >>> from amuse.units import si
        >>> vector = [1.0, 2.0, 3.0] | si.kg
        >>> vector.append(4.0 | si.kg)
        >>> print(vector)
        [1.0, 2.0, 3.0, 4.0] kg
        """
        append_number = numpy.array(scalar_quantity.value_in(self.unit))  # fix for deg, unitless
        # The following lines make sure that appending vectors works as expected,
        # e.g. ([]|units.m).append([1,2,3]|units.m) -> [[1,2,3]] | units.m
        # e.g. ([[1,2,3]]|units.m).append([4,5,6]|units.m) -> [[1,2,3],[4,5,6]] | units.m
        if (append_number.shape and (len(self._number) == 0 or self._number.shape[1:] == append_number.shape)):
            new_shape = [1 + self._number.shape[0]] + list(append_number.shape)
        else:
            new_shape = -1
        self._number = numpy.append(self._number, append_number).reshape(new_shape)

    def extend(self, vector_quantity):
        """
        Concatenate the vector quantity to this vector.
        If the units differ, the vector_quantity argument
        is converted to the units of this vector.
        >>> from amuse.units import units
        >>> vector1 = [1.0, 2.0, 3.0] | units.kg
        >>> vector2 = [1500, 2500, 6000] | units.g
        >>> vector1.extend(vector2)
        >>> print(vector1)
        [1.0, 2.0, 3.0, 1.5, 2.5, 6.0] kg
        """
        self._number = numpy.concatenate((self._number, vector_quantity.value_in(self.unit)))

    def prepend(self, scalar_quantity):
        """
        Prepend the scalar quantity before this vector.
        If the units differ, the scalar_quantity argument
        is converted to the units of this vector.
        >>> from amuse.units import units
        >>> vector1 = [1.0, 2.0, 3.0] | units.kg
        >>> vector1.prepend(0.0 | units.kg)
        >>> print(vector1)
        [0.0, 1.0, 2.0, 3.0] kg
        """
        self._number = numpy.concatenate(([scalar_quantity.value_in(self.unit)], self._number))

    def minimum(self, other):
        """
        Return the minimum of self and the argument.
        >>> from amuse.units import si
        >>> v1 = [1.0, 2.0, 3.0] | si.kg
        >>> v2 = [0.0, 3.0, 4.0] | si.kg
        >>> v1.minimum(v2)
        quantity<[0.0, 2.0, 3.0] kg>
        """
        other_in_my_units = other.as_quantity_in(self.unit)
        is_smaller_than = self.number < other_in_my_units.number
        values = numpy.where(is_smaller_than, self.number, other_in_my_units.number)
        return VectorQuantity(values, self.unit)

    def maximum(self, other):
        """
        Return the maximum of self and the argument.
        >>> from amuse.units import si
        >>> v1 = [1.0, 2.0, 3.0] | si.kg
        >>> v2 = [0.0, 3.0, 4.0] | si.kg
        >>> v1.maximum(v2)
        quantity<[1.0, 3.0, 4.0] kg>
        """
        other_in_my_units = other.as_quantity_in(self.unit)
        is_larger_than = self.number > other_in_my_units.number
        values = numpy.where(is_larger_than, self.number, other_in_my_units.number)
        return VectorQuantity(values, self.unit)

    def max(self, axis=None):
        """
        Return the maximum along an axis.
        >>> from amuse.units import si
        >>> v1 = [1.0, 2.0, 3.0] | si.kg
        >>> v1.amax()
        quantity<3.0 kg>
        """
        return self.unit.new_quantity(numpy.amax(self.number, axis=axis))

    def min(self, axis=None):
        """
        Return the minimum value along an axis.
        >>> from amuse.units import si
        >>> v1 = [1.0, 2.0, 3.0] | si.kg
        >>> v1.amin()
        quantity<1.0 kg>
        """
        return self.unit.new_quantity(numpy.amin(self.number, axis=axis))
    amin = min
    amax = max

    def argmax(self, axis=None, **options):
        """
        Return the index (or indices along an axis) of the maximum values.

        BUGFIX: this method used to be defined twice in this class; the
        second definition shadowed the first and silently dropped support
        for a positional ``axis`` argument. The definitions are merged
        here into a single, backward-compatible signature.

        >>> from amuse.units import si
        >>> v1 = [[1.0, 2.0, 3.0], [2.5, 2.5, 2.5]] | si.kg
        >>> v1.argmax(axis=0)
        array([1, 1, 0])
        >>> v2 = [1.0, 3.0, 2.0] | si.kg
        >>> v2.argmax()
        1
        """
        return numpy.argmax(self.number, axis=axis, **options)

    def argmin(self, axis=None):
        """
        Return the indices of the minimum values along an axis.
        >>> from amuse.units import si
        >>> v1 = [[1.0, 2.0, 3.0], [2.5, 2.5, 2.5]] | si.kg
        >>> v1.argmin(axis=0)
        array([0, 0, 1])
        """
        return numpy.argmin(self.number, axis=axis)

    def sorted(self):
        """
        Return a new vector with all items sorted.
        >>> from amuse.units import si
        >>> v1 = [3.0, 1.0, 2.0] | si.kg
        >>> v1.sorted()
        quantity<[1.0, 2.0, 3.0] kg>
        """
        sorted_values = numpy.sort(self.number)
        return VectorQuantity(sorted_values, self.unit)

    def argsort(self, **options):
        """
        Returns the indices that would sort an array.
        >>> from amuse.units import si
        >>> v1 = [3.0, 1.0, 2.0] | si.kg
        >>> v1.argsort()
        array([1, 2, 0])
        """
        return numpy.argsort(self.number, **options)

    def sorted_with(self, *others):
        """
        Return a new vector with all items sorted. Perform
        all the same move operations on the other vectors.
        :argument: kind, the sort method for supported kinds see
        the numpy.sort documentation
        >>> from amuse.units import si
        >>> v1 = [3.0, 1.0, 2.0] | si.kg
        >>> v2 = [2.0, 3.0, 2.0] | si.m
        >>> v3 = [1.0, 4.0, 5.0] | si.s
        >>> list(v1.sorted_with(v2, v3))
        [quantity<[1.0, 2.0, 3.0] kg>, quantity<[3.0, 2.0, 2.0] m>, quantity<[4.0, 5.0, 1.0] s>]
        """
        indices = numpy.lexsort([self.number])
        vectors = []
        vectors.append(self)
        for x in others:
            vectors.append(x)
        for x in vectors:
            yield VectorQuantity(numpy.take(x.number, indices), x.unit)

    def accumulate(self):
        return VectorQuantity(numpy.add.accumulate(self.number), self.unit)

    def reshape(self, shape):
        return VectorQuantity(self.number.reshape(shape), self.unit)

    def transpose(self, axes=None):
        return VectorQuantity(self.number.transpose(axes), self.unit)

    @property
    def T(self):
        return VectorQuantity(self.number.T, self.unit)

    def mean(self, axis=None, dtype=None, out=None):
        return new_quantity(self.number.mean(axis, dtype, out), self.unit)

    def median(self, **kwargs):
        return new_quantity(numpy.median(self.number, **kwargs), self.unit)

    def std(self, axis=None, dtype=None, out=None, ddof=0):
        return new_quantity(self.number.std(axis, dtype, out, ddof), self.unit)

    def cross(self, other, axisa=-1, axisb=-1, axisc=-1, axis=None):
        """
        Return the cross product of this vector quantity with the supplied vector (quantity).
        """
        other = to_quantity(other)
        return new_quantity_nonone(
            numpy.cross(self.number, other.number, axisa=axisa, axisb=axisb, axisc=axisc, axis=axis),
            (self.unit * other.unit).to_simple_form()
        )

    def dot(self, other, **kwargs):
        """
        Return the dot product of this vector quantity with the supplied vector (quantity).
        >>> from amuse.units import units
        >>> v1 = [1.0, 2.0, 3.0] | units.m
        >>> v1.dot(v1)
        quantity<14.0 m**2>
        """
        other = to_quantity(other)
        return new_quantity_nonone(
            numpy.dot(self.number, other.number, **kwargs),
            (self.unit * other.unit).to_simple_form()
        )

    def __getstate__(self):
        return (self.unit, self.number)

    def __setstate__(self, x):
        self.unit = x[0]
        self._number = x[1]
class ZeroQuantity(Quantity):
    """
    A ZeroQuantity object represents zero in all units and
    can be used as the start for summing up purposes.
    >>> from amuse.units import si
    >>> x = zero
    >>> x += 2.0 | si.kg
    >>> x
    quantity<2.0 kg>
    """
    def __init__(self):
        Quantity.__init__(self, zero_unit())
        self.base = ()
        self.factor = 1
        self.number = 0.0
        self.dtype = 'float64'

    def is_scalar(self):
        """
        True for scalar quantities.
        """
        return False

    def is_vector(self):
        """
        True for vector quantities.
        """
        return False

    def is_non_numeric(self):
        return False

    def iskey(self):
        return False

    def __str__(self):
        return "zero"

    # Arithmetic identities: zero absorbs multiplication/division and is the
    # neutral element for addition/subtraction in any unit.
    def __add__(self, other):
        return other

    def __sub__(self, other):
        return -other

    def __mul__(self, other):
        return self

    def __pow__(self, other):
        return self

    def __rmul__(self, other):
        return self

    def __truediv__(self, other):
        return self

    def __rtruediv__(self, other):
        # NOTE(review): divides by 0.0, so plain numbers raise
        # ZeroDivisionError here — presumably intentional; verify.
        return other/self.number

    def __div__(self, other):
        return self.__truediv__(other)

    def __rdiv__(self, other):
        return self.__rtruediv__(other)

    def in_base(self):
        return self

    def new_zeros_array(self, length):
        array = numpy.zeros(length, dtype=self.dtype)
        return new_quantity(array, zero_unit())

    def sqrt(self):
        return self

    def as_quantity_in(self, another_unit):
        # Zero is expressible in any unit.
        return new_quantity(self.number, another_unit)

    def value_in(self, unit):
        return self.number

    def __abs__(self):
        return self

    def __neg__(self):
        return self

    def as_vector_with_length(self, length):
        return self.new_zeros_array(length)

    def __reduce__(self):
        # Pickle as a reference to the module-level singleton.
        return "zero"

    # Comparisons against zero work for any unit: only the raw number of the
    # other operand matters.
    def __lt__(self, other):
        other_as_q = to_quantity(other)
        return 0 < other_as_q.value_in(other_as_q.unit)

    def __gt__(self, other):
        other_as_q = to_quantity(other)
        return 0 > other_as_q.value_in(other_as_q.unit)

    def __eq__(self, other):
        other_as_q = to_quantity(other)
        return 0 == other_as_q.value_in(other_as_q.unit)

    def __ne__(self, other):
        other_as_q = to_quantity(other)
        return 0 != other_as_q.value_in(other_as_q.unit)

    def __le__(self, other):
        other_as_q = to_quantity(other)
        return 0 <= other_as_q.value_in(other_as_q.unit)

    def __ge__(self, other):
        other_as_q = to_quantity(other)
        return 0 >= other_as_q.value_in(other_as_q.unit)

# Module-level singleton; __reduce__ above pickles back to this object.
zero = ZeroQuantity()
class NonNumericQuantity(Quantity):
    """
    A Non Numeric Quantity object represents a quantity without
    a physical meaning.
    These Quantity objects cannot be used in
    numeric operations (like addition or multiplication). Also,
    conversion to another unit is not possible.
    Examples are string quantities or enumerated value quantities.
    >>> from amuse.units.core import enumeration_unit
    >>> my_unit = enumeration_unit(
    ...     "x",
    ...     "x",
    ...     [1,3,4],
    ...     ["first", "second", "third"])
    ...
    >>> 3 | my_unit
    quantity<3 - second>
    >>> (3 | my_unit).value_in(my_unit)
    3
    """
    def __init__(self, value, unit):
        Quantity.__init__(self, unit)
        self.value = value
        # Enumeration units restrict the allowed values; fail fast otherwise.
        if not unit.is_valid_value(value):
            raise exceptions.AmuseException(f"<{value}> is not a valid value for {unit!r}")

    def as_quantity_in(self, another_unit):
        # Conversion is only allowed to the identical unit.
        if not another_unit == self.unit:
            raise exceptions.AmuseException("Cannot convert non-numeric quantities in to another unit")
        return new_quantity(self.value, another_unit)

    def value_in(self, unit):
        if not unit == self.unit:
            raise exceptions.AmuseException("Cannot convert non-numeric quantities in to another unit")
        return self.value

    def __str__(self):
        return self.unit.value_to_string(self.value)

    def __repr__(self):
        return f'quantity<{str(self.value)} - {str(self)}>'

    def as_vector_with_length(self, length):
        return VectorQuantity(numpy.array([self.value] * length), self.unit)

    def as_vector_quantity(self):
        return VectorQuantity([self.value], self.unit)

    def new_zeros_array(self, length):
        array = numpy.zeros(length, dtype=self.unit.dtype)
        return new_quantity(array, self.unit)

    def __getstate__(self):
        return (self.unit, self.value)

    def __setstate__(self, x):
        self.unit = x[0]
        self.value = x[1]
class AdaptingVectorQuantity(VectorQuantity):
    """
    Adapting vector quanity objects will adapt their units to the
    first object added to the vector
    """
    def __init__(self, value=[], unit=None):
        VectorQuantity.__init__(self, value, unit)
        # The backing store is a plain Python list; the ndarray view is
        # rebuilt on demand via __getattr__ below.
        del self._number
        self._number_list = list(value)
        if unit is None:
            # Until the first append fixes the unit, route through the
            # *_start variants which capture the unit of the first element.
            self.append = self.append_start
            self.prepend = self.prepend_start
        else:
            self.append = self.append_normal
            self.prepend = self.prepend_normal

    def __getattr__(self, attribute):
        # Only reached when _number is missing: recompute the array from the
        # list. NOTE: this builds a fresh ndarray on every access.
        if attribute == "_number":
            if self.unit is None:
                return numpy.array(self._number_list)
            else:
                return numpy.array(self._number_list, dtype=self.unit.dtype)
        else:
            raise AttributeError

    def append_start(self, quantity):
        # First element: adopt its unit, then switch to the normal paths.
        self.unit = quantity.unit
        self._number_list.append(quantity.value_in(self.unit))
        self.append = self.append_normal
        self.prepend = self.prepend_normal

    def append_normal(self, quantity):
        self._number_list.append(quantity.value_in(self.unit))
    prepend_start = append_start

    def prepend_normal(self, quantity):
        self._number_list.insert(0, quantity.value_in(self.unit))

    def extend(self, quantity):
        for x in quantity:
            self.append(x)

    def __setitem__(self, index, quantity):
        quantity_in_my_units = quantity.as_quantity_in(self.unit)
        self._number_list[index] = quantity_in_my_units.number
        # NOTE(review): _remove_cached_number is not defined in this class or
        # any base visible here — verify it exists elsewhere, otherwise this
        # raises AttributeError (via __getattr__ above).
        self._remove_cached_number()

    def __getitem__(self, index):
        return new_quantity(self.number[index], self.unit)

    def __str__(self):
        if self.unit is None:
            return str(self._number_list)
        else:
            return console.current_printing_strategy.quantity_to_string(self)
# Sequence types that become VectorQuantity instances.
__array_like = (list, tuple, numpy.ndarray, range)

def new_quantity(value, unit):
    """Create a new Quantity object.
    :argument value: numeric value of the quantity, can be
    a number or a sequence (list or ndarray)
    :argument unit: unit of the quantity
    :returns: new ScalarQuantity or VectorQuantity object
    """
    if isinstance(value, __array_like):
        return VectorQuantity(value, unit)
    # Scalar path: enumeration-style units get their dedicated wrapper.
    return (
        NonNumericQuantity(value, unit)
        if unit.is_non_numeric()
        else ScalarQuantity(value, unit)
    )
def new_quantity_from_unit(unit, value):
    # Argument-swapped variant of new_quantity, suitable as a unit method.
    return new_quantity(value, unit)

# Attach to the unit class so units can construct quantities directly:
# some_unit.new_quantity(value).
core.unit.new_quantity = new_quantity_from_unit
def new_quantity_nonone(value, unit):
    """Create a new Quantity object.
    :argument value: numeric value of the quantity, can be
    a number or a sequence (list or ndarray)
    :argument unit: unit of the quantity
    :returns: new ScalarQuantity or VectorQuantity object

    Unlike new_quantity, a dimensionless unit (empty base) collapses the
    result to a plain number/array scaled by the unit's factor.
    """
    is_array = isinstance(value, __array_like)
    if not unit.base:
        # Dimensionless: strip the unit, keep only the numeric factor.
        numeric = numpy.asarray(value) if is_array else value
        return numeric * unit.factor
    if is_array:
        return VectorQuantity(value, unit)
    if unit.is_non_numeric():
        return NonNumericQuantity(value, unit)
    return ScalarQuantity(value, unit)
def is_quantity(x):
    """Return True when *x* implements the quantity protocol and reports so."""
    if not hasattr(x, "is_quantity"):
        return False
    return x.is_quantity()
def is_unit(x):
    """Heuristic unit test: unit objects expose a ``base`` attribute."""
    try:
        x.base
    except AttributeError:
        return False
    return True
def isNumber(x):
    """Return True when *x* behaves like a number.

    The probe is ``0 == x*0``: numeric zero times anything numeric is 0,
    while e.g. strings fail the comparison (``"a"*0 == ""``) and objects
    without multiplication raise and are reported as non-numbers.
    """
    try:
        return 0 == x*0
    # BUGFIX: was a bare ``except:`` which also swallowed KeyboardInterrupt
    # and SystemExit; Exception keeps the best-effort behavior while letting
    # those propagate.
    except Exception:
        return False
def as_vector_quantity(value):
    # Coerce *value* to a (vector) quantity, wrapping bare numbers and
    # numeric arrays in the dimensionless unit ``none``.
    if is_quantity(value):
        return value
    else:
        if isinstance(value, numpy.ndarray) and numpy.issubdtype(value.dtype, numpy.number):
            return new_quantity(value, none)
        if isinstance(value, __array_like):  # its not a homogeneous numpy array, this can be slow
            # Element-wise recursion: the AdaptingVectorQuantity picks up the
            # unit of the first converted element.
            result = AdaptingVectorQuantity()
            for subvalue in value:
                result.append(as_vector_quantity(subvalue))
            return result
        else:
            if isNumber(value):
                return new_quantity(value, none)
            else:
                raise Exception("Cannot convert '{0!r}' to a vector quantity".format(value))
def to_quantity(x):
    """Return *x* unchanged when it is a quantity, else wrap it in ``none``."""
    return x if is_quantity(x) else new_quantity(x, none)

def as_quantity_in(x, unit):
    """Coerce *x* to a quantity and express it in *unit*."""
    return to_quantity(x).as_quantity_in(unit)

def value_in(x, unit):
    """Numeric value of *x* expressed in *unit*."""
    return to_quantity(x).value_in(unit)
def concatenate(quantities):
    """Concatenate a sequence of quantities into one VectorQuantity.

    All elements are converted to the unit of the first one; plain
    (non-quantity) sequences fall through to ``numpy.concatenate``.
    """
    head = quantities[0]
    if not is_quantity(head):
        return numpy.concatenate(quantities)
    unit = head.unit
    numbers = [each.value_in(unit) for each in quantities]
    return VectorQuantity(numpy.concatenate(numbers), unit)
def column_stack(args):
    # Unit-aware numpy.column_stack: preserves the unit when every column
    # shares one, otherwise falls back to plain numpy.
    args_ = [to_quantity(x) for x in args]
    units = set([x.unit for x in args_])
    if len(units) == 1:
        return new_quantity(numpy.column_stack([x.number for x in args_]), args_[0].unit)
    else:
        # NOTE(review): mixed units fall back to numpy.column_stack on the
        # original args, discarding unit information — verify this is intended.
        return numpy.column_stack(args)
def stack(args):
    # Unit-aware numpy.stack; same single-unit/fallback behavior as
    # column_stack above.
    args_ = [to_quantity(x) for x in args]
    units = set([x.unit for x in args_])
    if len(units) == 1:
        return new_quantity(numpy.stack([x.number for x in args_]), args_[0].unit)
    else:
        # NOTE(review): mixed units drop unit information — verify intended.
        return numpy.stack(args)
def arange(start, stop, step):
    """Unit-aware ``numpy.arange``; plain numbers pass straight to numpy.

    All three arguments are expressed in the unit of *start* before the
    range is built; the result carries that unit.
    """
    if not is_quantity(start):
        return numpy.arange(start, stop, step)
    unit = start.unit
    values = numpy.arange(
        start.value_in(unit),
        stop.value_in(unit),
        step.value_in(unit),
    )
    return new_quantity(values, unit)
def linspace(start, stop, num = 50, endpoint=True, retstep=False):
    # Unit-aware numpy.linspace; stop/step share the unit of start.
    if not is_quantity(start):
        return numpy.linspace(start, stop, num, endpoint, retstep)
    unit = start.unit
    start_value = start.value_in(unit)
    stop_value = stop.value_in(unit)
    array = numpy.linspace(start_value, stop_value, num, endpoint, retstep)
    if retstep:
        # numpy returned (samples, step); re-attach the unit to both.
        return new_quantity(array[0], unit), new_quantity(array[1], unit)
    else:
        return new_quantity(array, unit)
def separate_numbers_and_units(values):
    """Split *values* into parallel lists of raw numbers and units.

    Non-quantity entries pass through unchanged, paired with the
    dimensionless unit ``none``.
    """
    numbers = []
    units = []
    for value in values:
        quantity_like = is_quantity(value)
        numbers.append(value.number if quantity_like else value)
        units.append(value.unit if quantity_like else none)
    return numbers, units
def meshgrid(*xi, **kwargs):
    # Unit-aware numpy.meshgrid: strip units, build the grids, then re-attach
    # each input's unit to the corresponding output matrix via "|".
    unitless_xi, units = separate_numbers_and_units(xi)
    result = numpy.meshgrid(*unitless_xi, **kwargs)
    return [matrix | unit for matrix, unit in zip(result, units)]
def polyfit(x, y, deg):
    # Unit-aware numpy.polyfit: coefficient i of a degree-deg fit has unit
    # y_unit / x_unit**(deg - i), so evaluating the polynomial yields y_unit.
    (x_number, y_number), (x_unit, y_unit) = separate_numbers_and_units([x, y])
    fit = numpy.polyfit(x_number, y_number, deg)
    fit = [f | y_unit/(x_unit**(deg-i)) for i, f in enumerate(fit)]
    return fit
def polyval(p, x):
    # Unit-aware numpy.polyval for coefficient lists produced by polyfit.
    if len(p) == 1:
        # Constant polynomial: broadcast to x's shape.
        # NOTE(review): assumes x has a .shape here (ndarray/VectorQuantity);
        # a plain scalar x would raise — TODO confirm callers.
        return numpy.ones(x.shape) * p[0]
    p_number, p_unit = separate_numbers_and_units(p)
    # The constant term fixes the output unit; the ratio of the last two
    # coefficient units recovers the unit of x.
    y_unit = p_unit[-1].to_reduced_form()
    x_unit = (y_unit/p_unit[-2]).to_reduced_form()
    if x_unit != none:
        x = x.value_in(x_unit)
    value = numpy.polyval(p_number, x)
    return value | y_unit
def searchsorted(a, v, **kwargs):
    # Unit-aware numpy.searchsorted: compare both operands in a's unit.
    if is_quantity(a):
        # NOTE(review): assumes v is also a quantity when a is — a plain
        # number v would raise AttributeError here; verify callers.
        return numpy.searchsorted(a.value_in(a.unit), v.value_in(a.unit), **kwargs)
    else:
        return numpy.searchsorted(a, v, **kwargs)
def sign(x):
    """Unit-free sign of *x*, accepting quantities and plain numbers alike."""
    quantity = to_quantity(x)
    return numpy.sign(quantity.number)
if HAS_ASTROPY:
    def to_astropy(quantity):
        """Convert a quantity from AMUSE to Astropy.

        The quantity is decomposed into the seven S.I. base units and
        rebuilt as an Astropy quantity.  NOTE(review): any base unit that
        is not one of the seven S.I. bases is silently dropped from the
        result -- confirm this is intended for exotic unit systems.
        """
        # NOTE: we need to go through SI base here because AMUSE and Astropy
        # don't necessarily agree on derived unit definitions...
        # Find the SI bases of the unit
        unit = quantity.unit
        unit_bases = unit.base
        # Find the quantity's value in base units
        value = quantity.value_in(unit.base_unit())
        # Reconstruct the quantity in Astropy units.  Each entry of
        # unit_bases is an (exponent, base_unit) pair.
        ap_quantity = value
        for base_unit in unit_bases:
            if base_unit[1] == amuse.units.si.m:
                ap_quantity = ap_quantity * astropy.units.m**base_unit[0]
            elif base_unit[1] == amuse.units.si.kg:
                ap_quantity = ap_quantity * astropy.units.kg**base_unit[0]
            elif base_unit[1] == amuse.units.si.s:
                ap_quantity = ap_quantity * astropy.units.s**base_unit[0]
            elif base_unit[1] == amuse.units.si.A:
                ap_quantity = ap_quantity * astropy.units.A**base_unit[0]
            elif base_unit[1] == amuse.units.si.K:
                ap_quantity = ap_quantity * astropy.units.K**base_unit[0]
            elif base_unit[1] == amuse.units.si.mol:
                ap_quantity = ap_quantity * astropy.units.mol**base_unit[0]
            elif base_unit[1] == amuse.units.si.cd:
                ap_quantity = ap_quantity * astropy.units.cd**base_unit[0]
        return ap_quantity
    def from_astropy(ap_quantity):
        """Convert a quantity from Astropy to AMUSE.

        The Astropy quantity is reduced to S.I. bases and rebuilt as an
        AMUSE quantity.  NOTE(review): bases other than the seven S.I.
        base units (matched by name) are silently dropped -- confirm.
        """
        # NOTE: we need to go through SI base here because AMUSE and Astropy
        # don't necessarily agree on derived unit definitions...
        # Find SI bases of the unit
        si_bases = ap_quantity.si.unit.bases
        si_powers = ap_quantity.si.unit.powers
        si_units = list(zip(si_powers, si_bases))
        # Find the quantity's value in base units
        si_value = ap_quantity.si.value
        # Reconstruct the quantity in AMUSE units.  Each entry of
        # si_units is a (power, astropy_base_unit) pair.
        amuse_quantity = si_value
        for base_unit in si_units:
            if base_unit[1].name == "m":
                amuse_quantity = amuse_quantity * (
                    1 | amuse.units.si.m**base_unit[0]
                )
            elif base_unit[1].name == "kg":
                amuse_quantity = amuse_quantity * (
                    1 | amuse.units.si.kg**base_unit[0]
                )
            elif base_unit[1].name == "s":
                amuse_quantity = amuse_quantity * (
                    1 | amuse.units.si.s**base_unit[0]
                )
            elif base_unit[1].name == "A":
                amuse_quantity = amuse_quantity * (
                    1 | amuse.units.si.A**base_unit[0]
                )
            elif base_unit[1].name == "K":
                amuse_quantity = amuse_quantity * (
                    1 | amuse.units.si.K**base_unit[0]
                )
            elif base_unit[1].name == "mol":
                amuse_quantity = amuse_quantity * (
                    1 | amuse.units.si.mol**base_unit[0]
                )
            elif base_unit[1].name == "cd":
                amuse_quantity = amuse_quantity * (
                    1 | amuse.units.si.cd**base_unit[0]
                )
        return amuse_quantity
| 45,030
| 29.16142
| 126
|
py
|
amuse
|
amuse-main/src/amuse/units/generic_unit_converter.py
|
import numpy
from amuse.units.quantities import new_quantity
from amuse.units.quantities import is_unit
from amuse.units.quantities import is_quantity
from amuse.units.generic_unit_system import *
from amuse.support import exceptions
from amuse.support.core import late
class UnitsNotOrtogonalException(exceptions.AmuseException):
    """Raised when the quantities handed to a unit converter do not span
    an orthogonal basis of the required dimension.

    NOTE: the misspelled class name ("Ortogonal") is kept for backward
    compatibility with callers that catch this exception by name.
    """
    # Fixed spelling in the user-facing message ("orthoganal" ->
    # "orthogonal", "a derived units" -> "a derived unit"); the format
    # placeholders {0} (expected) and {1} (found) are unchanged.
    formatstring = 'The number of orthogonal units is incorrect, expected {0} but found {1}. To convert between S.I. units and another system of units a set of quantities with orthogonal units is needed. These can be quantities with a single unit (such as length or time) or quantities with a derived unit (such as velocity or force)'
class NotAQuantityException(exceptions.AmuseException):
    """Raised when a unit-converter argument is not a quantity at all."""
    formatstring = 'Converters need to be initialized with a quantity argument[{0}] {1!r} is not a quantity'
class NotAScalarException(exceptions.AmuseException):
    """Raised when a unit-converter argument is a quantity but not scalar."""
    formatstring = 'Converters need to be initialized with scalar quantities, argument[{0}] {1!r} is not a scalar'
class GenericToSiConverter(object):
    """Channel adapter: source is the generic unit system, target is S.I.

    Wraps a ConvertBetweenGenericAndSiUnits instance behind the
    from_source_to_target / from_target_to_source interface.
    """

    def __init__(self, generic_to_si):
        # The underlying generic <-> S.I. converter.
        self.generic_to_si = generic_to_si

    def from_source_to_target(self, quantity):
        """Convert a generic-unit quantity to S.I.; values without a unit
        (or with a non-numeric unit) pass through unchanged."""
        if not hasattr(quantity, 'unit'):
            return quantity
        if quantity.unit.is_non_numeric():
            return quantity
        return self.generic_to_si.to_si(quantity)

    def from_target_to_source(self, quantity):
        """Convert an S.I. quantity back to generic units; values without
        a unit (or with a non-numeric unit) pass through unchanged."""
        if not hasattr(quantity, 'unit'):
            return quantity
        if quantity.unit.is_non_numeric():
            return quantity
        return self.generic_to_si.to_generic(quantity)
class SiToGenericConverter(object):
    """Channel adapter: source is S.I., target is the generic unit system.

    Mirror image of GenericToSiConverter around the same underlying
    ConvertBetweenGenericAndSiUnits instance.
    """

    def __init__(self, generic_to_si):
        # The underlying generic <-> S.I. converter.
        self.generic_to_si = generic_to_si

    def from_source_to_target(self, quantity):
        """Convert an S.I. quantity to generic units; values without a
        unit (or with a non-numeric unit) pass through unchanged."""
        if not hasattr(quantity, 'unit'):
            return quantity
        if quantity.unit.is_non_numeric():
            return quantity
        return self.generic_to_si.to_generic(quantity)

    def from_target_to_source(self, quantity):
        """Convert a generic-unit quantity back to S.I.; values without a
        unit (or with a non-numeric unit) pass through unchanged."""
        if not hasattr(quantity, 'unit'):
            return quantity
        if quantity.unit.is_non_numeric():
            return quantity
        return self.generic_to_si.to_si(quantity)
class ConvertBetweenGenericAndSiUnits(object):
    """
    A ConvertBetweenGenericAndSiUnits object is a converter from
    arbitrary units which the user gives, to S.I. units (and vice versa).

    Upon instantiation you choose the base units: each given scalar
    quantity is defined to be exactly one in the new (generic) system.
    In the example below we choose the speed of light as a unit:
    *c* = 1 unit length/second, and the second as the unit of time.

    Note that the system has two base dimensions, length and time. By the
    second argument we have assigned the unit second to time, and by the
    requirement that unit length / second equals one, the new unit length
    will be {*c*} meters in S.I. units.

    Example::

        >>> from amuse.units.generic_unit_converter import ConvertBetweenGenericAndSiUnits
        >>> from amuse.units import units, constants
        >>> converter = ConvertBetweenGenericAndSiUnits(constants.c, units.m)
    """

    def __init__(self, *arguments_list):
        """Define the unit system from the given scalar quantities.

        Raises:
            NotAQuantityException / NotAScalarException: invalid argument.
            UnitsNotOrtogonalException: the quantities do not span an
                orthogonal basis of the required dimension.
        """
        self.check_arguments(arguments_list)
        self.values = arguments_list
        self.units_of_values = [x.unit for x in self.values]
        self.system_rank = len(self.values)

        # Collect the distinct S.I. base units appearing in the arguments.
        available_units = set()
        for unit in self.units_of_values:
            for factor, base_unit in unit.base:
                available_units.add(base_unit)
        # Bug fix: compare the counts with '!=' -- the original used
        # 'is not', which only worked because CPython caches small ints.
        if len(available_units) != self.system_rank:
            raise UnitsNotOrtogonalException(self.system_rank, len(available_units))
        # NOTE(review): set iteration order is arbitrary, so the column
        # order of the base matrix can vary between runs (as in the
        # original); the conversion result is unaffected.
        self.list_of_available_units = numpy.array(list(available_units))

        self.new_base = self.determine_new_base()
        rank_of_new_base = self.matrixrank(self.new_base)
        if rank_of_new_base < self.system_rank:
            raise UnitsNotOrtogonalException(self.system_rank, rank_of_new_base)
        # matrix_power(A, -1) is the matrix inverse of the base matrix.
        self.new_base_inv = numpy.linalg.matrix_power(self.new_base, -1)

    def check_arguments(self, arguments):
        """Validate that every argument is a unit or a scalar quantity."""
        for index, x in enumerate(arguments):
            if is_unit(x):
                continue
            if not is_quantity(x):
                raise NotAQuantityException(index, x)
            if not x.is_scalar():
                raise NotAScalarException(index, x)

    def matrixrank(self, A, tol=1e-8):
        """Numerical rank of A: the number of singular values above tol."""
        # compute_uv=False (was 0): we only need the singular values.
        s = numpy.linalg.svd(A, compute_uv=False)
        return numpy.sum(numpy.where(s > tol, 1, 0))

    def determine_new_base(self):
        """Return the exponent matrix of the new base.

        Row i holds, per available base unit (column), the power of that
        unit in the unit of argument i.
        """
        matrix = numpy.zeros((self.system_rank, self.system_rank))
        for row, value in enumerate(self.values):
            for n, unit in value.unit.base:
                matrix[
                    row,
                    [
                        column for column, available_unit in enumerate(
                            self.list_of_available_units
                        ) if available_unit == unit
                    ]
                ] = n
        return matrix

    @late
    def conversion_factors(self):
        """Per-base-unit scale factors of the new system.

        Solved in log space: new_base @ log(f) = log(|values|), so that
        each defining quantity becomes exactly one in the new system.
        """
        factors_of_the_bases = numpy.zeros((self.system_rank, 1))
        for row, value in enumerate(self.values):
            factors_of_the_bases[row] = value.number * value.unit.factor
        log_factors_of_the_bases = numpy.log(numpy.abs(factors_of_the_bases))
        result = numpy.array(
            numpy.exp(
                numpy.matmul(self.new_base_inv, log_factors_of_the_bases)
            )
        )[:, 0]
        return result

    @late
    def units(self):
        """List of (generic_unit, scaled_si_unit) pairs, one per base."""
        conversion_factors = self.conversion_factors
        result = []
        generic_units = mass, length, time, temperature, current, luminous_intensity
        for n, unit in enumerate(self.list_of_available_units):
            conversion_factor_for_this_base_unit = conversion_factors[n]
            for generic_unit in generic_units:
                if generic_unit.unit_in_si == unit:
                    result.append((generic_unit, conversion_factor_for_this_base_unit * unit))
        return result

    def find_si_unit_for(self, unit):
        """Return (generic_unit, si_unit) matching the generic *unit*,
        or (None, None) when it is not part of this system."""
        for unit_generic, unit_in_si in self.units:
            if unit_generic == unit:
                return unit_generic, unit_in_si
        return None, None

    def find_generic_unit_for(self, unit):
        """Return (generic_unit, si_unit) whose S.I. base matches *unit*,
        or (None, None) when it is not part of this system."""
        # 'unit_gerenic_...' is a historical misspelling, kept because it
        # is part of the public attribute surface.
        for unit_generic, unit_in_si, base_in_si in self.unit_gerenic_to_unit_in_si:
            if base_in_si == unit:
                return unit_generic, unit_in_si
        return None, None

    def to_si(self, value):
        """
        Convert a quantity in generic units to a quantity in
        S.I. units.

        .. code-block:: python

            >>> from amuse.units.generic_unit_converter import ConvertBetweenGenericAndSiUnits
            >>> from amuse.units import units, constants
            >>> converter = ConvertBetweenGenericAndSiUnits(constants.c, units.s)
            >>> print(converter.to_si(length))
            299792458.0 m
        """
        # Unitless values (and dimensionless units) pass through unchanged.
        if not hasattr(value, 'unit'):
            return value
        factor = value.unit.factor
        number = value.number
        new_unit = 1
        base = value.unit.base
        if not base:
            return value
        for n, unit in base:
            unit_in_generic, unit_in_si = self.find_si_unit_for(unit)
            if unit_in_si is not None:
                factor = factor * (unit_in_si.factor ** n)
                new_unit *= (unit_in_si.base[0][1] ** n)
            else:
                # Base units outside this system are kept as-is.
                new_unit *= (unit ** n)
        return new_quantity(number * factor, new_unit)

    def to_nbody(self, value):
        """Alias for to_generic (n-body units are the generic units)."""
        return self.to_generic(value)

    def to_generic(self, value):
        """
        Convert a quantity in S.I units to a quantity in
        generic units.

        .. code-block:: python

            >>> from amuse.units.generic_unit_converter import ConvertBetweenGenericAndSiUnits
            >>> from amuse.units import units, constants
            >>> converter = ConvertBetweenGenericAndSiUnits(constants.c, units.s)
            >>> print(converter.to_generic(constants.c))
            1.0 length * time**-1
        """
        if value.unit is None:
            return value
        base = value.unit.base
        factor = value.unit.factor
        number = value.number
        new_unit = 1
        if not base:
            return value
        for n, unit in base:
            unit_in_generic, unit_in_si = self.find_generic_unit_for(unit)
            if unit_in_si is not None:
                factor = factor / (unit_in_si.factor ** n)
                new_unit *= (unit_in_generic.base[0][1] ** n)
            else:
                # Base units outside this system are kept as-is.
                new_unit *= (unit ** n)
        return new_quantity(number * factor, new_unit)

    def as_converter_from_si_to_generic(self):
        """Return a channel-style adapter whose source is S.I."""
        return SiToGenericConverter(self)

    def as_converter_from_generic_to_si(self):
        """Return a channel-style adapter whose source is generic units."""
        return GenericToSiConverter(self)

    @late
    def mapping_from_base_in_si_to_unit_generic(self):
        """Dict mapping each S.I. base unit to its (generic, si) pair."""
        result = {}
        for unit_generic, unit_in_si in self.units:
            base_in_si = unit_in_si.base[0][1]
            result[base_in_si] = unit_generic, unit_in_si
        return result

    @late
    def unit_gerenic_to_unit_in_si(self):
        """List of (generic_unit, si_unit, si_base_unit) triples.

        NOTE: the misspelled name ('gerenic') is kept for backward
        compatibility.
        """
        result = []
        for unit_generic, unit_in_si in self.units:
            base_in_si = unit_in_si.base[0][1]
            result.append((unit_generic, unit_in_si, base_in_si,))
        return result
| 11,583
| 39.645614
| 335
|
py
|
amuse
|
amuse-main/src/amuse/units/__init__.py
| 0
| 0
| 0
|
py
|
|
amuse
|
amuse-main/src/amuse/units/scaling_converter.py
|
from amuse.units import generic_unit_system
from amuse.units.quantities import new_quantity
class ScalingConverter(object):
    """Rescale quantities by a fixed factor per generic base dimension.

    NOTE(review): the `amount` parameter is accepted but never stored --
    no amount-of-substance unit is registered in the factor table, which
    mirrors the original behaviour.
    """

    def __init__(
        self,
        length = 1,
        time = 1,
        mass = 1,
        current = 1,
        temperature = 1,
        amount = 1,
        luminous_intensity = 1
    ):
        # Map each generic base unit to its scale factor.
        self.factors = {
            generic_unit_system.mass: mass,
            generic_unit_system.length: length,
            generic_unit_system.time: time,
            generic_unit_system.temperature: temperature,
            generic_unit_system.current: current,
            generic_unit_system.luminous_intensity: luminous_intensity,
        }

    def reversed(self):
        """Return the inverse converter (reciprocal factor per dimension)."""
        factors = self.factors
        return ScalingConverter(
            length = 1 / factors[generic_unit_system.length],
            time = 1 / factors[generic_unit_system.time],
            mass = 1 / factors[generic_unit_system.mass],
            current = 1 / factors[generic_unit_system.current],
            temperature = 1 / factors[generic_unit_system.temperature],
            amount = 1,
            luminous_intensity = 1 / factors[generic_unit_system.luminous_intensity]
        )

    def convert(self, quantity):
        """Scale *quantity* by the per-dimension factors of this converter.

        The unit is rebuilt from its base units; quantities with an empty
        base (pure numbers) are returned unchanged.
        """
        unit = quantity.unit
        value = quantity.value_in(unit)
        base = unit.base
        if not base:
            return quantity
        scale = unit.factor
        new_unit = 1
        for power, base_unit in base:
            new_unit *= (base_unit ** power)
            if base_unit in self.factors:
                scale = scale * (self.factors[base_unit] ** power)
        return new_quantity(value * scale, new_unit)
| 1,853
| 33.333333
| 89
|
py
|
amuse
|
amuse-main/src/amuse/units/si.py
|
from amuse.units import core
# The S.I. unit system and its seven base units.
system = core.system('S.I.')
m = core.base_unit('length', 'meter', 'm', system)
kg = core.base_unit('mass', 'kilogram', 'kg', system)
s = core.base_unit('time', 'second', 's', system)
A = core.base_unit('electric current', 'ampere', 'A', system)
K = core.base_unit('thermodynamic temperature', 'kelvin', 'K', system)
mol = core.base_unit('amount of substance', 'mole', 'mol', system)
cd = core.base_unit('luminous intensity', 'candela', 'cd', system)
# Sentinels for unitless values.
no_system = core.no_system
none = core.none_unit('none', 'none')
no_unit = none
# Shorthand for defining named derived units (used by the prefix helpers).
named = core.named_unit
# SI prefixes: each helper wraps a unit in a named unit scaled by the
# standard decimal prefix factor (symbol prepended to the unit's symbol).
# Multiplying prefixes (deca .. yotta):
def deca(unit):
    return named('deca' + unit.name, 'da' + unit.symbol, 10. * unit)
def hecto(unit):
    return named('hecto' + unit.name, 'h' + unit.symbol, 100. * unit)
def kilo(unit):
    return named('kilo' + unit.name, 'k' + unit.symbol, 1000. * unit)
def mega(unit):
    return named('mega' + unit.name, 'M' + unit.symbol, 1.e6 * unit)
def giga(unit):
    return named('giga' + unit.name, 'G' + unit.symbol, 1.e9 * unit)
def tera(unit):
    return named('tera' + unit.name, 'T' + unit.symbol, 1.e12 * unit)
def peta(unit):
    return named('peta' + unit.name, 'P' + unit.symbol, 1.e15 * unit)
def exa(unit):
    return named('exa' + unit.name, 'E' + unit.symbol, 1.e18 * unit)
def zetta(unit):
    return named('zetta' + unit.name, 'Z' + unit.symbol, 1.e21 * unit)
def yotta(unit):
    return named('yotta' + unit.name, 'Y' + unit.symbol, 1.e24 * unit)
# Dividing prefixes (deci .. yocto):
def deci(unit):
    return named('deci' + unit.name, 'd' + unit.symbol, 0.1 * unit)
def centi(unit):
    return named('centi' + unit.name, 'c' + unit.symbol, 0.01 * unit)
def milli(unit):
    return named('milli' + unit.name, 'm' + unit.symbol, 0.001 * unit)
def micro(unit):
    return named('micro' + unit.name, 'mu' + unit.symbol, 1.e-6 * unit)
def nano(unit):
    return named('nano' + unit.name, 'n' + unit.symbol, 1.e-9 * unit)
def pico(unit):
    return named('pico' + unit.name, 'p' + unit.symbol, 1.e-12 * unit)
def femto(unit):
    return named('femto' + unit.name, 'f' + unit.symbol, 1.e-15 * unit)
def atto(unit):
    return named('atto' + unit.name, 'a' + unit.symbol, 1.e-18 * unit)
def zepto(unit):
    return named('zepto' + unit.name, 'z' + unit.symbol, 1.e-21 * unit)
def yocto(unit):
    return named('yocto' + unit.name, 'y' + unit.symbol, 1.e-24 * unit)
# Short alias for the most commonly used prefix.
k = kilo
| 2,349
| 36.301587
| 71
|
py
|
amuse
|
amuse-main/src/amuse/units/derivedsi.py
|
from amuse.units.si import (
named, s, m, kg, A, none, k,
)
from amuse.units.si import *
# Named S.I. derived units, each expressed in terms of the base units.
Hz = named('hertz', 'Hz', 1 / s)
MHz = named('megahertz', 'MHz', 1e6 * Hz)
rad = named('radian', 'rad', m / m)
sr = named('steradian', 'sr', m**2 / (m**2))
N = named('newton', 'N', kg * m / (s**2))
Pa = named('pascal', 'Pa', N / (m**2))
J = named('joule', 'J', kg * m**2 * s**-2)
W = named('watt', 'W', J / s)
F = named('farad', 'F', s**4 * A**2 * m**(-2) * kg**(-1))
C = named('coulomb', 'C', A * s)
V = named('volt', 'V', J / C)
T = named('tesla', 'T', kg / A / s / s)
tesla = T
ohm = named('ohm', 'ohm', V / A)
S = named('siemens', 'S', A / V)
Wb = named('weber', 'Wb', V * s)
weber = Wb
# handy definitions
one = 1 | none
km = k(m)
| 731
| 26.111111
| 57
|
py
|
amuse
|
amuse-main/src/amuse/units/nist.py
|
import urllib.request, urllib.error, urllib.parse, urllib.request, urllib.parse, urllib.error
import difflib
import os.path
import re
from hashlib import md5
from amuse.units import si
from amuse.units import derivedsi
# Endpoint serving the raw NIST constants table (fetched by
# GetConstantsFromFiles.get_table_from_url).
NIST_URL = "http://132.229.222.6:9000/nistdata"
# Header emitted at the top of the generated constants.py module.
MESSAGE = \
"""
#This is an auto generated file, do not change manually. Instead if you want to add constants
#or change them, change the nist.txt file and run nist.py
import numpy
from amuse.units.si import *
from amuse.units.derivedsi import *
"""
# Extra constants (derived from the NIST values) appended verbatim to the
# generated module.
ADDITIONAL_DERIVED_CONSTANTS = \
"""
pi = numpy.pi
hbar = h / (2.0 * numpy.pi)
four_pi_stefan_boltzmann = 4.0 * numpy.pi * Stefan_hyphen_Boltzmann_constant
mu0 = 4 * numpy.pi * 1.e-7 | N/A**2
eps0 = mu0**-1 * c**-2
sidereal_day = 86164.100352 | s
#machine constants
eps = numpy.finfo(numpy.double).eps
precision = int(numpy.log10(2/eps))
"""
class GetConstantsFromFiles(object):
    """Load the NIST constants table and the name-translation table.

    The table can come from the network (NIST_URL) or from the bundled
    nist.txt file; a local copy can be compared against the download.
    """

    def __init__(self):
        self.nist_table = ""
        self.local_table = ""
        self.translator_table = []
        # All files are resolved relative to this package directory.
        self.directory = os.path.dirname(__file__)

    def get_table_from_url(self):
        """Download the constants table from NIST_URL into nist_table."""
        f = urllib.request.urlopen(NIST_URL)
        try:
            # Decode to text so the table round-trips through text-mode
            # file writes (urlopen returns bytes on Python 3).
            self.nist_table = f.read().decode('utf-8')
        finally:
            f.close()

    def save_table_as(self, filename):
        """Write the downloaded table to *filename* in the package dir.

        Bug fix: the original ignored *filename* and always wrote
        'nist.txt'.
        """
        with open(os.path.join(self.directory, filename), 'w') as f:
            f.write(self.nist_table)

    def get_table_from_file(self):
        """Read the bundled table into nist_table."""
        # CODATA 2006; for CODATA 2010 use 'nist2010.txt'
        with open(os.path.join(self.directory, 'nist.txt'), 'r') as f:
            self.nist_table = f.read()

    @staticmethod
    def _md5_hex(table):
        """Hex MD5 of a table; encodes str to bytes as hashlib requires
        (the original passed str directly, a TypeError on Python 3)."""
        digest = md5()
        digest.update(table.encode('utf-8') if isinstance(table, str) else table)
        return digest.hexdigest()

    def check_current_file_with_table(self):
        """Return True when local_table and nist_table are identical."""
        return self._md5_hex(self.local_table) == self._md5_hex(self.nist_table)

    def compare_char_by_char(self):
        """Print a unified diff of the downloaded vs. the local table."""
        mydiff = difflib.unified_diff(self.nist_table.splitlines(True),
                                      self.local_table.splitlines(True))
        for line in mydiff:
            print(line)

    def get_translator(self):
        """Load translator.txt (comma-separated rows) into translator_table."""
        with open(os.path.join(self.directory, 'translator.txt'), 'r') as f:
            for line in f:
                self.translator_table.append(line.split(','))
class Constants(object):
    """Parse the fixed-column NIST table and generate constants.py.

    The parser is column-position driven and sensitive to the exact
    layout of nist.txt; the code below is kept byte-identical.
    """
    def __init__(self):
        self.I = GetConstantsFromFiles()
        #I.get_table_from_url()
        self.I.get_table_from_file()
        self.table = self.I.nist_table
        self.I.get_translator()
        self.translator = self.I.translator_table
        # Generated file contents, seeded with the auto-generated header.
        self.nistfile = MESSAGE
        # Parsed rows, later partitioned by unit category.
        self.nisttable = []
        self.nisttablederivedunits = []
        self.nisttablenoneunits = []
        self.nisttablebaseunits = []
        self.nisttabledependingunits = []
        # Names recognised as S.I. (base or derived) units.
        self.siunits = dir(si)+dir(derivedsi)
    def test_regexp(self, regexp):
        # Debug helper: show regexp matches on the first ~80 table lines.
        lines =self.table.splitlines(1)
        for i,line in enumerate(lines):
            if i>80:
                break
            print(re.findall(regexp, line))
    def translate(self, to_translate):
        # Map a raw NIST name to its python-safe name via translator.txt;
        # unknown names pass through (left-stripped).
        # NOTE(review): 'list' shadows the builtin; kept unchanged.
        list = [s[1] for s in self.translator if to_translate == s[0]]
        if list == []:
            return to_translate.lstrip(' ')
        else:
            return list[0].strip('\n')
    def list_constants(self):
        # Parse every table row into [python_name, value, unit_string].
        error =[]
        value = []
        name = []
        unit = []
        lines =self.table.splitlines(1)
        # Skip the header: data starts after the dashed separator line.
        for n, line in enumerate(lines):
            if "----------------------" in line:
                number_of_header_lines = n + 1
                break
        # Derive the column boundaries (name / value / uncertainty / unit)
        # from the first data line.
        firstline = lines[number_of_header_lines]
        namestr_length = len(firstline) - len(firstline[firstline.find(" "):].lstrip())
        column_index_of_uncertainty = len(firstline) - len(firstline[namestr_length+21+firstline[namestr_length+21:].find(" "):].lstrip())
        column_index_of_unit = len(firstline) - len(firstline[column_index_of_uncertainty+21+firstline[column_index_of_uncertainty+21:].find(" "):].lstrip())
        for i in lines[number_of_header_lines:]:
            namestr = i[0:namestr_length]
            marker1 = column_index_of_uncertainty
            marker2 = column_index_of_unit
            # Advance each marker to the next space ('\x20') so ragged
            # columns do not split a number in the middle.
            while 1:
                if i[marker1-1]=='\x20':
                    break
                else:
                    marker1+=1
            while 1:
                if i[marker2-1]=='\x20':
                    break
                else:
                    marker2+=1
            nrs=[]
            nrs.append(i[namestr_length:marker1])
            nrs.append(i[marker1:marker2])
            unitstr = i[marker2:]
            # Normalise the unit text into python syntax: ' ' -> '*', '^' -> '**'.
            unitstr = unitstr.strip().replace(' ','*').replace('^','**')
            # Sanitise the constant name into a valid python identifier.
            new_name = self.translate(namestr.rstrip(' ').replace(' ','_').replace('.','').replace('{','X').replace('}','X').replace('(','X').replace(')','X').replace('-','_hyphen_').replace(',','_and_').replace('/','_div_'))
            error.append(nrs[1].replace(' ',''))
            # NOTE(review): this_unit is computed but never used (dead code).
            if len(unitstr)==1:
                this_unit = "none\n"
            else:
                this_unit = unitstr
            # '...' marks exact values in the NIST table; strip before float().
            self.nisttable.append([new_name, float(i[namestr_length:marker1].replace(' ','').replace('...','')), unitstr])
    def sort_units(self):
        # Partition parsed rows: plain S.I. unit, unitless, S.I.-derived
        # expression, or depending on non-S.I. symbols (dropped later).
        for entry in self.nisttable:
            if entry[2] in self.siunits:
                self.nisttablebaseunits.append(entry)
            elif entry[2] == '':
                self.nisttablenoneunits.append(entry)
            elif set(re.split('[*/^]',re.sub('\*\*-?[0-9.]*','',entry[2]))).issubset(set(self.siunits)):
                self.nisttablederivedunits.append(entry)
            else:
                self.nisttabledependingunits.append(entry)
    def print_list_of_units(self, unitlist):
        # Append one 'name = value | unit' line per constant.
        for name, value, unit in unitlist:
            self.nistfile += ("{0} = {1} | {2}\n".format(name, value, unit or "none"))
    def generate_constants(self):
        # Build the full constants.py text and write it next to this file.
        self.list_constants()
        self.sort_units()
        self.nistfile += "#BASE UNITS***********************************************\n"
        self.print_list_of_units(self.nisttablebaseunits)
        self.nistfile += "#DERIVED UNITS***********************************************\n"
        self.print_list_of_units(self.nisttablederivedunits)
        self.nistfile += "#RATIOS ***********************************************\n"
        self.print_list_of_units(self.nisttablenoneunits)
        self.nistfile += "#DERIVED CONSTANTS***********************************************"
        self.nistfile += ADDITIONAL_DERIVED_CONSTANTS
        # Constants with unsupported units are kept, commented out inside
        # a triple-quoted string block.
        self.nistfile += '#DROPPED UNITS***********************************************\n"""'
        self.print_list_of_units(self.nisttabledependingunits)
        self.nistfile +='"""\n'
        f = open(os.path.join(self.I.directory, 'constants.py'), 'w')
        f.write(self.nistfile)
        f.close()
# Script entry point: regenerate constants.py from the local NIST table.
if __name__ == "__main__":
    print("Generating constants.py...", end=' ')
    Constants().generate_constants()
    print(" done!")
| 7,342
| 35.351485
| 225
|
py
|
amuse
|
amuse-main/src/amuse/couple/multiples.py
|
import sys
import numpy
import collections
import math
import copy
from amuse.datamodel import particle_attributes
from amuse.datamodel import trees
from amuse.datamodel import Particle, Particles
from amuse.rfi.core import is_mpd_running
from amuse.ic.plummer import new_plummer_model
from amuse.ic.salpeter import new_salpeter_mass_distribution_nbody
from amuse.units import nbody_system
from amuse.units import units
from amuse.units import constants
from amuse.units import quantities
from amuse.units.quantities import zero
from amuse.support.exceptions import KeysNotInStorageException
from amuse import io
#---------------------------------------------------------------------
#
# Steve's ToDo list of features to be added/improved in the multiples
# module.
#
# 1. Should use only perturbers (within ~100 x interaction scale) in
# computing the tidal energy change, not the entire system.
#
# 2. If the gravity module supports it, only perturbers need be
# synchronized before and reinitialized after the interaction.
#
# 3. Including near neighbors in a 2-body interaction is likely to
# lead to spurious binaries, since the 3- (or more-) body interaction
# will be followed to completion, when in fact it should be stopped
# when the neighbor has left the interaction region. The result is
# that binaries may form prematurely. If we want to include
# neighbors, we should also implement item 4 below, to allow long
# interactions to be broken into pieces. Including neighbors in the
# interaction may also lead to problematic final configurations and
# large internal/external tidal errors. Alternatively, we can let
# near neighbors simply veto the encounter, moving the work back into
# the gravity module, until a "clean" 2-body scattering can be
# identified. Vetoing is now the default.
#
# 4. We should seek a better prescription for compressing 3-body and
# higher-order configurations. Currently we conserve energy, but not
# angular momentum.
#
# 5. There is no provision for physical collisions in the smallN code,
# and no logic in the Multiples module to manage stars having both
# dynamical and physical radii.
#
#---------------------------------------------------------------------
# The following simple CM indexing scheme is OK for N < 1000000. An
# improved scheme might be desirable, but it must be compatible with
# the integer IDs used in most gravity modules.
# Center-of-mass (root) particle IDs start above 1000000 so they cannot
# collide with ordinary star IDs (adequate for N < 1000000).
root_index = 1000000

def new_root_index():
    """Return the next unused center-of-mass index (monotonic counter)."""
    global root_index
    root_index = root_index + 1
    return root_index
def name_object(tree):
    # Build a canonical string label for a (sub)tree of a multiple, e.g.
    # "{ 3 { 1 2 } }": children sorted by id, nested multiples recursed.
    # Accepts either a bare node (has child1/child2) or a tree wrapper
    # whose .particle holds the children.
    name = "{ "
    if hasattr(tree, "child1"):
        children = [tree.child1, tree.child2]
    else:
        #children = tree.get_tree_subset()
        children = [tree.particle.child1, tree.particle.child2]
    # Children lacking an id sort last (key 1e10).
    for child in sorted(children, \
                        key=lambda x: x.id if hasattr(x ,"id") else 1e10):
        if child.child1 is not None:
            # Internal node: recurse into the sub-multiple.
            name += name_object(child)
        else:
            # Leaf: a single star, labelled by its id.
            name += str(child.id)
        name += " "
    name += "}"
    return name
def name_pair(comp1, comp2):
    """Return the canonical '(id1,id2)' label for a two-body encounter."""
    component_ids = [str(comp1.id), str(comp2.id)]
    return "(" + ",".join(component_ids) + ")"
# Cache of canonical multiple names -> assigned root IDs, so a multiple
# that re-forms with the same structure keeps the same ID.
known_roots = {}

def assign_id_to_root(tree):
    """Return a stable root ID for the multiple described by *tree*.

    The tree is rendered to its canonical name (see name_object); a name
    seen before reuses its ID, otherwise a fresh root index is allocated
    and remembered in the module-level known_roots cache.
    """
    global known_roots
    my_name = name_object(tree)
    # Idiom fix: membership test directly on the dict instead of .keys().
    if my_name in known_roots:
        return known_roots[my_name]
    new_root_id = new_root_index()
    known_roots[my_name] = new_root_id
    return new_root_id
def is_a_parent(child1_key, child2_key):
    """True when at least one child key refers to a real particle (> 0)."""
    return max(child1_key, child2_key) > 0
def is_not_a_child(is_a_child):
    """True when the child flag is zero, i.e. the particle is not a child."""
    flag_is_zero = (is_a_child == 0)
    return flag_is_zero
def get_component_binary_elements(comp1, comp2, kep, peri = 0):
    # Compute the two-body orbital elements of the (comp1, comp2) pair
    # using the kepler module *kep*.  Returns (total mass, semi-major
    # axis, eccentricity, separation, energy per unit reduced mass, time).
    # Side effect: *kep* is (re)initialized from the relative orbit and,
    # when peri is true, advanced/returned to periastron.
    mass = comp1.mass + comp2.mass
    pos = comp2.position - comp1.position
    vel = comp2.velocity - comp1.velocity
    kep.initialize_from_dyn(mass, pos[0], pos[1], pos[2],
                            vel[0], vel[1], vel[2])
    a,e = kep.get_elements()
    r = kep.get_separation()
    E,J = kep.get_integrals()		# per unit reduced mass, note
    if peri:
        # Move the kepler state to periastron: advance when approaching
        # (mean anomaly < 0), otherwise step back to the previous one.
        M,th = kep.get_angles()
        if M < 0:
            kep.advance_to_periastron()
        else:
            kep.return_to_periastron()
    t = kep.get_time()
    return mass,a,e,r,E,t
def get_cm_binary_elements(p, kep, peri = 0):
    # Convenience wrapper: orbital elements of a center-of-mass particle's
    # two children (see get_component_binary_elements for the return value).
    return get_component_binary_elements(p.child1, p.child2, kep, peri)
class DidNotFinishException(Exception):
    """Raised when a small-N interaction fails to run to completion."""
    pass
class Multiples(object):
def __init__(self,
gravity_code,
resolve_collision_code_creation_function,
kepler_code,
gravity_constant = None, **options):
# Codes to use.
self.gravity_code = gravity_code
self.resolve_collision_code_creation_function \
= resolve_collision_code_creation_function
self.kepler = kepler_code
# Data structures.
# Local copy of CM (= root) particles in the gravity code.
self._inmemory_particles = self.gravity_code.particles.copy()
# Dictionary connecting center of mass particles with the
# multiple tree structure lying under them.
#
# Syntax:
# root_to_tree[center_of_mass] = binary_tree
#
# where center_of_mass is a Particle and binary_tree is created by
# trees.BinaryTreesOnAParticleSet.
self.root_to_tree = {}
'''
# Unecessary code since copy_attribute below does the same thing.
if len(self.gravity_code.particles) == 0:
self._inmemory_particles.id = None
else:
self._inmemory_particles.id = self.gravity_code.particles.index_in_code
'''
self._inmemory_particles.child1 = None
self._inmemory_particles.child2 = None
self.channel_from_code_to_memory = \
self.gravity_code.particles.new_channel_to(self._inmemory_particles)
self.channel_from_code_to_memory.copy_attribute("index_in_code", "id")
# FLASH interface needs a channel the other way also - Josh.
self.channel_from_memory_to_code = \
self._inmemory_particles.new_channel_to(self.gravity_code.particles)
if gravity_constant is None: # default is N-body units
gravity_constant = nbody_system.G
self.gravity_constant = gravity_constant
# Energy bookkeeping.
zero_energy = zero * self.gravity_code.kinetic_energy
self.multiples_external_tidal_correction = zero_energy
self.multiples_internal_tidal_correction = zero_energy
self.multiples_integration_energy_error = zero_energy
# Count the number of collisions we found for comparing with
# encounters algorithm.
self.number_of_collisions = 0
# Repeat encounter management data
self.old_star_1 = 0
self.old_star_2 = 0
self.repeat_count = 0
# The following tunable parameters govern the multiples logic:
# Nominal size of the top-level encounter, relative to the sum
# of the radii of the interacting components (no veto) or
# their separation (veto).
#self.initial_scale_factor = 1.0
self.initial_scale_factor = 2.0
# Perturbation above which to include a neighbor, estimated
# using the neighbor distance and the initial separation of
# the top-level two-body encounter. (Previously was a simple
# distance criterion...)
#self.neighbor_distance_factor = 1.0
#self.neighbor_distance_factor = 2.0
self.neighbor_perturbation_limit = 0.02
# Neighbor veto policy. True means we allow neighbors to veto
# a two-body encounter (meaning that don't want to deal with
# complex initial many-body configurations). False means we
# include neighbors in the multiple integration.
#self.neighbor_veto = False
self.neighbor_veto = True
# Size of the rescaled final system, relative to the initial
# scale. Should be 1 + epsilon.
self.final_scale_factor = 1.01
# Initial separation for the scattering experiment, relative
# to the initial scale. May be limited by apocenter in case
# of a bound top-level interaction.
self.initial_scatter_factor = 10.0
# Final separation for the scattering experiment, relative to
# the initial scattering scale (meaning 2 x 10 times the
# initial encounter scale, by default). May be limited by
# binary properties in case of a bound top-level interaction.
#self.final_scatter_factor = 10.0
self.final_scatter_factor = 2.0
# Binary retention policy. Retain a binary if its apocenter
# (True) or 2*semi-major axis (False) is less than the
# dynamical radius of its CM. False is the more conservative
# choice.
self.retain_binary_apocenter = True
# Maximum allowed perturbation at apocenter on a wide binary.
self.wide_perturbation_limit = 0.01
# Turn on/off global debugging output level (0 = no output, 1
# = minimal, 2 = normal debug, 3 = verbose debug).
self.global_debug = 1
# Turn on debugging in the encounters code.
self.debug_encounters = False
# Turn on/off experimental code to check tidal perturbation.
self.check_tidal_perturbation = False
@property
def particles(self):
return self.gravity_code.particles
@property
def total_mass(self):
return self.gravity_code.total_mass
@property
def kinetic_energy(self):
return self.gravity_code.kinetic_energy
@property
def potential_energy(self):
return self.gravity_code.potential_energy
@property
def parameters(self):
return self.gravity_code.parameters
@property
def model_time(self):
return self.gravity_code.model_time
@property
def stars(self):
result = self._inmemory_particles.copy()
for root, tree in self.root_to_tree.items():
root_particle = root.as_particle_in_set(self._inmemory_particles)
result.remove_particle(root)
# This returns a pointer to the actual leaves - Josh.
leaves = tree.get_leafs_subset()
original_star = tree.particle
dx = root_particle.x - original_star.x
dy = root_particle.y - original_star.y
dz = root_particle.z - original_star.z
dvx = root_particle.vx - original_star.vx
dvy = root_particle.vy - original_star.vy
dvz = root_particle.vz - original_star.vz
# Note that here we add leaves to another particle set,
# and this becomes its own deep copy of leaves. So
# changes to leaves_in_result have no effect on leaves -
# Josh.
leaves_in_result = result.add_particles(leaves)
leaves_in_result.x += dx
leaves_in_result.y += dy
leaves_in_result.z += dz
leaves_in_result.vx += dvx
leaves_in_result.vy += dvy
leaves_in_result.vz += dvz
return result
    def update_leaves_pos_vel(self):
        """Push updated positions/velocities from the gravity code down
        to the *real* leaf particles of every stored multiple.

        The FLASH interface needs a function that updates the properties
        of the leaves from the properties of the particles in the
        gravity code - Josh.

        NOTE: Unlike multiples.stars, this actually moves the real
        leaves, not a copy of the leaf particles.  So tree.particle also
        then needs to be updated - Josh.
        """
        local_debug = False
        # Update the copy in memory from the gravity code - Josh.
        self.channel_from_code_to_memory.copy()
        for root, tree in self.root_to_tree.items():
            # Root as currently seen in the in-memory set, vs. the state
            # saved in tree.particle when the multiple was created.
            root_particle = root.as_particle_in_set(self._inmemory_particles)
            leaves = tree.get_leafs_subset()
            original_star = tree.particle
            if (local_debug):
                old_leaves_x = leaves.x
                print("In update_leaves_pos_vel before update.")
                print("Tree pos =", tree.particle.position.in_(units.cm))
                print("Root pos =", root.position.in_(units.cm))
                print("Leaf pos =", leaves.position.in_(units.cm))
            # Displacement of the CM since the tree was stored.
            dx = root_particle.x - original_star.x
            dy = root_particle.y - original_star.y
            dz = root_particle.z - original_star.z
            dvx = root_particle.vx - original_star.vx
            dvy = root_particle.vy - original_star.vy
            dvz = root_particle.vz - original_star.vz
            # Shift the actual leaves, in place, by the CM displacement.
            leaves.x += dx
            leaves.y += dy
            leaves.z += dz
            leaves.vx += dvx
            leaves.vy += dvy
            leaves.vz += dvz
            # Update the original particle info stored in
            # tree.particle - Josh.
            original_star.x = root_particle.x
            original_star.y = root_particle.y
            original_star.z = root_particle.z
            original_star.vx = root_particle.vx
            original_star.vy = root_particle.vy
            original_star.vz = root_particle.vz
            if (local_debug):
                new_leaves = tree.get_leafs_subset()
                leaves_dx = leaves.x - old_leaves_x
                if (leaves_dx[0].number == 0.0):
                    print("These leaves aren't moving!")
                elif (leaves_dx[0].number == dx[0].number):
                    print("These leaves arrived precisely when they meant to!")
                else:
                    print("I have no idea what these damn leaves are doing!")
                print("leaves_dx =", leaves_dx)
                print("dx =", dx)
            if (local_debug):
                print("In update_leaves_pos_vel after update.")
                print("Tree pos =", tree.particle.position.in_(units.cm))
                print("Root pos =", root.position.in_(units.cm))
                print("Leaf pos =", leaves.position.in_(units.cm))
        return
    def create_binary(self, star1, star2):
        """Experimental code to include a binary directly into the
        multiples database.

        Builds a two-leaf tree with a center-of-mass particle, removes
        star1 and star2 from the top-level gravity code, and adds the CM
        particle in their place.  The CM carries the pair's total mass
        and center-of-mass position/velocity.
        """
        M,a,e,r,E,tperi = get_component_binary_elements(star1, star2,
                                                        self.kepler, 1)
        binary = Particles()
        binary.add_particle(star1)
        binary.add_particle(star2)
        # CM particle carrying the combined mass and CM motion.
        cm = Particle(mass=M,
                      position=binary.center_of_mass(),
                      velocity=binary.center_of_mass_velocity())
        binary.add_particle(cm)
        binary.child1 = None
        binary.child2 = None
        # Rebind cm to its copy inside the set before linking children.
        cm = binary[2]
        cm.child1 = binary[0]
        cm.child2 = binary[1]
        set_radii(binary, self.kepler, self.global_debug)
        # The components leave the top-level integration; only the CM
        # node is integrated from now on.
        self.gravity_code.particles.remove_particle(star1)
        self.gravity_code.particles.remove_particle(star2)
        # Complete the bookkeeping.
        tree = trees.BinaryTreesOnAParticleSet(binary,
                                               "child1", "child2")
        for t in tree.iter_binary_trees():    # only one tree...
            t.particle.id = assign_id_to_root(t)
            self.gravity_code.particles.add_particle(t.particle)
            self.root_to_tree[t.particle] = t.copy()
        print('\nCreated binary from', star1.id, 'and', star2.id, \
              ' CM =', t.particle.id)
        print('M =', M, ' a =', a, ' e =', e, ' E =', E)
        self.gravity_code.particles.synchronize_to(self._inmemory_particles)
        self.channel_from_code_to_memory.copy_attribute("index_in_code", "id")
def check_trees(self):
# Print out some debugging information on multiples in the system.
print('')
print('check_trees:', len(self.root_to_tree), 'tree(s)')
for root, tree in self.root_to_tree.items():
print(root.position) # current
print(tree.particle.position) # original
leaves = tree.get_leafs_subset() # components (original)
print(leaves.center_of_mass())
print('')
def get_gravity_at_point(self, radius, x, y, z):
return self.gravity_code.get_gravity_at_point(radius, x, y, z)
def get_potential_at_point(self, radius, x, y, z):
return self.gravity_code.get_potential_at_point(radius, x, y, z)
def commit_particles(self):
return self.gravity_code.commit_particles()
def get_time(self):
return self.gravity_code.get_time()
def get_total_energy(self, code):
try:
binaries_energy = code.get_binary_energy() # include binaries if
except: # the code understands
binaries_energy = zero
total_energy = code.potential_energy + code.kinetic_energy \
+ binaries_energy
return total_energy
#--------------------------------------------------------------
# Note that the true total energy of a multiple isn't quite the
# Emul returned below, since the tidal potential of components on
# one another is not taken into account.
def get_total_multiple_energy(self): # uses kepler
Nbin = 0
Nmul = 0
Emul = zero
for x in self.root_to_tree.values(): # loop over top-level trees
Nmul += 1
nb,E = get_multiple_energy(x, self.kepler)
Nbin += nb
Emul += E
return Nmul, Nbin, Emul
# This version returns the true total energy of all multiples.
def get_total_multiple_energy2(self):
Nbin = 0
Nmul = 0
Emul = zero
for x in self.root_to_tree.values(): # loop over top-level trees
Nmul += 1
nb,E = get_multiple_energy2(x, self.gravity_constant)
Nbin += nb
Emul += E
return Nmul, Nbin, Emul
def print_multiples(self): # uses kepler
# Print basic information on all multiples in the system,
# using the root_to_tree database. This version uses
# print_multiple_simple() to format the output.
if self.global_debug > 0:
for x in self.root_to_tree.values():
print_multiple_simple(x, self.kepler)
def print_multiples2(self, pre, kT, dcen): # uses kepler
# Print information on all multiples in the system, using the
# root_to_tree database. This version uses
# print_multiple_detailed() to format the output, and returns
# the numbers and energies of multiples found.
Nbin = 0
Nmul = 0
Emul = zero
for x in self.root_to_tree.values():
Nmul += 1
nb,E = print_multiple_detailed(x, self.kepler, pre, kT, dcen)
Nbin += nb
Emul += E
return Nmul, Nbin, Emul
def print_trees_summary(self):
if len(self.root_to_tree) > 0:
print('number of multiples:', len(self.root_to_tree))
sys.stdout.flush()
def evolve_model(self, end_time, callback=None):
stopping_condition = \
self.gravity_code.stopping_conditions.collision_detection
#stopping_condition.enable() # allow user to set this; don't override
time = self.gravity_code.model_time
print("\nmultiples: evolve model to", end_time, "starting at", time)
sys.stdout.flush()
count_resolve_encounter = 0
count_ignore_encounter = 0
while time <= end_time: # the <= here allows zero-length steps
if self.global_debug > 1:
print('')
print('calling evolve_model from', \
self.gravity_code.model_time, 'to', end_time)
sys.stdout.flush()
self.gravity_code.evolve_model(end_time)
newtime = self.gravity_code.model_time
# JB modified this: in Bonsai we can take a zero-length
# time step to detect multiples. That would cause the
# newtime == time to evaluate to true when there are
# multiples detected and break out of the evaluate loop
# before the time reached end_time. Same is now possible
# with ph4 (SLWM, 6/18).
if newtime == time and (stopping_condition.is_set() == False):
break
time = newtime
if stopping_condition.is_set():
# An encounter has occurred. Synchronize all stars in
# the gravity code. We synchronize everything for
# now, but it would be better to just synchronize
# neighbors if gravity_code supports that. TODO
self.gravity_code.synchronize_model()
star1 = stopping_condition.particles(0)[0]
star2 = stopping_condition.particles(1)[0]
ignore = 0
self.before = Particles()
self.after = Particles()
self.after_smalln = Particles()
#print 'self.before:', self.before
# Note from Steve, 8/12: We can pick up a lot of
# encounters that are then ignored here. I have
# (temporarily?) duplicated this check in the ph4
# module (jdata.cc).
r = (star2.position-star1.position).length()
v = (star2.velocity-star1.velocity).length()
vr = ((star2.velocity-star1.velocity) \
* (star2.position-star1.position)).sum()
EPS = 0.001
if True or vr < EPS*r*v: # True ==> keep all encounters
# returned by gravity_code
if self.global_debug > 1:
print('\n'+'~'*60)
elif self.global_debug > 0:
print('')
if self.global_debug > 0:
print('interaction at time', time)
# As with synchronize above, we should only copy
# over data for the interacting particles and
# their neighbors. TODO
self.channel_from_code_to_memory.copy()
self.channel_from_code_to_memory.copy_attribute("index_in_code", "id")
initial_energy = self.get_total_energy(self.gravity_code)
star1 = star1.as_particle_in_set(self._inmemory_particles)
star2 = star2.as_particle_in_set(self._inmemory_particles)
cont = True
if callback != None:
cont = callback(time, star1, star2)
if self.global_debug > 0:
print('initial top-level:', \
star1.id, '('+str(star1.radius)+')', \
star2.id, '('+str(star2.radius)+')')
if self.global_debug > 1:
print(' r =', r)
print(' v =', v)
print(' v.r =', vr)
sys.stdout.flush()
# Do the scattering.
veto, dE_top_level_scatter, dphi_top, dE_mul, \
dphi_int, dE_int, final_particles \
= self.manage_encounter(time, star1, star2,
self._inmemory_particles,
self.gravity_code.particles,
self.kepler)
if cont and not veto:
# Recommit is done automatically and reinitializes all
# particles. Later we will just reinitialize a list if
# gravity_code supports it. TODO
self.gravity_code.particles.synchronize_to(
self._inmemory_particles) # sets _inmemory = gravity
self.channel_from_code_to_memory.copy_attribute(
"index_in_code", "id")
final_energy = self.get_total_energy(self.gravity_code)
dE_top_level = final_energy - initial_energy
# Local bookkeeping:
#
# dE_top_level is the actual energy
# change in the top-level gravity system
# due to this encounter
#
# dE_top_level_scatter is the change in
# top-level internal energy of the
# scattering system
#
# dphi_top is the top-level tidal error
# (currently unabsorbed) due to the
# change in configuration of the
# scattering system in the top-level
# tidal field
#
# dE_mul is the change in stored
# multiple energy associated with the
# encounter
#
# dphi_int is the internal tidal energy
# error due to configuration changes in
# the scattering system
#
# dE_int is the integration error in the
# scattering calculation
#
# We *always* expect
#
# dE_top_level - dE_top_level_scatter - dphi_top = 0.
#
# If this is not the case, then there is an
# error in the internal bookkeeping of
# manage_encounter().
if self.global_debug > 2:
#print 'top-level initial energy =', initial_energy
#print 'top-level final energy =', final_energy
print('dE_top_level =', dE_top_level)
print('dE_top_level_scatter =', dE_top_level_scatter)
print('dphi_top =', dphi_top)
print('dphi_int =', dphi_int)
print('dE_int =', dE_int)
print('dE_top_level-dE_top_level_scatter-dphi_top =',\
dE_top_level - dE_top_level_scatter - dphi_top)
if self.global_debug > 2:
print('net local error =', \
dE_top_level - dE_top_level_scatter - dphi_top)
print('scatter integration error =', dE_int)
# We also expect
#
# dE_top_level_scatter + dE_mul
# = dphi_top + dE_int - dphi_int.
#
# Monitor this and keep track of the
# cumulative value of the right-hand side of
# this equation.
if self.global_debug > 2:
print('dE_mul =', dE_mul)
print('internal local error =', \
dE_top_level + dE_mul - dphi_top)
print('corrected internal local error =', \
dE_top_level + dE_mul - dphi_top \
+ dphi_int - dE_int)
self.multiples_external_tidal_correction += dphi_top
self.multiples_internal_tidal_correction -= dphi_int
self.multiples_integration_energy_error += dE_int
# Doing this energy calculation at every
# encounter is expensive when dealing with
# hundreds of binaries or more. It is clearly
# problematic when building a whole system of
# binaries.
#Nmul, Nbin, Emul = self.get_total_multiple_energy2()
# Global bookkeeping:
#
# We don't absorb individual tidal or
# integration errors, but instead store their
# totals in
#
# self.multiples_external_tidal_correction,
# self.multiples_internal_tidal_correction,
# and
# self.multiples_inegration_energy_error.
#
# Then
#
# E(top-level) + Emul
# - self.multiples_external_tidal_correction
# - self.multiples_internal_tidal_correction
# - self.multiples_integration_energy_error
#
# should be constant. Any non-conservation
# represents an error in bookkeeping or
# algorithm design.
'''
print 'total energy (top+mul) =', \
final_energy + Emul
print 'corrected total energy =', \
final_energy + Emul \
- self.multiples_external_tidal_correction \
- self.multiples_internal_tidal_correction \
- self.multiples_integration_energy_error
'''
# Print info on all multiples associated with
# the current interaction.
if self.global_debug > 1:
for x in final_particles:
if hasattr(x, "child1") \
and not (getattr(x, "child1") is None):
print_multiple_simple(
trees.BinaryTreeOnParticle(x),
self.kepler)
count_resolve_encounter += 1
else:
ignore = 1
if self.global_debug > 1:
print('~'*60)
sys.stdout.flush()
else:
ignore = 1
self.number_of_collisions += 1
'''
io.write_set_to_file((self.before,
self.after, self.after_smalln),
"multiples-{0}.h5".format(self.number_of_collisions),
"amuse", names=('before', 'after', 'after_smalln'),
version="2.0", append_to_file=False)
'''
count_ignore_encounter += ignore
print('')
print('Resolved', count_resolve_encounter, 'encounters')
print('Ignored', count_ignore_encounter, 'encounters')
sys.stdout.flush()
self.gravity_code.synchronize_model()
self.channel_from_code_to_memory.copy()
# Copy the index (ID) as used in the module to the id field in
# memory. The index is not copied by default, as different
# codes may have different indices for the same particle and
# we don't want to overwrite silently.
self.channel_from_code_to_memory.copy_attribute("index_in_code", "id")
def expand_encounter(self, scattering_stars):
# Create an encounter particle set from the top-level stars.
# Add stars to the encounter set, add in components when we
# encounter a binary/multiple.
particles_in_encounter = Particles(0)
Emul = zero
for star in scattering_stars:
if star in self.root_to_tree:
tree = self.root_to_tree[star]
isbin, dEmul = get_multiple_energy2(tree, self.gravity_constant)
Emul += dEmul
openup_tree(star, tree, particles_in_encounter)
del self.root_to_tree[star]
else:
particles_in_encounter.add_particle(star)
return particles_in_encounter, Emul
def manage_encounter(self, global_time, star1, star2,
stars, gravity_stars, kep):
# Manage an encounter between star1 and star2. Stars is the
# python memory dataset (_inmemory_particles). Gravity_stars
# is the gravity code data (only used to remove the old
# components and add new ones). On entry, stars and
# gravity_stars should contain the same information. Return
# values are the change in top-level energy, the tidal error,
# and the integration error in the scattering calculation.
# Steps below follow those defined in the PDF description.
# print 'in manage_encounter'
# sys.stdout.flush()
# Record the state of the system prior to the encounter, in
# case we need to abort and return without making any changes.
#
# 'gravity_stars' is no longer included in the snapshot below,
# as it would make N individual calls to get_state...
snapshot = {
'global_time': global_time,
'star1': star1.copy(),
'star2': star2.copy(),
'stars': stars.copy(),
#'gravity_stars': gravity_stars.copy(),
'self.root_to_tree': self.root_to_tree.copy(),
'particles_in_encounter': Particles(0),
'scattering_stars': Particles(0)
}
snapshot['particles_in_encounter'].add_particle(snapshot['star1'])
snapshot['particles_in_encounter'].add_particle(snapshot['star2'])
# find_binaries(stars, self.gravity_constant)
# self.check_trees()
#----------------------------------------------------------------
# 1a. Build a list of stars involved in the scattering. Start
# with star1 and star2.
scattering_stars = Particles(particles = (star1, star2))
star1 = scattering_stars[0]
star2 = scattering_stars[1]
center_of_mass = scattering_stars.center_of_mass()
other_stars = stars - scattering_stars # probably only need perturbers?
# Brewer Mod: Check for a repeat encounter.
if (star1.id == self.old_star_1 and star2.id == self.old_star_2) \
or (star1.id == self.old_star_2 and star2.id == self.old_star_1):
self.repeat_count += 1
else:
self.repeat_count = 0
self.old_star_1 = star1.id
self.old_star_2 = star2.id
# 1b. Add neighbors if desired. Use a perturbation criterion.
# Also impose a simple neighbor veto, if specified. Start by
# sorting all stars by perturbation on the CM. Later, use
# neighbors only, if supported. TODO
sep12 = ((star1.position-star2.position)**2).sum().sqrt()
rad12 = star1.radius + star2.radius
# Sep12 is the separation of the two original components. It
# should be slightly less than the sum of their radii, rad12,
# but it may be much less in unexpected circumstances or if
# vetoing is in effect. Initial_scale sets the "size" of the
# interaction and the distance to which the final products
# will be rescaled. Rad12 also ~ the 90 degree scattering
# distance for two stars, and hence the natural limit on
# binary scale.
if not self.neighbor_veto:
initial_scale = self.initial_scale_factor * rad12
else:
initial_scale = self.initial_scale_factor * sep12
if self.global_debug > 1:
print('initial_scale =', initial_scale)
# The basic sort on other_stars is by perturbation, not
# distance. Maintain sorted lists of stars, distances (d),
# and perturbations (actually m/d**3).
distances = (other_stars.position - center_of_mass).lengths()
pert = other_stars.mass / distances**3
indices = numpy.argsort(-pert.number) # decreasing sort
sorted_stars = other_stars[indices]
sorted_distances = distances[indices]
sorted_perturbations = pert[indices]
fac12 = 0.5*(star1.mass + star2.mass)/sep12**3
largest_perturbers = []
if self.check_tidal_perturbation and len(sorted_stars) > 0:
if self.global_debug > 1:
print("sorted_stars", sorted_stars[:5])
print("sorted_distances", sorted_distances[:5])
print("sorted_perturbations", sorted_perturbations[:5]/fac12)
max_pert = sorted_perturbations[0]/fac12
largest_perturbers = [sorted_stars[0]]
# This should be replaced with something faster using
# numpy, like:
#
# largest_perturbers = sorted_stars[np.greater(sorted_perturbations,
# 0.025*sorted_perturbations[0])] - Josh.
for i in range(1, len(sorted_stars)):
if sorted_perturbations[i] > 0.025*sorted_perturbations[0]:
largest_perturbers.append(sorted_stars[i])
# Perturbation limit for identification as a neighbor.
pert_min = self.neighbor_perturbation_limit*fac12
for i in range(len(sorted_stars)): # NB no loop if len() = 0
star = sorted_stars[i]
# Include anything lying "inside" the binary, even if it
# is a weak perturber.
if sorted_perturbations[i] > pert_min \
or sorted_distances[i] < sep12:
if not self.neighbor_veto:
scattering_stars.add_particle(star)
if self.global_debug > 1:
print('added', end=' ')
if hasattr(star, 'id'):
print('star', star.id, end=' ')
else:
print('unknown star', end=' ')
print('to scattering list')
sys.stdout.flush()
snapshot['scattering_stars'].add_particle(star)
#initial_scale = sorted_distances[i] # don't expand!
else:
if self.global_debug > 0:
print('encounter vetoed by', \
star.id, 'at distance', \
sorted_distances[i], \
'pert =', sorted_perturbations[i]/fac12)
if self.repeat_count > 0: self.repeat_count -= 1
return True, 0., 0., 0., 0., 0., None
self.before.add_particles(scattering_stars)
# Note: sorted_stars, etc. are used once more, when checking
# for wide binaries (at 6b below).
#----------------------------------------------------------------
# 2a. Calculate the total internal and external potential
# energy of stars to remove from the gravity system, using the
# potential of scattering_stars relative to the other
# top-level objects in the stars list (later: just use
# neighbors TODO).
# Terminology from the PDF description:
E0 = scattering_stars.kinetic_energy() \
+ scattering_stars.potential_energy(G=self.gravity_constant)
phi_rem = potential_energy_in_field(scattering_stars,
stars-scattering_stars,
G=self.gravity_constant)
if self.global_debug > 2:
print('E0 =', E0)
print('phi_rem =', phi_rem)
# 2b. If there are no neighbors, separate star1 and star2 to
# some larger "scattering" radius. If neighbors exist,
# just start the "scattering" interaction in place.
# First define some basic properties of the top-level
# interaction.
M,a,e,r,E,tperi = get_component_binary_elements(star1, star2,
self.kepler, 1)
Etop = E*star1.mass*star2.mass/M
ttrans = self.gravity_constant*M/(4*abs(E))**1.5
# Note: transit time = 0.056 * period for a bound orbit.
if e < 1:
peri = a*(1-e)
apo = a*(1+e)
period = self.kepler.get_period()
else:
peri = a*(e-1)
apo = 1.e9*a # 1.e9 is large but otherwise arbitrary
period = 1.e9*ttrans
initial_scatter_scale = self.initial_scatter_factor * initial_scale
# Limit initial_scatter_scale (rescale_binary_components will
# impose a limit, but good to have the limit available at this
# level).
if initial_scatter_scale > 0.9*apo:
initial_scatter_scale = 0.9*apo
if len(scattering_stars) == 2:
#print "rescaling in:", (star1.position - star2.position).length()
rescale_binary_components(star1, star2, kep,
initial_scatter_scale, compress=False)
#print "rescaling out:", (star1.position - star2.position).length()
# 2c. Remove the interacting stars from the gravity module.
for s in scattering_stars:
gravity_stars.remove_particle(s)
#----------------------------------------------------------------
# 3. Create a particle set to perform the close encounter
# calculation.
# Note this has to delete the root_to_tree particle in
# multiples as we have no idea what the end product of the
# encounter will be. So deleting in expand_encounter really
# is a feature, not a bug. Don't mess with it! - Josh
particles_in_encounter, Emul_init \
= self.expand_encounter(scattering_stars)
# Terminology from the PDF description:
E1 = particles_in_encounter.kinetic_energy() + \
particles_in_encounter.potential_energy(G=self.gravity_constant)
dphi_1 = E1 - E0 - Emul_init
if self.global_debug > 2:
print('E1 =', E1)
print('Emul_init =', Emul_init)
print('dphi_1 =', dphi_1)
#----------------------------------------------------------------
# 4. Run the small-N encounter in the center of mass frame.
total_mass = scattering_stars.mass.sum()
cmpos = scattering_stars.center_of_mass()
cmvel = scattering_stars.center_of_mass_velocity()
particles_in_encounter.position -= cmpos
particles_in_encounter.velocity -= cmvel
#E1CM = particles_in_encounter.kinetic_energy() + \
# particles_in_encounter.potential_energy(G=self.gravity_constant)
#print 'E1 (CM) =', E1CM
# Relevant available length and time scales:
#
# Encounter:
# sep12 = actual separation
# rad12 = sum of radii (should be ~b90)
#
# Top-level orbit:
# a = orbital semimajor axis
# peri = orbital periastronn
# apo = orbital apastron
# tperi = time to pericenter
# period = orbital period, if defined
# ttrans = transit time
#
# Resonance:
# rvir = viral length scale
# tvir = virial time scale
rvir = self.gravity_constant*M/(4*abs(E1/M))
tvir = self.gravity_constant*M/(4*abs(E1/M))**1.5
if self.global_debug > 2:
print('Encounter:')
print(' sep12 =', sep12)
print(' rad12 =', rad12)
print('Top-level:')
print(' E/mu =', E)
print(' Etop =', Etop)
print(' M =', M)
print(' semi =', a)
print(' ecc =', e)
print(' peri =', peri)
print(' apo =', apo)
print(' tperi =', tperi)
print(' ttrans =', ttrans)
print(' period =', period)
print('Resonance:')
print(' rvir =', rvir)
print(' tvir =', tvir)
else:
if self.global_debug > 0:
print('M =', M, ' Etop =', Etop)
if self.global_debug > 1:
print('a =', a, ' e =', e, ' P =', period)
sys.stdout.flush()
# The original concept of this module was to follow the
# encounter as an isolated scattering experiment until it is
# cleanly resolved. In this case, the bookkeeping and
# post-encounter logic are straightforward. We expect that a
# clean resolution always eventually occurs, but for a complex
# interaction this may take a long time. In addition, the
# long time scales and large excursions of the intermediate
# orbit may render the physicality of the scattering approach
# questionable.
# The alternative approach pursued below is to try to define
# limiting length and time scales for the encounter, based on
# the initial configuration. Encounters exceeding these limits
# will be returned to the large-N simulation, possibly to be
# picked up again later. This approach leads to significant
# bookkeeping issues, and the "clean" original concept should
# always be retained as a fallback option.
# We need a reliable time scale to set end_time and delta_t
# for the scattering interaction. It is possible that we pick
# up an encounter very close to periastron, so tperi may not
# be useful. With reasonable limits on encounter size and a
# treatment of quasi-stable systems, a time limit on the
# smallN integration may not be needed, but impose some
# reasonable value here, just in case. Note that we have to
# deal with the possible consequences in resolve_collision().
# Note that the current check for quasistability requires that
# the system configuration remain unchanged for 10 outer
# orbital periods, and is not yet reliable. TODO
# If the encounter is a flyby, then the relevant scales are
# the orbital semimajor axis and transit time (*10, say). We
# don't want to follow a wide bound system onto a second orbit
# unless the size of the orbit is less than a few times the 90
# degree turnaround distance. If the encounter is a
# resonance, then the relative scales are the virial radius
# (*10) and virial time scale (*100). If it is bound but wide
# (and likely a flyby), then the relevant scales are the the
# orbital semimajor axis and period (*10, say).
# Also set a limit on the minimum scale, in case of retries.
end_time = max(2*abs(tperi), 10*ttrans, 100*tvir)
if E.number < 0: end_time = max(end_time, 10*period)
delta_t = max(1.5*abs(tperi), tvir)
if self.global_debug > 1:
print('end_time =', end_time)
print('delta_t =', delta_t)
# Note: radii used here should really be based on
# perturbation, not simply distance. TODO
orbit_scale = 2*a
if E.number < 0: orbit_scale = 1.1*a*(1+e) # a*(1+0.9*e)
if self.global_debug > 2:
print('orbit_scale =', orbit_scale)
# Final_scatter_scale is the scale at which we will terminate
# the smallN integration. This is a guess of the scale where,
# if the system exceeds it, the interacting particles can be
# decomposed into well separated pieces that can be returned
# to the N=body code, even if the encounter isn't over.
final_scatter_scale \
= max(self.final_scatter_factor * initial_scatter_scale,
orbit_scale, 10*rvir)
# Limit the scatter scale in case of a very wide orbit.
if orbit_scale > 2*initial_scatter_scale \
and final_scatter_scale > orbit_scale:
final_scatter_scale = orbit_scale
min_scatter_scale = 2*initial_scale # never go below this value
if min_scatter_scale >= 0.5*final_scatter_scale:
final_scatter_scale = 2*min_scatter_scale
# The integration ends when any particle is more than
# final_scatter_scale from the CM of the system (hence the
# factor of 2). RECONSIDER - needs a mass scale factor, and
# still OK for a wide orbit? TODO
final_scatter_scale /= 2
min_scatter_scale /= 2
if self.global_debug > 1:
print('final_scatter_scale =', final_scatter_scale)
print('min_scatter_scale =', min_scatter_scale)
# NOTE: to revert to the original concept, simply set
# final_scatter_scale and end_time to very large values.
if 0:
print('particles in encounter:')
print('position:', particles_in_encounter.position)
print('velocity:', particles_in_encounter.velocity)
try:
scatter_energy_error \
= self.resolve_collision(particles_in_encounter,
final_scatter_scale,
min_scatter_scale,
end_time, delta_t)
except DidNotFinishException:
# In this case, simply abort the encounter and continue
# the main simulation.
print("*** SmallN encounter did not finish. ", \
"Aborting and returning to top level.")
global_time = snapshot['global_time']
star1 = snapshot['star1']
star2 = snapshot['star2']
stars = snapshot['stars']
#gravity_stars = snapshot['gravity_stars']
gravity_stars.add_particle(star1)
gravity_stars.add_particle(star2)
gravity_stars.add_particles(snapshot['scattering_stars'])
self.root_to_tree = snapshot['self.root_to_tree']
zero_en = 0.0 * E0
return False, zero_en, zero_en, zero_en, zero_en, zero_en, \
snapshot['particles_in_encounter']
# Note that on return, particles_in_encounter contains CM
# nodes in the list.
E2CM = get_energy_of_leaves(particles_in_encounter,
G=self.gravity_constant)
Etop = particles_in_encounter.kinetic_energy() \
+ particles_in_encounter.potential_energy(G=self.gravity_constant)
if self.global_debug > 1:
print('E2 (CM) =', E2CM)
particles_in_encounter.position += cmpos
particles_in_encounter.velocity += cmvel
# Terminology from the PDF description:
E2 = get_energy_of_leaves(particles_in_encounter,
G=self.gravity_constant)
dE_int = E2 - E1 # should equal scatter_energy_error
err = (dE_int-scatter_energy_error)/max(E1,E2)
if abs(err) > 1.e-12:
if self.global_debug > 0:
print('*** warning: dE_int mismatch ***')
if self.global_debug > 1:
print('scatter_energy_error =', scatter_energy_error)
print('dE_int =', dE_int)
#print particles_in_encounter
print('E1 =', E1, 'E2 =', E2)
if self.global_debug > 2:
print('E2 =', E2)
print('scatter_energy_error =', scatter_energy_error)
print('dE_int =', dE_int)
#----------------------------------------------------------------
# 5a. Identify multiple structure after the encounter. First
# create an object to handle the new binary information.
#Brewer Mod: Create the appropriate COM particle for the pseudo-binary
'''
if self.repeat_count > 9:
print "repeat encounter detected; forcing binary creation"
pseudoCOM = Particles(1)
pseudoCOM.child1 = star1
pseudoCOM.child2 = star2
pseudoCOM.mass = star1.mass + star2.mass
pseudoCOM.position = cmpos
pseudoCOM.velocity = cmvel
pseudoCOM.radius = star1.radius + star2.radius
print particles_in_encounter
print pseudoCOM
particles_in_encounter.add_particles_in_store(pseudoCOM)
'''
#End Mod section.
binaries = trees.BinaryTreesOnAParticleSet(particles_in_encounter,
"child1", "child2")
# 5b. Compress the top-level nodes before adding them to the
# gravity code. Also recompute the external potential and
# optionally absorb the tidal error into the top-level
# nodes of the encounter list. Finally, add the change in
# top-level energy of the interacting subset into dEmult,
# so E(ph4) + dEmult should be conserved.
# Single stars.
stars_not_in_a_multiple = binaries.particles_not_in_a_multiple()
#print 'stars_not_in_a_multiple:'
#print stars_not_in_a_multiple
# Multiple centers of mass.
roots_of_trees = binaries.roots()
#----------------------------------------------------------------
# 6a. Scale to a radius slightly larger than the initial one.
# Rescaling does just that -- neither computes nor attempts to
# absorb the tidal error. If we want to absorb the tidal
# error rather than simply recording it, do so after splitting
# wide binaries below. TODO
final_scale = self.final_scale_factor * initial_scale
# Note that stars_not_in_a_multiple and roots_of_trees are
# simply convenient partitions of particles_in_encounter.
# They are pointers into the underlying particle set.
scale_top_level_list(stars_not_in_a_multiple,
roots_of_trees,
self.kepler,
final_scale,
self.gravity_constant,
self.global_debug)
# 6b. Break up wide top-level binaries. Do this after
# rescaling because we want to preserve binary binding
# energies. Also place the wide binaries at pericenter to
# minimize the tidal error.
# Number of top-level nodes.
lt = len(stars_not_in_a_multiple) + len(roots_of_trees)
# Roots to be deleted after the loop.
roots_to_remove = []
for root in roots_of_trees:
comp1 = root.child1
comp2 = root.child2
mass,semi,ecc,r,E,t = \
get_component_binary_elements(comp1,
comp2,
self.kepler)
apo = semi*(1+ecc)
if self.retain_binary_apocenter:
binary_scale = apo
else:
binary_scale = 2*semi # (the more conservative choice)
# Estimate the maximum perturbation on this binary due to
# its current strongest external perturber.
max_perturbation = 0.0
if len(sorted_perturbations) > 0:
max_perturbation = \
2*sorted_perturbations[0]*binary_scale**3/mass
perturber = sorted_stars[0]
perturber_distance = sorted_distances[0]
# Check that other stars involved in the encounter but not
# in this multiple are not the dominant perturbation.
stars_to_check = Particles()
for t in binaries.iter_binary_trees():
if t.particle != root: # exclude self interaction
stars_to_check.add_particles(t.get_leafs_subset())
#while len(roots_to_check) > 0:
# r = roots_to_check.pop()
# if r != root:
# if hasattr(r, "child1"):
# if r not in roots_to_check:
# roots_to_check.append(r)
# else:
# stars_to_check.extend(r)
try:
stars_to_check.remove_particle(star1)
except KeysNotInStorageException:
#print 'failed to remove star1'
pass
try:
stars_to_check.remove_particle(star2)
except KeysNotInStorageException:
#print 'failed to remove star2'
pass
# Check perturbation due to stars_to_check on root.
for s in stars_to_check:
distance = (s.position - root.position).length()
pert = s.mass / distance**3
s_perturbation = 2*pert*binary_scale**3/mass
if self.global_debug > 1:
print("star %s, distance %s, pert %s, s_pert %s, max_pert %s" \
% (s.id, distance, pert, s_perturbation,
max_perturbation))
if s_perturbation > max_perturbation:
max_perturbation = s_perturbation
perturber = s
perturber_distance = distance
#if binary_scale > rad12:
if max_perturbation < self.wide_perturbation_limit \
or self.repeat_count > 9:
if self.global_debug > 0:
print('accepting lightly perturbed or repeat binary', \
name_pair(comp1,comp2))
if self.global_debug > 1:
print(' semi =', semi, 'E/mu =', E)
print(' apo =', apo, 'peri =', semi*(1-ecc))
if max_perturbation > 0:
if self.global_debug > 1:
print(' strongest perturber is', perturber.id, \
'with apo perturbation', max_perturbation)
print(' nearest neighbor is', perturber.id, \
'at distance', perturber_distance)
print(' repeat_count =', self.repeat_count)
else:
if max_perturbation > 0:
print(' perturbation = 0')
self.repeat_count = 0 # probably unnecessary
sys.stdout.flush()
else:
if self.global_debug > 0:
if max_perturbation > 0:
print('splitting perturbed binary', \
name_pair(comp1,comp2))
if self.global_debug > 1:
print(' semi =', semi, 'E/mu =', E)
print(' apo =', apo, 'peri =', semi*(1-ecc))
print(' strongest perturber is', perturber.id, \
'with apocenter perturbation', max_perturbation)
print(' nearest neighbor is', perturber.id, \
'at distance', perturber_distance)
sys.stdout.flush()
# See the "special case" logic in
# scale_top_level_list(). If this is a sole bound
# top-level object, it has already been scaled to the
# desired separation and should *not* be modified
# here. Otherwise, move the components past
# periastron to initial_scatter_scale.
if lt > 1:
# Could use rescale_binary_components() for this,
# but code here is more compact, since we have
# already initialized the kepler structure.
cmpos = root.position
cmvel = root.velocity
if self.global_debug > 1:
print('moving binary to periastron')
self.kepler.advance_to_periastron()
if self.global_debug > 1:
print('advancing binary to', final_scale)
sys.stdout.flush()
self.kepler.advance_to_radius(final_scale)
dx = quantities.AdaptingVectorQuantity()
dx.extend(kep.get_separation_vector())
dv = quantities.AdaptingVectorQuantity()
dv.extend(kep.get_velocity_vector())
f1 = comp1.mass/root.mass
comp1.position = cmpos - f1*dx
comp1.velocity = cmvel - f1*dv
comp2.position = cmpos + (1-f1)*dx
comp2.velocity = cmvel + (1-f1)*dv
# Changing the list here would disrupt the loop
# bookkeeping. Remove any split-up roots after the
# loop, then recalculate all data structures and
# restart the loop.
#particles_in_encounter.remove_particle(root)
roots_to_remove.append(root)
# Note that removing the root will reinstate the
# children as top-level objects:
if len(roots_to_remove) > 0:
for r in roots_to_remove:
particles_in_encounter.remove_particle(r)
# Recompute the tree structure.
binaries = \
trees.BinaryTreesOnAParticleSet(particles_in_encounter,
"child1", "child2")
# Single stars.
stars_not_in_a_multiple = binaries.particles_not_in_a_multiple()
# Multiple centers of mass.
roots_of_trees = binaries.roots()
#----------------------------------------------------------------
# 7. Add the new top-level nodes to the gravity module.
top_level_nodes = stars_not_in_a_multiple + roots_of_trees
# Terminology from the PDF description:
KE3 = top_level_nodes.kinetic_energy()
E3 = KE3 + top_level_nodes.potential_energy(G=self.gravity_constant)
phi_ins = potential_energy_in_field(top_level_nodes,
stars - scattering_stars,
G=self.gravity_constant)
Emul_final = zero
for tree in binaries.iter_binary_trees():
isbin, dEmul = get_multiple_energy2(tree, self.gravity_constant)
Emul_final += dEmul
dphi_2 = E2 - Emul_final - E3
if self.global_debug > 2:
print('E3 =', E3)
print('phi_ins =', phi_ins)
print('Emul_final =', Emul_final)
print('dphi_2 =', dphi_2)
# 7a. Set radii to reflect multiple structure.
set_radii(particles_in_encounter, self.kepler, self.global_debug)
# Print diagnostics on added particles. Strip dimensions
# because of numpy problem noted below.
if self.global_debug > 0:
print('final top-level:', end=' ')
r = zero
v = zero
vr = zero
for i in top_level_nodes:
if self.global_debug > 0:
print(i.id, '('+str(i.radius)+')', end=' ')
for j in top_level_nodes:
if i.id > j.id:
rij = ((i.position-j.position)**2).sum().sqrt()
if rij > r:
r = rij
v = ((i.velocity-j.velocity)**2).sum().sqrt()
vr = ((j.velocity-i.velocity) \
* (j.position-i.position)).sum()
if self.global_debug > 0:
print('')
print('M =', top_level_nodes.mass.sum(), end=' ')
print('Etop =', Etop)
if self.global_debug > 1 and len(top_level_nodes) > 1:
print(' r =', r)
print(' v =', v)
print(' v.r =', vr)
#print 'top_level_nodes:'
#print top_level_nodes
sys.stdout.flush()
# Update the gravity module with the new data.
self.after.add_particles(stars_not_in_a_multiple)
# 7b. Add stars not in a binary to the gravity code.
if len(stars_not_in_a_multiple) > 0:
#print 'adding stars_not_in_a_multiple:'
#print stars_not_in_a_multiple
gravity_stars.add_particles(stars_not_in_a_multiple)
# 7c. Add the roots to the gravity code
multiples_particles = Particles()
multiples_particles.id = None
for tree in binaries.iter_binary_trees():
tree.particle.id = assign_id_to_root(tree) # assign CM ID (was 0)
#tree.particle.components = subset
#print 'adding particle:'
#print tree.particle
gravity_stars.add_particle(tree.particle)
self.after.add_particle(tree.particle)
multiples_particles.add_particle(tree.particle)
if self.global_debug > 1:
print("multiples: interaction products: singles:", \
stars_not_in_a_multiple.id, "multiples: ", \
multiples_particles.id)
# 7d. Store all trees in memory for later reference.
# Note this is actually where the trees get added to the
# multiples module, and is the appropriate place to modify any
# of the leaves / roots in the module. Also this is what as
# getting deleted in a call to expand_encounter, but is
# unmodified in a call to stars - Josh.
for tree in binaries.iter_binary_trees():
self.root_to_tree[tree.particle] = tree.copy()
# Return enough information to monitor all energy errors.
dE_top = E3 - E0
dphi_top = phi_ins - phi_rem
dEmul = Emul_final - Emul_init
dphi_int = dphi_2 - dphi_1
#-------------------------------------------------------
# Flag (but don't yet correct) large tidal corrections.
dph = dphi_top/KE3
if abs(dph) > 1.e-2: # 1.e-2 is small but otherwise arbitrary
if self.global_debug > 0:
print('*** tidal correction =', dph, 'KE ***')
#print 'initial configuration: phi =', \
# potential_energy_in_field(scattering_stars,
# stars - scattering_stars,
# G=self.gravity_constant)
pminmin, fminmin, dxminmin \
= find_nn2(scattering_stars, stars-scattering_stars,
self.gravity_constant)
if pminmin != None:
if self.global_debug > 1:
print('closest field/list pair is', \
str(fminmin.id)+'/'+str(pminmin.id), \
' distance/scale =', dxminmin/initial_scale)
#print 'final configuration: phi =', \
# potential_energy_in_field(top_level_nodes,
# stars - scattering_stars,
# G=self.gravity_constant)
pminmin, fminmin, dxminmin \
= find_nn2(top_level_nodes, stars-scattering_stars,
self.gravity_constant)
if pminmin != None:
if self.global_debug > 1:
print('closest field/list pair is', \
str(fminmin.id)+'/'+str(pminmin.id), \
' distance/scale =', dxminmin/initial_scale)
#-------------------------------------------------------
# Experimental code to try to correct external tidal errors.
# Compare dphi_top with range of possible quadrupole
# corrections due to closest perturber. Start with the
# simplest case.
#
# tidal potential change is dphi_top
# multiple center of mass is cmpos
# perturbers are in largest_perturbers
if self.check_tidal_perturbation \
and len(particles_in_encounter) == 2 and len(top_level_nodes) == 2:
print('checking quadrupole perturbations')
# *** Retain unitless code for now (Steve, 4/18). ***
m1 = top_level_nodes[0].mass
m2 = top_level_nodes[1].mass
dx = top_level_nodes[1].position - top_level_nodes[0].position
x = (dx**2).sum().sqrt()
print('x =', x, 'M =', m1+m2)
for p in largest_perturbers:
m3 = p.mass
id = p.id
dr = p.position - cmpos
r = (dr**2).sum().sqrt()
phi = -self.gravity_constant*M*m3/r
dphiQ = -(self.gravity_constant*(m1*m2/M)*m3/r)*(x/r)**2
print(' ', str(id)+':', 'r =', r, 'm =', p.mass, \
'dphi_top/dphiQ =', dphi_top/dphiQ)
return False, dE_top, dphi_top, dEmul, dphi_int, dE_int, \
particles_in_encounter
def resolve_collision(self,
particles,
final_scatter_scale,
min_scatter_scale,
end_time,
delta_t):
pre = 'encounter:' # identifier for all output
# Take the system described by particles and evolve it forward
# in time until it is over. Don't update global quantities,
# don't interpret the outcome. Return the energy error due to
# the smallN integration.
if self.debug_encounters:
delta_t *= 0.1
initial_delta_t = delta_t
if self.global_debug > 1:
print(pre, 'evolving to time', end_time)
print(pre, 'initial step =', initial_delta_t)
# Allow delta_t to increase, with an upper limit. (The factor
# of 25 below should permit quasi-stable systems to be
# detected.)
delta_t_max = 64*delta_t
while delta_t_max < end_time/25: delta_t_max *= 2
# Save some useful initial quantities.
initial_position = particles.position
initial_velocity = particles.velocity
initial_cmvel = particles.center_of_mass_velocity() # should be 0
initial_ke = particles.kinetic_energy()
initial_end_time = end_time
# Allow the possibility of repeating the encounter if it fails
# to terminate.
loop_count = 0
loop_max = 10
pert = 0.001 # retry option 1
pert_fac = 10.**(1./loop_max)
scale_fac = (min_scatter_scale
/ final_scatter_scale)**(2./loop_max) # option 2
end_time_fac = 1.5 # option 2
over = 0
while loop_count < loop_max:
loop_count += 1
#print pre, 'loop_count =', loop_count
#sys.stdout.flush()
resolve_collision_code \
= self.resolve_collision_code_creation_function()
# Channel to copy values from the code to the set in memory.
channel = resolve_collision_code.particles.new_channel_to(particles)
time = 0 * end_time
resolve_collision_code.set_time(time)
resolve_collision_code.particles.add_particles(particles)
resolve_collision_code.commit_particles()
delta_t = initial_delta_t
resolve_collision_code.set_break_scale(final_scatter_scale)
initial_scatter_energy \
= self.get_total_energy(resolve_collision_code)
if self.global_debug > 1:
print(pre, 'number_of_stars =', len(particles), ' ', \
particles.id)
print(pre, 'initial energy =', initial_scatter_energy)
#print particles
if self.debug_encounters:
print(pre, '### START ENCOUNTER ###')
print(pre, '### snapshot at time %f' % 0.0)
for p in particles:
print(pre, '### id=%d, x=%f, y=%f, z=%f,'\
'vx=%f, vy=%f, vz=%f' % \
(p.id, p.x.number, p.y.number, p.z.number,
p.vx.number, p.vy.number, p.vz.number))
sys.stdout.flush()
#------------------------------------------------------------
#
# If the encounter fails to terminate within the specified
# time we have some options:
#
# 1. Try perturbing the encounter in various energy
# conservative ways, starting from the original
# velocities.
#
# 2. Modify the termination conditions. This is
# potentially less expensive, but may not lead to a clean
# outcome. Increasing end_time simply involves extending
# the while loop; changing final_scatter_scale requires a
# new calculation.
option = 2
inner_loop = 1
#############################################################
# Set this to enable step-by-step debugging output.
# resolve_collision_code.parameters.outfile='abc.dat'
#
# e.g.
# if self.gravity_code.model_time.number > 31.4159:
# resolve_collision_code.parameters.outfile = 'debug.dat'
#############################################################
while time < end_time:
tt = time
time += delta_t
# print pre, '...to time', time
# sys.stdout.flush()
# Work with internal substeps of initial_delta_t to
# allow checks for quasi-stable motion.
while tt < time:
tt += initial_delta_t
if tt > time: tt = time
if 0:
print(pre, ' ...', time, tt, \
'model_time =', \
resolve_collision_code.model_time)
sys.stdout.flush()
resolve_collision_code.evolve_model(tt)
if 0:
print(pre, ' ...back:', \
': model_time =', \
resolve_collision_code.model_time)
sys.stdout.flush()
tt = resolve_collision_code.model_time
# DEBUGGING:
if self.debug_encounters:
print(pre, '### snapshot at time %f' \
% time.number)
#resolve_collision_code.update_particle_tree()
#resolve_collision_code.update_particle_set()
resolve_collision_code.particles \
.synchronize_to(particles)
channel.copy()
for p in particles:
print(pre, '### id=%d, x=%f, y=%f, z=%f,'\
'vx=%f, vy=%f, vz=%f' % \
(p.id, p.x.number, p.y.number, p.z.number,
p.vx.number, p.vy.number, p.vz.number))
sys.stdout.flush()
# The argument final_scatter_scale is used to
# limit the size of the system. It has to be
# supplied again because the code that determines
# if the scattering is over isn't necessarily the
# same as resolve_collision_code. However,
# currently only smallN has an "is_over()"
# function.
#
# Return values: 0 - not over
# 1 - over
# 2 - quasi-stable system
# 3 - size exceeded limit
#
# Note that this is really a stopping condition,
# and should eventually be handled that way. TODO
#
# If over = 3, if the parameters were properly
# chosen, the resulting system should stil be
# usable. The interface function will take steps
# to return proper hierarchical structure even if
# the inner subsystem is not well resolved.
#
# Note that we are currently ignoring any
# possibility of a physical collision during the
# multiples encounter. TODO
over = resolve_collision_code.is_over\
(final_scatter_scale,
0) # verbose = 0
if over:
final_scatter_energy \
= self.get_total_energy(resolve_collision_code)
scatter_energy_error \
= final_scatter_energy - initial_scatter_energy
if self.global_debug > 1:
print(pre, 'over =', over, 'at time', tt)
#print pre, 'initial energy =', \
# initial_scatter_energy
#print pre, 'final energy =', \
# final_scatter_energy
#print pre, 'energy error =', \
# scatter_energy_error
print(pre, 'fractional energy error =', \
scatter_energy_error/initial_scatter_energy)
if self.debug_encounters:
print(pre, '### END ENCOUNTER ###')
sys.stdout.flush()
# Create a tree in the module representing the
# binary structure.
resolve_collision_code.update_particle_tree(over)
# Note: A quasi-stable system (over = 2)
# should be handled properly, as it will
# appear to be a bound top-level binary. If
# over = 3, the top level should be a receding
# bound or unbound system, and the tree
# structure should still be usable.
# Note that center of mass particles are now
# part of the particle set.
# Return the tree structure to AMUSE.
# Children are identified by
# get_children_of_particle in interface.??,
# and the information is returned in the copy
# operation.
resolve_collision_code.update_particle_set()
resolve_collision_code.particles \
.synchronize_to(particles)
#print "resolve_collision_code.particles.radius"\
# , resolve_collision_code.particles.radius
channel.copy()
#resolve_collision_code.stop()
if 1:
# Count number of top-level multiples. Must
# be >0 for the post-encounter bookkeeping to
# work.
binaries = trees.BinaryTreesOnAParticleSet(
particles, "child1", "child2")
singles = binaries.particles_not_in_a_multiple()
multiples = binaries.roots()
if self.global_debug > 0:
print('after', pre, len(singles), \
'single(s),', \
len(multiples), 'multiple(s)')
return scatter_energy_error
if tt >= 0.99999999*time: break # avoid roundoff
# -- end of while tt < time: loop --
time = resolve_collision_code.model_time
if not self.debug_encounters:
if delta_t < delta_t_max \
and time > 0.999999*4*delta_t:
delta_t *= 2
if self.global_debug > 1:
print(pre, 'setting delta_t =', delta_t)
sys.stdout.flush()
if time > 0.99999999*end_time: # avoid roundoff
# Encounter has failed to terminate and we are
# about to break out of the loop. If option = 2
# and this is an odd-numbered loop, just increase
# end_time (once only). Otherwise, break and
# allow other options to take effect.
if option == 2 and 2*(loop_count/2) != loop_count \
and inner_loop == 1:
# Adjust the bulk scattering parameters.
# Simply increase end_time.
end_time *= end_time_fac
# Same print output as below.
if self.global_debug > 1:
print(pre, 'loop', loop_count, ' over =', over)
print('increasing end_time to', end_time)
print('-----')
inner_loop = 2
loop_count += 1
else:
break
# -- end of while time < end_time: loop --
# As currently coded, if we get here we have not
# overwritten the original particle set, particles.
# Nevertheless, we restore particle data here prior
# to a retry.
particles.position = initial_position
particles.velocity = initial_velocity
if self.global_debug > 1:
print(pre, 'loop', loop_count, ' over =', over)
if option == 1:
# Perturbing the encounter can be done in several
# ways, of increasing intrusiveness and decreasing
# reasonableness.
#
# 1. Randomize the phases of all binary orbits.
# 2. Randomize the orientations of all binary orbits.
# 3. Perturb (jiggle) the top-level orbits.
# 4. Jiggle all velocities.
#
# In all cases, the total energy must be preserved and
# the CM motion must remain at the origin. However,
# in case 4, the total multiple energy and hence the
# bookkeeping will be compromised unless we explicitly
# correct it -- need an additional return value.
#
# TODO: We should implement options 1-3 -- these
# require scattering_stars to be passed as an
# argument.
#
# For now, choose the least desirable but easiest
# option #4, with increasing pert as loop_count
# increases.
ran = 1 + pert*(2*numpy.random.random(len(particles)) - 1)
for k in range(len(particles)):
particles[k].velocity *= ran[k]
# Preserve momentum and energy.
final_cmvel = particles.center_of_mass_velocity()
particles.velocity -= final_cmvel - initial_cmvel
final_ke = particles.kinetic_energy()
particles.velocity *= math.sqrt(initial_ke/final_ke)
pert *= pert_fac
print('retrying with pert =', pert)
elif option == 2:
# Adjust the bulk scattering parameters. First
# increase end_time, then reduce and increase
# final_scatter_scale, etc. Should only be able to
# get here if loop_count is even. End_time has
# already been increased. Use the larger version, but
# decrease final_scatter_scale.
final_scatter_scale *= scale_fac
print('retrying with final_scatter_scale =', final_scatter_scale)
print(' end_time =', end_time)
print('-----')
raise DidNotFinishException(
pre + \
" Small-N simulation did not finish before end time {0}".
format(end_time)
)
def openup_tree(star, tree, particles_in_encounter):
    """Replace the center-of-mass node star by the leaves of tree,
    translated to star's current phase-space location, and append them
    to particles_in_encounter."""

    # The individual stars stored under this root.
    leaves = tree.get_leafs_subset()
    original_star = tree.particle

    # The components were frozen in absolute coordinates when the CM
    # entered the gravity module, so shift each coordinate by however
    # far the CM has moved since then.
    # Maybe better just to store relative coordinates?  TODO
    for attr in ('x', 'y', 'z', 'vx', 'vy', 'vz'):
        shift = getattr(star, attr) - getattr(original_star, attr)
        setattr(leaves, attr, getattr(leaves, attr) + shift)

    particles_in_encounter.add_particles(leaves)
def phi_tidal(star1, star2, star3, G):
    """Return the tidal potential of the pair (star1, star2) relative
    to star3: phi13 + phi23 - phi_cm, where phi_cm is the potential
    star3 would feel if the pair's mass were concentrated at its
    center of mass.

    Bug fix: the original evaluated '.length' without calling it, so
    the phicm term divided by a bound method and raised a TypeError.
    The pair's center of mass is now computed directly instead of
    through a temporary Particles set.
    """
    phi13 = -G*star1.mass*star3.mass/(star1.position-star3.position).length()
    phi23 = -G*star2.mass*star3.mass/(star2.position-star3.position).length()
    m12 = star1.mass + star2.mass
    # Mass-weighted center-of-mass position of the pair.
    cmpos = (star1.mass*star1.position + star2.mass*star2.position)/m12
    phicm = -G*m12*star3.mass/(star3.position-cmpos).length()
    return phi13+phi23-phicm
def find_nn(plist, field, G):
    """Find the closest field particle (as measured by pairwise
    potential) to any particle in plist.

    Returns (pminmin, fminmin, dxminmin): the list particle, the field
    particle and their separation for the most strongly interacting
    pair, or (None, None, None) when field is empty.

    Bug fix: dxminmin was previously left unbound when the loop body
    never ran (empty field), causing an UnboundLocalError at return.
    """
    pminmin = None
    fminmin = None
    dxminmin = None
    phiminmin = zero
    for f in field:
        # Potential of every plist member relative to this field star.
        dx = (plist.position - f.position).lengths()
        phi = -G*f.mass*plist.mass/dx
        phimin = zero
        dxmin = 1.e30
        pmin = None
        for i in range(len(phi)):
            if phi[i] < phimin:
                phimin = phi[i]
                dxmin = dx[i]
                pmin = plist[i]
        # Keep the globally deepest potential seen so far.
        if phimin < phiminmin:
            phiminmin = phimin
            pminmin = pmin
            dxminmin = dxmin
            fminmin = f
    return pminmin, fminmin, dxminmin
def find_nn2(plist, field, G):
    """Find the closest field particle (as measured by pairwise
    potential) to any particle in plist.

    Revised, faster (numpy-accelerated) version of find_nn.  Returns
    (pminmin, fminmin, dxminmin), or (None, None, None) when plist is
    empty.

    Fixes: dxminmin was previously unbound (UnboundLocalError) when
    the loop body never ran; dead assignments to phimin/dxmin/pmin,
    immediately overwritten by the argmin result, are removed.
    """
    pminmin = None
    fminmin = None
    dxminmin = None
    phiminmin = zero
    for p in plist:
        dx = (p.position - field.position).lengths()
        phi = -G*field.mass*p.mass/dx
        # Index of the deepest (most negative) potential for p.
        j = numpy.argmin(phi.number)
        if phi[j] < phiminmin:
            phiminmin = phi[j]
            pminmin = p
            dxminmin = dx[j]
            fminmin = field[j]
    return pminmin, fminmin, dxminmin
def find_binaries(particles, G):
    """Search for bound pairs with a numpy-accelerated N^2 sweep and
    print each pair once (identified by its lower-id member)."""
    for p in particles:
        # Two-body quantities of p against every particle in the set.
        mu = p.mass*particles.mass/(p.mass+particles.mass)
        dr = (particles.position - p.position).lengths()
        dv = (particles.velocity - p.velocity).lengths()
        E = 0.5*mu*dv*dv - G*p.mass*particles.mass/dr
        order = numpy.argsort(E.number)
        # order[0] is presumably p paired with itself; order[1] is the
        # most strongly bound companion.
        companion = particles[order[1]]
        Emin = E[order[1]].number
        if Emin < -1.e-4 and p.id < companion.id:
            print('bound', p.id, companion.id, Emin)
def potential_energy_in_field(particles, field_particles,
                              smoothing_length_squared = zero,
                              G=constants.G):
    """
    Return the total potential energy of the particles in the
    particles set, relative to an external field.

    Arguments:
    particles -- the set whose potential energy is evaluated
    field_particles -- the external field consists of these particles
        (i.e. potential energy is calculated relative to them)
    smoothing_length_squared -- added to every squared distance before
        taking the square root
    G -- gravitational constant; needs to be changed for particle
        sets in different unit systems

    Fix: removed two per-particle numpy.argmin computations whose
    results were discarded (dead O(n) work on every iteration).
    """
    if len(field_particles) == 0:
        return zero

    sum_of_energies = zero
    for particle in particles:
        dr_squared = (particle.position-field_particles.position).lengths_squared()
        dr = (dr_squared+smoothing_length_squared).sqrt()
        m_m = particle.mass * field_particles.mass
        potentials = -m_m/dr
        sum_of_energies += potentials.sum()

    return G * sum_of_energies
def offset_particle_tree(particle, dpos, dvel):
    """Recursively translate particle and all of its descendants by
    dpos in position and dvel in velocity (children first, then the
    particle itself)."""
    for child in (particle.child1, particle.child2):
        if child is not None:
            offset_particle_tree(child, dpos, dvel)
    particle.position += dpos
    particle.velocity += dvel
def rescale_binary_components(comp1, comp2, kep, scale, compress=True):
    """Rescale the two-body system (comp1, comp2) to lie inside
    (compress=True) or outside (compress=False) distance scale of one
    another.

    If compress=True, the final orbit will be receding; otherwise it
    will be approaching.  In a typical case, scale is comparable to
    the separation at which the interaction started.  It is possible
    that the input system is very close to periastron.  To avoid
    problems with very eccentric systems, force the system to be
    scaled to a separation of at least 0.1*scale (0.1 is ~arbitrary:
    should be <1, and not too small).

    Children of comp1/comp2 (stored in absolute coordinates) are
    offset along with their parents.  Returns the semi-major axis a.

    Cleanup relative to the original: removed dead 'if 0:' debug
    blocks (and the then-unused 'pre' label), the non-idiomatic
    '== True' comparison, and a duplicate recomputation of the total
    mass.
    """
    pos1 = comp1.position
    pos2 = comp2.position
    sep12 = ((pos2-pos1)**2).sum()		# current squared separation
    mass1 = comp1.mass
    mass2 = comp2.mass
    total_mass = mass1 + mass2
    vel1 = comp1.velocity
    vel2 = comp2.velocity
    cmpos = (mass1*pos1+mass2*pos2)/total_mass
    cmvel = (mass1*vel1+mass2*vel2)/total_mass

    rel_pos = pos2 - pos1
    rel_vel = vel2 - vel1

    # Set up the internal two-body orbit in the kepler module.
    kep.initialize_from_dyn(total_mass,
                            rel_pos[0], rel_pos[1], rel_pos[2],
                            rel_vel[0], rel_vel[1], rel_vel[2])
    M,th = kep.get_angles()		# mean anomaly M < 0 => approaching
    a,e = kep.get_elements()

    # Rescale only if the system is on the wrong side of scale.
    rescale = (compress and sep12 > scale**2) \
                or (not compress and sep12 < scale**2)
    min_scale = 0.1*scale		# see note above

    if compress:
        # Also rescale (outward) if the components are closer than
        # the minimum allowed separation.
        rescale = rescale or sep12 < min_scale**2

    if rescale:
        if e < 1:
            peri = a*(1-e)
            apo = a*(1+e)
        else:
            peri = a*(e-1)
            apo = peri+a		# OK - used only to reset scale

        if compress:

            # Logic here is to handle special configurations.
            limit = peri + 1.e-4*(apo-peri)		# numbers are
            if limit > 1.1*peri: limit = 1.1*peri	# ~arbitrary
            if limit < min_scale: limit = min_scale
            if scale < limit:
                scale = limit

            if M < 0:
                kep.advance_to_periastron()
                kep.advance_to_radius(scale)
            else:
                if kep.get_separation() < scale:
                    kep.advance_to_radius(scale)
                else:
                    kep.return_to_radius(scale)

            # Note: Always end up on an outgoing orbit.  If periastron
            # > scale, we are now just past periastron.

        else:

            limit = apo - 0.01*(apo-peri)
            if scale > limit:
                scale = limit

            if M > 0:
                kep.return_to_periastron()
                kep.return_to_radius(scale)
            else:
                if kep.get_separation() < scale:
                    kep.return_to_radius(scale)
                else:
                    kep.advance_to_radius(scale)

            # Note: Always end up on an incoming orbit.  If
            # apastron < scale, we are now just before apastron.

        new_rel_pos = kep.get_separation_vector()
        new_rel_vel = kep.get_velocity_vector()

        # The vectors returned by kepler are lists, not numpy arrays
        # with units, so build the new component vectors element by
        # element.  The "x - x" trick creates zero vectors with the
        # proper form and units.

        newpos1 = pos1 - pos1
        newpos2 = pos2 - pos2
        newvel1 = vel1 - vel1
        newvel2 = vel2 - vel2

        frac2 = mass2/total_mass
        for k in range(3):
            dxk = new_rel_pos[k]
            dvk = new_rel_vel[k]
            newpos1[k] = cmpos[k] - frac2*dxk
            newpos2[k] = cmpos[k] + (1-frac2)*dxk
            newvel1[k] = cmvel[k] - frac2*dvk
            newvel2[k] = cmvel[k] + (1-frac2)*dvk

        # Perform the changes to comp1 and comp2, and recursively
        # transmit them to the (currently absolute) coordinates of all
        # lower components.

        if hasattr(comp1, 'child1'):
            offset_particle_tree(comp1, newpos1-pos1, newvel1-vel1)
        if hasattr(comp2, 'child1'):
            offset_particle_tree(comp2, newpos2-pos2, newvel2-vel2)

    sys.stdout.flush()
    return a
def offset_children(n, dx, dv):
    """Recursively shift every descendant of node n by -dx in position
    and -dv in velocity.  Node n itself is left untouched."""
    for child in (n.child1, n.child2):
        if child is not None:
            child.position -= dx
            child.velocity -= dv
            offset_children(child, dx, dv)
def compress_nodes(node_list, scale, G):
local_debug = False
# Compress (or expand) the top-level nodes in node_list to lie
# within diameter scale. Rescale velocities to conserve total
# energy (but currently not angular momentum -- TODO).
pre = 'compress_nodes:'
# Compute the center of mass position and velocity of the
# top-level system.
cmpos = node_list.center_of_mass()
cmvel = node_list.center_of_mass_velocity()
# Child positions and velocities will not be explicitly changed by
# the scaling. Temporarily store child data as offsets relative
# to the root. We will undo this at the end, immediately before
# returning.
for n in node_list:
if n.child1 != None:
dx = n.position
dv = n.velocity
offset_children(n, dx, dv)
if local_debug:
print('node_list:')
print(node_list)
print('top_level:')
print_top_level(node_list, G)
x0 = (node_list[0].position**2).sum().sqrt()
lunit = x0/x0.number
v0 = (node_list[0].velocity**2).sum().sqrt()
vunit = v0/v0.number
vunit2 = vunit**2
# Compute various measures of the size, potential, and kinetic
# energy of the system in the center of mass frame.
size = zero # max distance(**2) from center of mass
rijmin = 1.e100*lunit # minimum separation
imin = -1
jmin = -1
phimin = zero # minimum potential
ipmin = -1
jpmin = -1
n = len(node_list)
pot = zero
kin = zero
dr = numpy.zeros((n,n)) # unit = lunit
dv2 = numpy.zeros((n,n)) # unit = vunit2
for i in range(n):
m = node_list[i].mass
posi = node_list[i].position
pos = posi - cmpos
veli = node_list[i].velocity
vel = veli - cmvel
r2 = (pos**2).sum()
if r2 > size:
size = r2
kin += m*(vel**2).sum()
dpot = zero
for j in range(i+1,n):
mj = node_list[j].mass
dposj = node_list[j].position - posi
rij = (dposj**2).sum().sqrt()
dphij = -G*mj/rij
dpot += dphij
phij = m*dphij
if rij < rijmin:
rijmin = rij
imin = i
jmin = j
if phij < phimin:
phimin = phij
ipmin = i
jpmin = j
dvelj = node_list[j].velocity - veli
dr[i,j] = rij/lunit
dv2[i,j] = (dvelj**2).sum()/vunit2
if dpot != zero:
pot += m*dpot
size = size.sqrt()
kin /= 2
rphmin = -(node_list[ipmin].mass*node_list[jpmin].mass)/phimin
if local_debug:
print(pre, 'scale =', scale)
print(pre, 'size =', size)
print(pre, 'rijmin =', rijmin, node_list[imin].id, node_list[jmin].id)
print(pre, 'rphmin =', rphmin, node_list[ipmin].id, node_list[jpmin].id)
fac = 0.5*scale/size # scale to radius
#fac = scale/rijmin # scale to minimum distance
#fac = scale/rphmin # scale to minimum potential distance
if local_debug:
print(pre, 'fac =', fac)
# Compress (or expand) the system and increase (or decrease) the
# velocities (relative to the center of mass) to preserve the
# energy. If fac > 1, expansion is always OK if E > 0, which it
# should be at this point (but check anyway...). May have E < 0
# if we have a system with small negative energy, stopped because
# it is too big.
# An additional consideration (Steve, 1/2017) is that all
# top-level nodes are mutually unbound at the end of the
# scattering, by construction, but this may not be preserved by
# a simple uniform rescaling of the system. In that case, an
# unphysical extra interaction may follow the scattering we
# thought was "over." Currently we check for this possibility,
# then modify the way in which velocities are scaled to
# compensate. NOT guaranteed to work in all cases, and the code
# is ugly...
vfac2 = 1-(1/fac-1)*pot/kin
#print "vfac2 =", vfac2
if vfac2 < 0:
print(pre, "Can't expand top level system to rjmin > ri+rj")
print("fac =", fac, " pot =", pot, " kin =", kin)
sys.stdout.flush()
f = pot/(kin+pot)
vfac2 = 0.0 # ???
vfac = math.sqrt(vfac2)
if local_debug:
print("vfac =", vfac)
print(pre, 'dr:')
print(dr)
print(pre, 'dv2:')
print(dv2)
bound_pairs = []
unbound = numpy.ones(n)
for i in range(n):
mi = node_list[i].mass
bound = False
for j in range(i+1,n):
mj = node_list[j].mass
mu = mi*mj/(mi+mj)
Eijold = 0.5*mu*dv2[i,j]*vunit2 - G*mi*mj/(dr[i,j]*lunit)
Eijnew = 0.5*mu*vfac2*dv2[i,j]*vunit2 - G*mi*mj/(fac*dr[i,j]*lunit)
if Eijnew.number <= 0.0:
#print 'bound', i, j, Eijold, Eijnew
bound = True
bound_pairs.append((i,j))
unbound[i] = 0
unbound[j] = 0
print(pre, 'bound pairs:', bound_pairs)
unbound_nodes = []
for i in range(n):
if unbound[i] == 1:
unbound_nodes.append(i)
print(pre, 'unbound_nodes:', unbound_nodes)
if len(unbound_nodes) == 0:
# Live with unphysical bound pairs for now. TODO
print('*** warning: no unbound nodes ***')
bound_pairs = []
if len(bound_pairs) > 0:
# Strategy #1: Scale positions uniformly as planned, but
# adjust the velocity scaling for pairs whose binding energy
# would become negative. Strategy #2 (unimplemented) would be
# to modify the spatial scaling by scaling bound pairs only to
# scale, then adjust velocities as in #1. Strategy #3 (also
# unimplemented) would be to not scale bound pairs at all.
energy = pot + kin # initial energy - conserved
for n in node_list:
n.position = cmpos + fac*(n.position-cmpos)
dr *= fac
pot /= fac
if local_debug:
print('kinetic energies:')
for n in node_list:
print(' ', n.id, 0.5*n.mass*((n.velocity-cmvel)**2).sum())
# First give the bound components enough relative velocity to
# just unbind them, keeping their center of mass velocity
# fixed. Note that, since Eij was > 0 and this prescription
# leaves Eij close to 0, this transformation should liberate
# energy for distribution to the rest of the system.
kin2 = zero
kinCM = zero
for p in bound_pairs:
i = p[0]
j = p[1]
ni = node_list[i]
nj = node_list[j]
mi = ni.mass
mj = nj.mass
newvfac2 = 2.000001*(G*(mi+mj)/(dr[i,j]*lunit))/(dv2[i,j]*vunit2)
newvfac = math.sqrt(newvfac2)
massinv = 1./(mi+mj)
cmv = (mi*ni.velocity + mj*nj.velocity)*massinv
ni.velocity = cmv + newvfac*(ni.velocity-cmv)
nj.velocity = cmv + newvfac*(nj.velocity-cmv)
kin2 += 0.5*mi*mj*massinv*((ni.velocity-nj.velocity)**2).sum()
kinCM += 0.5*(mi+mj)*((cmv-cmvel)**2).sum()
if local_debug:
print('KECM =', kin2+kinCM)
for i in unbound_nodes:
ni = node_list[i]
mi = ni.mass
kei = 0.5*mi*((ni.velocity-cmvel)**2).sum()
if local_debug:
print('KE', ni.id, kei)
kinCM += kei
if local_debug:
print('energy =', energy, 'pot+kin2+kinCM =', pot+kin2+kinCM)
kin_to_distribute = energy - (pot+kin2+kinCM)
if kin_to_distribute.number < 0:
print('*** warning: not enough kinetic energy ***') # TODO
vfac2 = 1+kin_to_distribute/kinCM
vfac = math.sqrt(vfac2)
#print 'vfac =', vfac
# Then apply an overall scaling to unbound nodes and bound CMs
# to conserve total energy.
for i in unbound_nodes:
ni = node_list[i]
ni.velocity = cmvel + vfac*(ni.velocity-cmvel)
for p in bound_pairs:
i = p[0]
j = p[1]
ni = node_list[i]
nj = node_list[j]
mi = ni.mass
mj = nj.mass
massinv = 1./(mi+mj)
cmv = (mi*ni.velocity + mj*nj.velocity)*massinv
newcmv = cmvel + vfac*(cmv-cmvel)
ni.velocity += newcmv - cmv
nj.velocity += newcmv - cmv
if len(bound_pairs) == 0:
# Perform global scaling of position and velocity.
for n in node_list:
n.position = cmpos + fac*(n.position-cmpos)
n.velocity = cmvel + vfac*(n.velocity-cmvel)
# Child data have not yet been modified. Do so here. Note that
# child positions and velocities were temporarily offset to the
# top-level center of mass.
for n in node_list:
if n.child1 != None:
dx = -n.position
dv = -n.velocity
offset_children(n, dx, dv)
#print_top_level(node_list, G)
def get_multiple_energy(node, kep):
    """Return (is_bin, Etot) for tree node ``node``.

    is_bin is 1 if the node is a simple binary (no substructure below
    the top level), 0 otherwise.  Etot is the sum of the internal
    two-body energies of all binary nodes in the hierarchy.
    """
    # Return the binary status and the total energy Etot of the
    # specified tree node.  The value of Etot is the total pairwise
    # energy of all binary objects in the hierarchy.  It does not
    # include the tidal potential of one component on another (e.g.
    # in a hierarchical triple Etot will be the sum of two binary
    # energies only).
    # Note that kep should have been initialized with the correct
    # converter to return the proper energy units.
    is_bin = 1
    Etot = zero                       # module-level zero quantity
    for level, x in node.iter_levels():
        particle = x
        if not particle.child1 is None:
            # An internal node below the top level means this is a
            # hierarchy, not a simple binary.
            if level > 0: is_bin = 0
            child1 = particle.child1
            child2 = particle.child2
            M,a,e,r,Emu,t = get_component_binary_elements(child1, child2, kep)
            mu = child1.mass*child2.mass/M    # reduced mass of the pair
            E = Emu*mu                        # two-body energy of this pair
            Etot += E
    return is_bin, Etot
def get_multiple_energy2(node, G):
    """Return (is_bin, E) for tree node ``node`` using gravitational
    constant G.

    E is the total energy of all leaves under the node -- kinetic plus
    full pairwise potential (so tidal cross terms are included) --
    minus the kinetic energy of the leaves' center of mass.
    """
    # Return the binary status and the total energy of the specified
    # tree node.  Uses a value of G supplied by the caller.  The
    # returned value is the total energy of all leaves in the
    # hierarchy, properly including tidal potentials, but excluding
    # the center of mass energy.
    is_bin = 1
    Ecm = zero
    # First pass: initialize mass/momentum accumulators with correctly
    # dimensioned zeros taken from the top-level (level 0) node.
    for level, x in node.iter_levels():
        if level == 0:
            particle = x
            M_comp = 0*particle.mass              # zero with mass units
            vcm_comp = M_comp*particle.velocity   # zero with momentum units
            if particle.id == 1000074:
                # NOTE(review): leftover debugging hook for one specific
                # particle id; pp is never read afterwards.  Candidate
                # for removal -- confirm against repository history.
                pp = True
            break       # only the top-level node is needed here
    # List the leaves and do some additional work.  Note that
    # node.get_leafs_subset() seems to do the same thing...
    leaves_in_node = Particles(0)
    for level, x in node.iter_levels():
        particle = x
        if level == 0:
            # Want to compute the top-level kinetic energy.  Might
            # expect
            vcm = particle.velocity
            Ecm = 0.5*particle.mass*(vcm**2).sum()
            # but in some circumstances (e.g. a binary created in a
            # many-body process), the position and velocity of the
            # parent may not correctly reflect the center of mass of
            # its children.  (NOTE(review): Ecm computed here is not
            # used in the returned value -- the Ecm_comp below is.)
        if not particle.child1 is None:
            if level > 0:
                is_bin = 0 # not a multiple
        else:
            leaves_in_node.add_particle(particle) # list leaves
            M_comp += particle.mass
            vcm_comp += particle.mass*particle.velocity
    vcm_comp /= M_comp
    # Kinetic energy of the leaves' own center of mass; subtracted so
    # the returned energy is internal to the multiple.
    Ecm_comp = 0.5*M_comp*(vcm_comp**2).sum()
    return is_bin, leaves_in_node.kinetic_energy() \
                   + leaves_in_node.potential_energy(G=G) \
                   - Ecm_comp
def add_leaves(node, leaf_list):
    """Recursively append every leaf under ``node`` to ``leaf_list``.

    A leaf is a node whose child1 is None; internal nodes are assumed
    to have both child1 and child2 set.  ``leaf_list`` must provide an
    add_particle method.
    """
    # Idiom fix: identity comparison with None (was `== None`).
    if node.child1 is None:
        leaf_list.add_particle(node)
    else:
        add_leaves(node.child1, leaf_list)
        add_leaves(node.child2, leaf_list)
def get_energy_of_leaves(particles, G):
    """Return total (kinetic + potential) energy of all leaves in
    ``particles``, using gravitational constant G.

    A leaf is a particle without a child1 attribute, or whose child1
    is None.
    """
    leaves = Particles(0)
    for p in particles:
        # Idiom fix: identity comparison with None (was `== None`).
        if not hasattr(p, 'child1') or p.child1 is None:
            leaves.add_particle(p)
    ke = leaves.kinetic_energy()
    pe = leaves.potential_energy(G=G)
    return ke + pe
def print_energies(stars, G):
    """Print mass, kinetic, potential and total energy of the
    top-level particles of ``stars`` (brute-force O(N^2), pure python).
    """
    # Brute force N^2 over top level, pure python...
    top_level = stars.select(is_not_a_child, ["is_a_child"])
    mass = zero
    kinetic = zero
    potential = zero
    for t in top_level:
        m = t.mass
        x = t.x
        y = t.y
        z = t.z
        vx = t.vx
        vy = t.vy
        vz = t.vz
        mass += m
        kinetic += 0.5*m*(vx**2+vy**2+vz**2)
        dpot = zero
        for tt in top_level:
            if tt != t:
                mm = tt.mass
                xx = tt.x-x
                yy = tt.y-y
                zz = tt.z-z
                dpot -= G*mm/(xx**2+yy**2+zz**2).sqrt()
        # Factor 0.5 because the double loop counts each pair twice.
        potential += 0.5*m*dpot
    print('len(stars) =', len(stars))
    print('len(top_level) =', len(top_level))
    print('mass =', mass)
    print('kinetic =', kinetic)
    print('potential =', potential)
    print('energy =', kinetic+potential)
    sys.stdout.flush()
def scale_top_level_list(singles, multiples, kep, scale,
                         gravity_constant, global_debug):
    """Rescale the top-level nodes (singles + multiples) of a finished
    interaction so they fit within a sphere of diameter ``scale``,
    conserving energy (and angular momentum where possible).
    """
    pre = 'scale_top_level_list:'

    # The multiples code followed the particles until their
    # interaction could be unambiguously classified as over.  They may
    # now be very far apart.  Input singles and multiples are lists
    # describing the final top-level structure of the interacting
    # particles in the multiples code.  Singles is a list of single
    # stars.  Multiples is a list of multiple centers of mass (with
    # pointers to the internal structure).

    # Scale the positions and velocities of the top-level nodes to
    # bring them within a sphere of diameter scale, conserving energy
    # and angular momentum (if possible).  Also offset all children to
    # reflect changes at the top level -- TODO: will change if/when
    # offsets are implemented...

    # Logic: 1 node  - must be a binary, use kepler to reduce to scale
    #        2 nodes - use kepler, reduce binary children too?  TODO
    #        3+ nodes - shrink radii and rescale velocities to preserve
    #                   energy, but this doesn't preserve angular
    #                   momentum  TODO - also reduce children?  TODO

    top_level_nodes = singles + multiples

    # Figure out the tree structure.
    ls = len(singles)
    lm = len(multiples)
    lt = ls + lm
    if global_debug > 1:
        print(pre, 'ls =', ls, ' lm =', lm, ' lt =', lt)
        sys.stdout.flush()

    if lt == 1:
        if lm == 1:
            # Special case.  We have a single bound binary node.  Its
            # children are the components we want to transform.  Note
            # that, if the components are binaries (or multiples),
            # they must be stable, so it is always OK to move the
            # components to periastron.
            # Note: Wide binaries will be split and returned to the
            # large-scale dynamics module after return from this
            # function.
            root = multiples[0]
            if global_debug > 1:
                print(pre, 'bound binary node', scale)
            #print '\nunscaled binary node:'
            #print_multiple_recursive(root)
            comp1 = root.child1
            comp2 = root.child2
            if global_debug > 1:
                print(pre, 'scale:', scale)
            semi = rescale_binary_components(comp1, comp2, kep, scale)
            #true, mean = kep.get_angles()
            #print 'true =', true, 'mean =', mean
            #print 'scaled binary node:'
            #print_multiple_recursive(root, kep)

    elif lt == 2:
        # We have two unbound top-level nodes, and we will scale them
        # using kepler to the desired separation, hence conserving
        # both energy and angular momentum of the top-level motion.
        # We might also want to scale the daughter nodes.  Note as
        # above that, if the daughters are binaries (or multiples),
        # they must be stable, so it is always OK to move them to
        # periastron.
        comp1 = top_level_nodes[0]
        comp2 = top_level_nodes[1]
        if global_debug > 1:
            print(pre, 'top-level unbound pair')
            #print pre, '\nunscaled top-level pair:'
            #print_pair_of_stars('pair', comp1, comp2, kep)
            sys.stdout.flush()
        semi = rescale_binary_components(comp1, comp2, kep, scale)
        #print pre, '\nscaled top-level pair:'
        #print_pair_of_stars('pair', comp1, comp2, kep)
        #sys.stdout.flush()

    else:
        # We have three or more unbound top-level nodes.  We don't
        # know how to compress them in a conservative way.  For now,
        # we will conserve energy and think later about how to
        # preserve angular momentum.  TODO
        print(pre, lt, 'top-level nodes, scale =', scale)
        #print lt, 'unscaled top-level nodes'
        #print top_level_nodes
        compress_nodes(top_level_nodes, scale, gravity_constant)
        #print lt, 'scaled top-level nodes'
        #print top_level_nodes

    # print pre, 'done'
    sys.stdout.flush()

    # Don't attempt to correct or even return the tidal energy error.
    # Manage all of this in the calling function, as desired.

    return
def set_radius_recursive(node, kep, global_debug):
    """Set the dynamical radius of ``node`` and all nodes below it.

    A parent's radius is the sum of its children's radii, enlarged to
    2*semi-major-axis when the node is a wide (soft) binary.
    """
    if node.is_leaf(): return		# nothing to be done

    # Propagate child radii upward.  Since dynamical radius scales
    # with mass, the radius of a parent is just the sum of the radii
    # of the children.  If we are simply handling 2-body encounters,
    # that's all we need.  The semi-major axis of a hard binary is
    # less than the dynamical radius, by definition.  However, we must
    # include the size of a soft binary or multiple, which may be
    # significantly larger than the dynamical radius of the center of
    # mass.
    # Note that kep should have been initialized with the correct
    # converter to return the proper energy units.

    rsum = zero
    for child in node.iter_children():
        set_radius_recursive(child, kep, global_debug)
        rsum += child.particle.radius

    # Currently rsum is the dynamical radius of the node.  Check how it
    # compares to the node's semimajor axis.
    M,semi,e,x,x,x = get_component_binary_elements(node.particle.child1,
                                                   node.particle.child2, kep)
    if rsum < 2*semi:

        # *** Factor of 2 here is ~arbitrary; should probably be set
        # *** in the class definition.

        if global_debug > 0:
            print('increasing radius for', node.particle.id, 'from', \
                  rsum, 'to', 2*semi)
        rsum = 2*semi

    node.particle.radius = rsum

# Note: iter_children() lists the leaves lying under a given node of
# type BinaryTreeOnParticle.  The child leaves are objects of type
# ChildTreeOnParticle.  The particle associated with child x is
# x.particle.
def set_radii(top_level_nodes, kep, global_debug):
    """Recompute the dynamical radius of every top-level node."""
    tree = top_level_nodes.as_binary_tree()
    for child in tree.iter_children():
        set_radius_recursive(child, kep, global_debug)
#----------------------------------------------------------------------
def print_elements(s, a, e, r, Emu, E):
    """Print binary elements (semi-major axis, eccentricity,
    separation, specific and total energy) in the standard one-line
    form, flushing stdout afterwards."""
    line = '{0} elements  a = {1}  e = {2}  r = {3}  E/mu = {4}  E = {5}'\
		.format(s, a, e, r, Emu, E)
    print(line)
    sys.stdout.flush()
def print_pair_of_stars(s, star1, star2, kep):
    """Print the binary elements of the pair (star1, star2), then the
    internal structure of each component."""
    M, a, e, r, E, t = get_component_binary_elements(star1, star2, kep)
    reduced_mass = star1.mass*star2.mass/(star1.mass + star2.mass)
    print_elements(s, a, e, r, E, E*reduced_mass)
    for component in (star1, star2):
        print_multiple_recursive(component, kep)
def print_multiple_recursive(m, kep, level=0):  ##### not working? #####
    """Recursively print the structure of (multiple) node ``m``,
    indented by level, with binary elements for each internal node.
    NOTE(review): flagged "not working?" by the original author --
    verify output before relying on it.
    """
    # Recursively print the structure of (multiple) node m.
    print('    '*level, 'key =', m.key, ' id =', int(m.id))
    print('    '*level, '  mass =', m.mass)
    print('    '*level, '  pos =', m.position)
    print('    '*level, '  vel =', m.velocity)
    sys.stdout.flush()
    if not m.child1 is None and not m.child2 is None:
        M,a,e,r,E,t = get_component_binary_elements(m.child1, m.child2, kep)
        print_elements(' '+'    '*level+'binary', a, e, r, E,
                       (E*m.child1.mass*m.child2.mass/M))
    if not m.child1 is None:
        print_multiple_recursive(m.child1, kep, level+1)
    if not m.child2 is None:
        print_multiple_recursive(m.child2, kep, level+1)
def print_multiple_simple(node, kep):
    """Print a one-line summary (key, id, mass, radius, and for
    internal nodes semi-major axis and energy) for every node under
    ``node``, indented by level."""
    for level, x in node.iter_levels():
        output = ''
        if level == 0: output += 'Multiple '
        output += '    ' * level
        particle = x
        output += "{0} id = {1} mass = {2} radius = {3}".format(particle.key,
                                                    particle.id,
                                                    particle.mass.number,
                                                    particle.radius.number)
        if not particle.child1 is None:
            child1 = particle.child1
            child2 = particle.child2
            M,a,e,r,E,t = get_component_binary_elements(child1, child2, kep)
            mu = child1.mass*child2.mass/M      # reduced mass
            output += " semi = {0} energy = {1}".format(a.number,
                                                        (mu*E).number)
        print(output)
        sys.stdout.flush()
def print_multiple_detailed(node, kep, pre, kT, dcen):
    """Print a detailed, indented description of multiple ``node``:
    per-node masses, binary elements, distance D of each binary's
    center of mass from ``dcen``, and energies scaled by ``kT``.

    Returns (is_bin, Etot): is_bin is 1 for a simple binary, and Etot
    is the summed internal two-body energy.
    """
    is_bin = 1
    Etot = zero
    for level, x in node.iter_levels():
        particle = x
        init = pre
        if level == 0: init += 'Multiple '
        init += '    ' * level
        id = particle.id
        M = particle.mass.number
        if not particle.child1 is None:
            # Internal node: print the pair and its binary elements.
            if level > 0: is_bin = 0
            child1 = particle.child1
            child2 = particle.child2
            idlow = min(child1.id, child2.id)
            idhigh = max(child1.id, child2.id)
            print('%s%d (%d,%d) m=%.5f' % (init, id, idlow, idhigh, M), end=' ')
            sys.stdout.flush()
            M,a,e,r,Emu,t = get_component_binary_elements(child1, child2, kep)
            cm = (child1.mass*child1.position + child2.mass*child2.position)/M
            mu = child1.mass*child2.mass/M      # reduced mass
            E = Emu*mu
            Etot += E
            # Distance of this binary's center of mass from dcen.
            D = 0.
            for k in range(3):
                D += (cm[k].number - dcen[k].number)**2
            D = numpy.sqrt(D)
            print('a=%.6f e=%4f r=%6f D=%.4f E/mu=%.5f E=%.5f E/kT=%.5f' % \
                  (a.number, e, r.number, D, Emu.number, E.number, E/kT))
            sys.stdout.flush()
        else:
            # Leaf: just id and mass.
            print('%s%d m=%.5f' % (init, id, M))
            sys.stdout.flush()
    return is_bin, Etot
def print_top_level(nodes, G):
    """Print diagnostics for the top-level nodes during rescaling:
    pairwise distances, radial velocities, potentials, and two-body
    energies, followed by the total potential/kinetic energies.

    Bug fix: the pairwise energy printed in the 'energies' section
    previously omitted the gravitational constant in the potential
    term (0.5*mu*v2 - m1*m2/r); it now uses -G*m1*m2/r, consistent
    with the 'potentials' section and the accumulated total ``pot``.
    """
    print('')
    print('distances:')
    for i in nodes:
        print(i.id, ' ', end=' ')
        for j in nodes:
            if j.id != i.id:
                rij = (j.position-i.position).length()
                print(j.id, rij, ' ', end=' ')
        print('')
    print('radial velocities:')
    for i in nodes:
        print(i.id, ' ', end=' ')
        for j in nodes:
            if j.id != i.id:
                rij = (j.position-i.position).length()
                vdotr = ((j.velocity-i.velocity)*(j.position-i.position)).sum()
                print(j.id, vdotr/rij, ' ', end=' ')
        print('')
    print('potentials:')
    for i in nodes:
        print(i.id, ' ', end=' ')
        mi = i.mass
        for j in nodes:
            if j.id != i.id:
                mj = j.mass
                rij = (j.position-i.position).length()
                print(j.id, -G*mi*mj/rij, ' ', end=' ')
        print('')
    print('energies:')
    pot = 0.0
    kin = 0.0
    for i in nodes:
        print(i.id, ' ', end=' ')
        mi = i.mass
        vi = i.velocity
        kin += 0.5*mi*(vi**2).sum()
        for j in nodes:
            if j.id != i.id:
                mj = j.mass
                muij = mi*mj/(mi+mj)            # reduced mass of the pair
                rij = (j.position-i.position).length()
                vij2 = (j.velocity-i.velocity).length_squared()
                # Bug fix: include G (was 0.5*muij*vij2 - mi*mj/rij).
                print(j.id, 0.5*muij*vij2 - G*mi*mj/rij, ' ', end=' ')
                if j.id > i.id:
                    pot -= G*mi*mj/rij
        print('')
    print('totals:', pot, kin, -kin/pot, pot+kin)
    print('')
| 122,275
| 37.854782
| 90
|
py
|
amuse
|
amuse-main/src/amuse/couple/collision_handler.py
|
import inspect
from amuse.units import units
from amuse.datamodel import Particle, Particles
from amuse.support.exceptions import AmuseException
class CollisionHandler(object):
    """
    Generic class for handling stellar collisions.

    Given a collision code (class or instance) and, optionally,
    gravity and stellar evolution codes, this class resolves
    collisions between pairs of stars and propagates the merge
    products back into the supplied codes.
    """

    def __init__(
        self,
        collision_code,
        collision_code_arguments = None,
        collision_code_parameters = None,
        gravity_code = None,
        stellar_evolution_code = None,
        verbose = False,
        **options
    ):
        """
        collision_code: class or instance implementing
            handle_collision(primary, secondary, ...).
        collision_code_arguments: constructor kwargs used when
            collision_code is a class (default: none).
        collision_code_parameters: parameters set on each newly
            instantiated collision code (default: none).
        gravity_code / stellar_evolution_code: codes kept in sync with
            the collision outcome; required when the collision code
            declares them required.
        verbose: print progress information.
        options: extra keyword arguments forwarded to handle_collision.
        """
        # Bug fix: the defaults used to be the mutable literals
        # dict(), shared across all instances; use None sentinels.
        if collision_code_arguments is None:
            collision_code_arguments = dict()
        if collision_code_parameters is None:
            collision_code_parameters = dict()

        if inspect.isclass(collision_code):
            self.collision_code_name = collision_code.__name__
        else:
            self.collision_code_name = collision_code.__class__.__name__

        if collision_code.stellar_evolution_code_required and stellar_evolution_code is None:
            raise AmuseException("{0} requires a stellar evolution code: "
                "CollisionHandler(..., stellar_evolution_code=x)".format(self.collision_code_name))
        if collision_code.gravity_code_required and gravity_code is None:
            raise AmuseException("{0} requires a gravity code: "
                "CollisionHandler(..., gravity_code=x)".format(self.collision_code_name))

        self.collision_code = collision_code
        self.collision_code_arguments = collision_code_arguments
        self.collision_code_parameters = collision_code_parameters

        self.gravity_code = gravity_code
        self.stellar_evolution_code = stellar_evolution_code
        self.verbose = verbose
        self.options = options

    def handle_collisions(self, primaries, secondaries):
        """Handle collisions between paired primaries/secondaries and
        return all merge products as one particle set."""
        result = Particles()
        for primary, secondary in zip(primaries.as_set(), secondaries.as_set()):
            result.add_particles(self.handle_collision(primary, secondary))
        return result

    def handle_collision(self, primary, secondary):
        """Resolve one collision between ``primary`` and ``secondary``.

        Runs the collision code, replaces the colliders by the merge
        products in the stellar evolution and gravity codes (when
        present), and returns the merge products.
        """
        colliders = primary.as_set().copy()
        colliders.add_particle(secondary)

        if self.verbose:
            print("Handling collision between stars with masses {0}.".format(colliders.mass))

        if inspect.isclass(self.collision_code):
            # Instantiate and configure a fresh collision code.
            collision_code = self.collision_code(**self.collision_code_arguments)
            if hasattr(collision_code, "initialize_code"):
                collision_code.initialize_code()
            for par, value in self.collision_code_parameters.items():
                setattr(collision_code.parameters, par, value)
            if hasattr(collision_code, "commit_parameters"):
                collision_code.commit_parameters()
        else:
            collision_code = self.collision_code

        handle_collision_args = self.options.copy()
        if collision_code.stellar_evolution_code_required:
            handle_collision_args["stellar_evolution_code"] = self.stellar_evolution_code
        if collision_code.gravity_code_required:
            handle_collision_args["gravity_code"] = self.gravity_code

        merge_products = collision_code.handle_collision(primary, secondary, **handle_collision_args)

        if self.verbose:
            print("{0} concluded with return value:\n{1}".format(self.collision_code_name, merge_products))

        if self.stellar_evolution_code is not None:
            if (hasattr(self.stellar_evolution_code, "new_particle_from_model") and
                    hasattr(merge_products, "get_internal_structure")):
                # Import the full internal structure of each merge
                # product into the stellar evolution code.
                for merge_product in merge_products:
                    self.stellar_evolution_code.new_particle_from_model(
                        merge_product.get_internal_structure(),
                        0.0 | units.Myr,
                        key = merge_product.key
                    )
            else:
                self.stellar_evolution_code.particles.add_particles(merge_products)
            self.stellar_evolution_code.particles.remove_particles(colliders)
            if self.verbose:
                print("Colliders have been replaced by merge product in {0}.".format(self.stellar_evolution_code.__class__.__name__))

        if inspect.isclass(self.collision_code):
            # Copy the results out before the temporary code is stopped.
            merge_products = merge_products.copy()
            if hasattr(collision_code, "stop"):
                collision_code.stop()

        if self.gravity_code is not None:
            new_grav_particles = Particles(keys=merge_products.key)
            new_grav_particles.mass = merge_products.mass

            if hasattr(merge_products, "radius"):
                new_grav_particles.radius = merge_products.radius
            elif hasattr(colliders, "radius"):
                new_grav_particles.radius = max(colliders.radius)

            if hasattr(merge_products, "x"):
                new_grav_particles.position = merge_products.position
            else:
                new_grav_particles.position = colliders.center_of_mass()

            if hasattr(merge_products, "vx"):
                new_grav_particles.velocity = merge_products.velocity
            else:
                new_grav_particles.velocity = colliders.center_of_mass_velocity()

            self.gravity_code.particles.add_particle(new_grav_particles)
            self.gravity_code.particles.remove_particles(colliders)
            if self.verbose:
                print("Colliders have been replaced by merge product in {0}.".format(self.gravity_code.__class__.__name__))

        return merge_products
| 5,584
| 41.961538
| 133
|
py
|
amuse
|
amuse-main/src/amuse/couple/bridge.py
|
"""
bridge-like integrator for amuse
the bridge class provides a bridge like coupling between different
gravitational integrators. In this way a system composed of multiple
components can be evolved taking account of the self gravity of the whole
system self-consistently, while choosing the most appropriate integrator
for the self-gravity of the component systems. This is mainly useful for
systems consisting of two or more components that are either well separated
spatially or have different scales (otherwise using a single integrator is
more efficient)
The main idea is that systems experience each others gravity through
periodic velocity kicks with ordinary evolution in between - the evolution
is thus described by an alternation of drift (D) and kick (K) operators,
here chosen as:
K(1/2 dt) D(dt) K(1/2 dt)
K(dt) denotes a kick of the velocities over a timestep dt, while D(dt)
denotes a drift, meaning secular evolution using self gravity of the
system, over dt.
implementation notes:
In order to use bridge the component systems should be initialized as usual,
then a bridge systems is initialized, after which one or more systems are
added:
from amuse.ext.bridge import bridge
bridgesys=bridge(verbose=False)
bridgesys.add_system(galaxy, (cluster,), False)
bridgesys.add_system(cluster, (galaxy,), True )
bridge builds on the full gravity interface, so unit handling etc is
guaranteed. Bridge itself is a (somewhat incomplete) gravity interface,
so the usual evolve, get_potential methods work (and bridge can be a
component in a bridge systems). Note that a single coordinate system should
be used at the moment for all the components systems (different units are
allowed though). The call to add systems, for example:
bridgesys.add_system(galaxy, (cluster,), False)
has three arguments: the system, a set with *interaction* partners and
a flag to specify whether synchronization is needed. The
interaction partners indicate which systems will kick the system. In the
most simple case these would be the set of other systems that are added,
but usually this is not what you want to get good performance. In some
cases you want to ignore one direction of interaction (eg. in a combined
simulation of a galaxy and a comet orbits around a star you may want the
ignore the gravity of the comet), in other cases you want to use a
different force calculator (eg integrating a cluster in a galaxy where
the galaxy is evolved with a tree code and the cluster with a direct sum
code, one also would want to use a tree code to calculate the cluster
gravity for the galaxy. In such a case one can derive a skeleton gravity
interface from the cluster system. A module is provided with some
examples of such *derived* systems, derived_grav_systems.py
Hints for good use:
The bridgesys is flexible but care should be taken in order to obtain
valid results. For one thing, there is no restriction or check on the
validity of the assumption of well separated dynamics: for example any
system could be split up and put together in bridge, but if the timestep
is chosen to be larger than the timestep criterion of the code, the
integration will show errors.
For good performance one should use derived systems to reduce the
complexity where possible.
There is an issue with the synchronization: some codes do not end on the
exact time of an evolve, or need an explicit sync call. In these cases it
is up to the user to determine whether bridge can be used (an explicit
sync call may induce extra errors that degrade the order of the
integrator).
"""
# issues:
# - for now, units in si
# - a common coordinate system is used for all systems
# - sync of systems should be checked
# - timestepping: adaptive dt?
import threading
from amuse.units import quantities
from amuse.units import units, constants, generic_unit_system, nbody_system
from amuse import datamodel
from amuse.support.exceptions import AmuseException, CoreException
class AbstractCalculateFieldForCodes(object):
    """
    Base class: evaluates gravity and potential fields generated by
    the particles of other codes, using a worker code supplied by a
    subclass (via _setup_code/_cleanup_code).
    """

    def __init__(self, input_codes, verbose=False, required_attributes=None):
        """
        'verbose' indicates whether to output some run info

        'required_attributes' specifies which particle attributes need to be
        transferred from the input_codes to the code that will calculate the
        field. For example, some codes don't need the velocity. Other codes
        may (wrongly) interpret the radius of the input code as gravitational
        softening. In the latter case
            required_attributes=['mass', 'x','y','z', 'vx','vy','vz']
        should prevent the radius of the input codes from being used.
        """
        self.codes_to_calculate_field_for = input_codes
        self.verbose = verbose
        if required_attributes is None:
            # Accept every attribute.
            self.required_attributes = lambda p, attribute_name: True
        else:
            # Accept only the explicitly listed attributes.
            self.required_attributes = lambda p, attribute_name: attribute_name in required_attributes

    def evolve_model(self, tend, timestep=None):
        """No-op: the field is recomputed from the source codes on demand."""

    def get_potential_at_point(self, radius, x, y, z):
        worker = self._setup_code()
        try:
            self._load_source_particles(worker)
            return worker.get_potential_at_point(radius, x, y, z)
        finally:
            self._cleanup_code(worker)

    def get_gravity_at_point(self, radius, x, y, z):
        worker = self._setup_code()
        try:
            self._load_source_particles(worker)
            return worker.get_gravity_at_point(radius, x, y, z)
        finally:
            self._cleanup_code(worker)

    def _load_source_particles(self, worker):
        # Copy the (filtered) particles of every source code into the
        # worker that evaluates the field.
        for source_code in self.codes_to_calculate_field_for:
            copied = source_code.particles.copy(filter_attributes=self.required_attributes)
            worker.particles.add_particles(copied)

    def _setup_code(self):
        pass

    def _cleanup_code(self, code):
        pass
class CalculateFieldForCodes(AbstractCalculateFieldForCodes):
    """
    Evaluates gravity and potential fields from the particles of
    other codes.  A brand-new worker code is built for every
    evaluation and stopped afterwards.
    """

    def __init__(self, code_factory_function, input_codes, *args, **kwargs):
        AbstractCalculateFieldForCodes.__init__(self, input_codes, *args, **kwargs)
        # Callable that returns a fresh worker code instance.
        self.code_factory_function = code_factory_function

    def _setup_code(self):
        # One throwaway instance per field evaluation.
        return self.code_factory_function()

    def _cleanup_code(self, code):
        # The throwaway instance is stopped, never reused.
        code.stop()
class CalculateFieldForCodesUsingReinitialize(AbstractCalculateFieldForCodes):
    """
    Evaluates gravity and potential fields from the particles of
    other codes.  A single worker code is reused and reset after
    every evaluation.
    """

    def __init__(self, code, input_codes, *args, **kwargs):
        AbstractCalculateFieldForCodes.__init__(self, input_codes, *args, **kwargs)
        # The one worker code instance, reused for all evaluations.
        self.code = code

    def _setup_code(self):
        return self.code

    def _cleanup_code(self, code):
        # Reset instead of stopping, so the code can be reused.
        code.reset()
class CalculateFieldForCodesUsingRemove(AbstractCalculateFieldForCodes):
    """
    Evaluates gravity and potential fields from the particles of
    other codes.  A single worker code is reused; its particle set is
    emptied after every evaluation.
    """

    def __init__(self, code, input_codes, *args, **kwargs):
        AbstractCalculateFieldForCodes.__init__(self, input_codes, *args, **kwargs)
        # The one worker code instance, reused for all evaluations.
        self.code = code

    def _setup_code(self):
        return self.code

    def _cleanup_code(self, code):
        # Empty the particle set so the next evaluation starts fresh.
        code.particles.remove_particles(code.particles)
class CalculateFieldForParticles(object):
    """
    Calculates the gravity and potential field generated by a set of
    particles; the particle set can come from another code.
    """

    def __init__(self, particles = None, gravity_constant = None,
                 softening_mode="shared", G = None):
        """
        particles: source particle set (an empty set is created when
            omitted).
        gravity_constant (alias G; give only one): value of the
            gravitational constant.  If omitted, constants.G is used
            when the particles carry SI masses.
        softening_mode: 'shared' (default), 'individual'/'radius', or
            'h_smooth' -- selects which softening length is used.
        """
        if particles is None:
            self.particles = datamodel.Particles()
        else:
            self.particles = particles
        if gravity_constant is None:
            gravity_constant = G
        elif G is not None:
            raise Exception("both the parameter 'gravity_constant'({0}) and the parameter 'G'({1}) are given, please specify only one!".format(gravity_constant, G))

        if gravity_constant is None:
            # Bug fix: guard against particles=None before calling
            # len(); previously this raised TypeError instead of the
            # intended AmuseException below.
            if particles is not None and len(particles) and hasattr(particles, 'mass'):
                try:
                    particles[0].mass.value_in(units.kg)
                    self.gravity_constant = constants.G
                except Exception:
                    raise AmuseException("For generic units the gravity_constant must be specified")
            else:
                raise AmuseException("Particle data not yet available, so the gravity_constant must be specified")
        else:
            self.gravity_constant = gravity_constant

        if softening_mode == "individual" or softening_mode == "radius":
            self._softening_lengths_squared = self._softening_lengths_squared_individual
        elif softening_mode == "h_smooth":
            self._softening_lengths_squared = self._softening_lengths_squared_h_smooth
        else:
            self._softening_lengths_squared = self._softening_lengths_squared_shared
        self.smoothing_length_squared = quantities.zero

    def _softening_lengths_squared_individual(self):
        # Per-particle softening: use each particle's radius.
        return self.particles.radius**2

    def _softening_lengths_squared_h_smooth(self):
        # SPH-style softening: use each particle's smoothing length.
        return self.particles.h_smooth**2

    def _softening_lengths_squared_shared(self):
        # One shared softening length for all particles.
        return self.smoothing_length_squared#.as_vector_with_length(len(self.particles))

    def cleanup_code(self):
        """Drop the particle set."""
        self.particles = datamodel.Particles()

    def evolve_model(self, tend, timestep=None):
        """No-op: a static field calculator has no internal dynamics."""

    def get_potential_at_point(self, radius, x, y, z):
        """Return the gravitational potential at the given points."""
        positions = self.particles.position
        result = quantities.AdaptingVectorQuantity()
        for i in range(len(x)):
            dx = x[i] - positions.x
            dy = y[i] - positions.y
            dz = z[i] - positions.z
            dr_squared = (dx * dx) + (dy * dy) + (dz * dz)
            dr = (dr_squared + self._softening_lengths_squared()).sqrt()
            energy_of_this_particle = (self.particles.mass / dr).sum()
            result.append(-self.gravity_constant * energy_of_this_particle)
        return result

    def get_gravity_at_point(self, radius, x, y, z):
        """Return the gravitational acceleration at the given points.

        NOTE(review): unlike get_potential_at_point, the incoming
        radius argument is added to the softening here -- confirm this
        asymmetry is intended.
        """
        positions = self.particles.position
        m1 = self.particles.mass
        result_ax = quantities.AdaptingVectorQuantity()
        result_ay = quantities.AdaptingVectorQuantity()
        result_az = quantities.AdaptingVectorQuantity()
        for i in range(len(x)):
            dx = x[i] - positions.x
            dy = y[i] - positions.y
            dz = z[i] - positions.z
            dr_squared = ((dx * dx) + (dy * dy) + (dz * dz) +
                self._softening_lengths_squared() + radius[i]**2)
            ax = -self.gravity_constant * (m1*dx/dr_squared**1.5).sum()
            ay = -self.gravity_constant * (m1*dy/dr_squared**1.5).sum()
            az = -self.gravity_constant * (m1*dz/dr_squared**1.5).sum()
            result_ax.append(ax)
            result_ay.append(ay)
            result_az.append(az)
        return result_ax, result_ay, result_az
class GravityCodeInField(object):
def __init__(self, code, field_codes, do_sync=True, verbose=False, radius_is_eps=False, h_smooth_is_eps=False, zero_smoothing=False):
    """
    verbose indicates whether to output some run info

    code: the gravity code being wrapped; field_codes: the codes whose
    fields kick this code's particles.
    """
    self.code = code
    self.field_codes = field_codes
    # Start the bridge clock from the wrapped code's own time when
    # available.
    if hasattr(self.code, 'model_time'):
        self.time = self.code.model_time
    else:
        self.time = quantities.zero
    self.do_sync=do_sync
    self.verbose=verbose
    self.timestep=None
    # Interpretation of the softening attribute; at most one of these
    # should be set.
    self.radius_is_eps = radius_is_eps
    self.h_smooth_is_eps = h_smooth_is_eps

    # Attributes copied from the code's particles when kicking.
    required_attributes = ['mass', 'x', 'y', 'z', 'vx', 'vy', 'vz']
    if self.radius_is_eps:
        required_attributes.append('radius')
    elif self.h_smooth_is_eps:
        required_attributes.append('h_smooth')
    self.required_attributes = lambda p, x : x in required_attributes

    # If the wrapped code exposes no epsilon_squared parameter, force
    # zero smoothing.
    try:
        hasattr(self.code.parameters, "epsilon_squared")
        self.zero_smoothing = zero_smoothing
    except AttributeError:
        self.zero_smoothing = True
    except CoreException:  # hasattr will fail with an exception
        self.zero_smoothing = True
def evolve_model(self,tend,timestep=None):
    """
    evolve combined system to tend, timestep fixes timestep
    """
    # Kick-drift-kick sequence: a half kick first, full kicks between
    # drifts, and a closing half kick (only if at least one step ran).
    if timestep is None:
        timestep = self.timestep
        # NOTE(review): if self.timestep is also None, the comparison
        # below raises TypeError -- confirm callers always set one.
    first=True
    while self.time < (tend-timestep/2.):
        if first:
            self.kick(timestep/2.)
            first=False
        else:
            self.kick(timestep)
        self.drift(self.time+timestep)
        self.time+=timestep
    if not first:
        self.kick(timestep/2.)
def synchronize_model(self):
    """
    explicitly synchronize all components
    """
    if hasattr(self.code,"synchronize_model"):
        if(self.verbose):
            print(self.code.__class__.__name__,"is synchronizing", end=' ')
        self.code.synchronize_model()
        if(self.verbose):
            print(".. done")

def get_potential_at_point(self,radius,x,y,z):
    # Delegate to the wrapped code.
    return self.code.get_potential_at_point(radius,x,y,z)

def get_gravity_at_point(self,radius,x,y,z):
    # Delegate to the wrapped code.
    return self.code.get_gravity_at_point(radius,x,y,z)

@property
def model_time(self):
    # Time tracked by the bridge wrapper, not the wrapped code.
    return self.time

@property
def potential_energy(self):
    # Internal potential energy of the wrapped code plus the
    # interaction energy with every field code.
    if not hasattr(self.code, 'particles'):
        return quantities.zero
    result = self.code.potential_energy
    particles = self.code.particles.copy(filter_attributes = self.required_attributes)
    for y in self.field_codes:
        energy = self.get_potential_energy_in_field_code(particles, y)
        result += energy
    return result

@property
def kinetic_energy(self):
    return self.code.kinetic_energy

@property
def thermal_energy(self):
    # Zero for codes without a thermal component.
    if hasattr(self.code,'thermal_energy'):
        return self.code.thermal_energy
    else:
        return quantities.zero

@property
def particles(self):
    return self.code.particles

@property
def gas_particles(self):
    # Only available when the wrapped code has gas particles.
    if hasattr(self.code, "gas_particles"):
        return self.code.gas_particles
    else:
        raise AttributeError

@property
def dm_particles(self):
    # Only available when the wrapped code has dark-matter particles.
    if hasattr(self.code, "dm_particles"):
        return self.code.dm_particles
    else:
        raise AttributeError
def drift(self, tend):
if not hasattr(self.code,"evolve_model"):
return
if (self.verbose):
print(self.code.__class__.__name__, "is evolving to", tend)
self.code.evolve_model(tend)
if(self.verbose):
print(".. done")
def cannot_kick(self):
"""
check if the code is capable of kicking other particles,
please do not try to optimize this, I know it is called every kick but
only calculating it at the start causes an annoying bug in certain uses of the code.
"""
return len(self.code.particles)==0 or not (hasattr(self, 'particles') and 'vx' in self.particles.get_attribute_names_defined_in_store())
def kick(self, dt):
if self.cannot_kick():
return quantities.zero
particles = self.code.particles.copy(filter_attributes = self.required_attributes)
kinetic_energy_before = particles.kinetic_energy()
for field_code in self.field_codes:
if(self.verbose):
print(self.code.__class__.__name__,"receives kick from",field_code.__class__.__name__, end=' ')
self.kick_with_field_code(
particles,
field_code,
dt
)
if(self.verbose):
print(".. done")
channel=particles.new_channel_to(self.code.particles)
channel.copy_attributes(["vx","vy","vz"])
kinetic_energy_after = particles.kinetic_energy()
return kinetic_energy_after - kinetic_energy_before
def _softening_lengths(self, particles):
if self.radius_is_eps:
return particles.radius
elif self.h_smooth_is_eps:
return particles.h_smooth
elif self.zero_smoothing:
return 0.*particles.x
else:
return (self.code.parameters.epsilon_squared**0.5).as_vector_with_length(len(particles))
def get_potential_energy_in_field_code(self, particles, field_code):
pot=field_code.get_potential_at_point(
self._softening_lengths(particles),
particles.x,
particles.y,
particles.z
)
return (pot*particles.mass).sum() / 2
def kick_with_field_code(self, particles, field_code, dt):
ax,ay,az=field_code.get_gravity_at_point(
self._softening_lengths(particles),
particles.x,
particles.y,
particles.z
)
self.update_velocities(particles, dt, ax, ay, az)
def update_velocities(self,particles, dt, ax, ay, az):
particles.vx += dt * ax
particles.vy += dt * ay
particles.vz += dt * az
def stop(self):
self.code.stop()
class Bridge(object):
    """Couple several codes with a shared kick-drift integration scheme.

    Each added system drifts with its own integrator and receives kicks from
    its partner codes; the bridge advances all of them on a common timestep.
    """
    def __init__(self, timestep = None, verbose=False, use_threading=True,method=None):
        """
        verbose indicates whether to output some run info

        ``method``, when given, selects a user-supplied splitting scheme
        (see evolve_simple_steps); otherwise a joined leapfrog is used.
        """
        self.codes=[]
        self.time=quantities.zero
        self.verbose=verbose
        self.timestep=timestep
        self.kick_energy = quantities.zero
        self.use_threading = use_threading
        self.time_offsets = dict()
        self.method=method
        self.channels = datamodel.Channels()
    def add_system(self, interface, partners=set(), do_sync=True,
            radius_is_eps=False, h_smooth_is_eps=False, zero_smoothing=False):
        """
        add a system to bridge integrator

        NOTE(review): ``partners=set()`` is a shared mutable default; it is
        never mutated here, but callers should still pass their own set.
        """
        if hasattr(interface, "particles"):
            code = GravityCodeInField(interface, partners, do_sync, self.verbose,
                radius_is_eps, h_smooth_is_eps, zero_smoothing)
            self.add_code(code)
        else:
            if len(partners):
                raise Exception("You added a code without particles, but with partners, this is not supported!")
            self.add_code(interface)
    def add_code(self, code):
        # remember the clock offset so codes may start at different times
        self.codes.append(code)
        if hasattr(code,"model_time"):
            self.time_offsets[code]=(self.time-code.model_time)
        else:
            self.time_offsets[code]=quantities.zero
    def evolve_model(self, tend, timestep=None):
        """
        evolve combined system to tend, timestep fixes timestep
        """
        if timestep is None:
            if self.timestep is None:
                timestep=tend-self.time
            else:
                timestep = self.timestep
        if self.method is None:
            return self.evolve_joined_leapfrog(tend,timestep)
        else:
            return self.evolve_simple_steps(tend,timestep)
    def evolve_simple_steps(self,tend,timestep):
        # delegate the kick/drift interleaving to the user-supplied method
        while self.time < (tend-timestep/2):
            self._drift_time=self.time
            self.method(self.kick_codes,self.drift_codes_dt, timestep)
            self.channels.copy()
            self.time=self.time+timestep
    def evolve_joined_leapfrog(self,tend,timestep):
        # kick-drift-kick: half kick first, full kicks between drifts,
        # closing half kick after the loop
        first=True
        while self.time < (tend-timestep/2.):
            if first:
                self.kick_codes(timestep/2.)
                first=False
            else:
                self.kick_codes(timestep)
            self.drift_codes(self.time+timestep)
            self.channels.copy()
            self.time += timestep
        if not first:
            self.kick_codes(timestep/2.)
    def synchronize_model(self):
        """
        explicitly synchronize all components
        """
        for x in self.codes:
            if hasattr(x,"synchronize_model"):
                if(self.verbose): print(x.__class__.__name__,"is synchronizing", end=' ')
                x.synchronize_model()
                if(self.verbose): print(".. done")
    def stop(self):
        for one_code in self.codes:
            if hasattr(one_code, "stop"):
                one_code.stop()
    def get_potential_at_point(self,radius,x,y,z):
        # total potential is the sum over all member codes
        pot=quantities.zero
        for code in self.codes:
            _pot=code.get_potential_at_point(radius,x,y,z)
            pot=pot+_pot
        return pot
    def get_gravity_at_point(self,radius,x,y,z):
        # total acceleration is the sum over all member codes
        ax=quantities.zero
        ay=quantities.zero
        az=quantities.zero
        for code in self.codes:
            _ax,_ay,_az=code.get_gravity_at_point(radius,x,y,z)
            ax=ax+_ax
            ay=ay+_ay
            az=az+_az
        return ax,ay,az
    @property
    def model_time(self):
        return self.time
    @property
    def potential_energy(self):
        result=quantities.zero
        for x in self.codes:
            result+=x.potential_energy
        return result
    @property
    def kinetic_energy(self):
        result=quantities.zero
        for x in self.codes:
            result+=x.kinetic_energy
        return result #- self.kick_energy
    @property
    def thermal_energy(self):
        result=quantities.zero
        for x in self.codes:
            if hasattr(x,'thermal_energy'):
                result+=x.thermal_energy
        return result
    @property
    def particles(self):
        # superset over all member codes that expose particles
        array=[]
        for x in self.codes:
            if hasattr(x,"particles"):
                array.append(x.particles)
        if len(array) == 0:
            raise AttributeError
        elif len(array) == 1:
            return array[0]
        return datamodel.ParticlesSuperset(array)
    @property
    def gas_particles(self):
        array=[]
        for x in self.codes:
            if hasattr(x,"gas_particles"):
                array.append(x.gas_particles)
        if len(array) == 0:
            raise AttributeError
        elif len(array) == 1:
            return array[0]
        return datamodel.ParticlesSuperset(array)
    @property
    def dm_particles(self):
        # codes without a dedicated dm set contribute all their particles
        array=[]
        for x in self.codes:
            if hasattr(x,"dm_particles"):
                array.append(x.dm_particles)
            elif hasattr(x,"particles"):
                array.append(x.particles)
        if len(array) == 0:
            raise AttributeError
        elif len(array) == 1:
            return array[0]
        return datamodel.ParticlesSuperset(array)
    # 'private' functions
    def drift_codes_dt(self,dt):
        self._drift_time+=dt
        self.drift_codes(self._drift_time)
    def drift_codes(self,tend):
        # drift all codes (optionally in parallel threads) to tend,
        # correcting each with its stored clock offset
        threads=[]
        for x in self.codes:
            offset=self.time_offsets[x]
            if hasattr(x,"drift"):
                threads.append(threading.Thread(target=x.drift, args=(tend-offset,)) )
            elif hasattr(x,"evolve_model"):
                threads.append(threading.Thread(target=x.evolve_model, args=(tend-offset,)) )
        if self.use_threading:
            for x in threads:
                x.start()
            for x in threads:
                x.join()
        else:
            # run sequentially in this thread
            for x in threads:
                x.run()
    def kick_codes(self,dt):
        # apply all mutual kicks and accumulate the injected kinetic energy
        de = quantities.zero
        for x in self.codes:
            if hasattr(x,"kick"):
                de += x.kick(dt)
        self.kick_energy += de
| 24,220
| 32.922969
| 164
|
py
|
amuse
|
amuse-main/src/amuse/couple/fallback_stellar_evolution.py
|
import os
import sys
import math
import traceback
import numpy
from amuse.units import units
from amuse.units import constants
from amuse.community.sse.interface import SSE
from amuse.community.evtwin.interface import EVtwin
from amuse.community.mesa.interface import MESA
from amuse.community.cachedse.interface import CachedStellarEvolution, ParticlesTimeseries
from amuse import datamodel
def _fb_search_endpoint_reached(star):
    """Return True once *star* ends the fallback-code RMS search phase.

    The search over the fallback code's history continues while the star is
    a normal (pre-remnant) type: types 0-9, or 16; any other stellar type
    ends the search.
    """
    # Hoist the unit conversion: the original evaluated
    # star.stellar_type.value_in(...) up to three times per call.
    stellar_type = star.stellar_type.value_in(units.stellar_type)
    return not (0 <= stellar_type <= 9 or stellar_type == 16)
def is_remnant_stellar_type(stellar_type):
    """True when *stellar_type* is one of the remnant types (10 through 16)."""
    type_number = stellar_type.value_in(units.stellar_type)
    return type_number in range(10, 17)
class FallbackStellarEvolution(object):
    """
    FallbackStellarEvolution (formerly EVtwin2SSE) started as a modification
    of the EVtwin stellar evolution code that hands over execution to
    SSE when EVtwin crashes. It now is set up to work with general codes.
    The handover algorithm performs a RMS search on the relative differences of
    mass, radius and luminosity between the last point known EVtwin state and
    the entire stellar history of SSE until the first state of a stellar remnant type.
    :argument enforce_monotonic_mass_evolution: (False) flag to enforce that the mass can only go down
    :argument verbose: (True) be verbose?
    :argument rms_weights: ([1.,1.,1.]) RMS weighting for [mass,radius,luminosity]
    changelog:
    2012-11-21 FIP code can handle N>1 stars, end_time, rms_weights, option to enforce monotonic mass evolution.
    2012-11-20 FIP changed to FallbackStellarEvolution.
    """
    def __init__(self, main_code_factory = EVtwin, fallback_code_factory = SSE,
            enforce_monotonic_mass_evolution=False,verbose=True,rms_weights=[1.,1.,1.]):
        # NOTE(review): rms_weights=[1.,1.,1.] is a mutable default argument;
        # it is copied into a fresh numpy array before any mutation, so the
        # sharing is harmless here.
        self._main_se = main_code_factory()
        self._fallback_se = fallback_code_factory()
        self.particles = datamodel.Particles()
        # per-particle bookkeeping for the main -> fallback handover
        self.EVtwinAgeAtSwitch = dict()
        self.EVtwinException = dict()
        self.ActiveModel = dict()
        self._FBTimeseries=dict()
        self.model_time=0| units.Myr
        self.enforce_monotonic_mass_evolution=enforce_monotonic_mass_evolution
        self.verbose=verbose
        if len(rms_weights)!=3:
            raise Exception("weights should have len 3")
        self.rms_weights=numpy.array(rms_weights,'d')
        self.rms_weights/=self.rms_weights.sum()
        if self.verbose:
            print("started FallbackStellarEvolution with:")
            print("main SE code:", self._main_se.__class__.__name__)
            print("fallback SE code:", self._fallback_se.__class__.__name__)
            if self.enforce_monotonic_mass_evolution:
                print("enforcing monotonic mass evolution")
            print("normalized rms weights are %5.3f (mass), %5.3f (radius), %5.3f (luminosity)"% \
                (self.rms_weights[0],self.rms_weights[1],self.rms_weights[2]))
    def cache_underlying_models(self, cacheDir):
        # wrap both codes in a caching layer that stores results in cacheDir
        self._main_se = CachedStellarEvolution(self._main_se, cacheDir)
        self._fallback_se = CachedStellarEvolution(self._fallback_se, cacheDir)
    # note: commit and recommit of parameters (and also of particles) should be checked at some point
    # it works, but could lead to unexpected results (because sse evolution is delayed)
    # def commit_parameters(self):
    # self._main_se.commit_parameters()
    # self._fallback_se.commit_parameters()
    def commit_particles(self):
        # synchronize our particle set into both underlying codes
        new=self.particles.difference(self._main_se.particles).copy()
        removed=self._main_se.particles.difference(self.particles).copy()
        # remove all particles from underlying models
        if len(removed)>0:
            self._main_se.particles.remove_particles(removed)
        if len(removed)>0:
            self._fallback_se.particles.remove_particles(removed)
        # initialize EVtwin, transfer state
        self._main_se.particles.add_particles(new)
        for part in self.particles:
            self.ActiveModel[part]=self._main_se
            self._transfer_state(part)
        # initialize SSE
        sse_part=self._fallback_se.particles.add_particles(new)
        for part in self.particles:
            self._FBTimeseries[part]=ParticlesTimeseries(part.as_particle_in_set(self._fallback_se.particles))
            self._FBTimeseries[part].add_timepoint()
        self.model_time=self.particles.age.min()
    # copy current state from underlying <active model>.particles to self.particles
    def _transfer_state(self,particle,age_offset=None):
        # NOTE(review): the truthiness test below also skips a zero-valued
        # age_offset, not only None — confirm this is intended.
        ActiveModelParticle=particle.as_particle_in_set(self.ActiveModel[particle].particles)
        particle.mass = ActiveModelParticle.mass
        particle.age = (ActiveModelParticle.age+age_offset) if age_offset else ActiveModelParticle.age
        particle.luminosity = ActiveModelParticle.luminosity
        particle.temperature = ActiveModelParticle.temperature
        particle.stellar_type = ActiveModelParticle.stellar_type
        particle.radius = ActiveModelParticle.radius
    def evolve_model(self,tend=None):
        """Advance every star lagging behind model_time by one step.

        With *tend* given, repeat single steps until model_time >= tend.
        A star is evolved by the main code until that code raises, after
        which the fallback code takes over (RMS-matched handover).
        """
        if tend is not None:
            while self.model_time<tend:
                self.evolve_model()
            return
        for particle in self.particles:
            if particle.age>self.model_time: continue
            if self.ActiveModel[particle] == self._main_se:
                evtwin_part=particle.as_particle_in_set(self._main_se.particles)
                try:
                    prev_age = evtwin_part.age
                    evtwin_part.evolve_one_step()
                    if (prev_age == evtwin_part.age):
                        raise Exception("Evtwin model timestep is zero.")
                    self._transfer_state(particle)
                # EVtwin crashed; switch to SSE
                except Exception as ex:
                    self.EVtwinAgeAtSwitch[particle] = evtwin_part.age
                    self.EVtwinException[particle] = ex
                    self.ActiveModel[particle] = self._fallback_se
                    if self.verbose:
                        print("FallbackStellarEvolution switching models, %s (age = %s) threw exception: %s" % \
                            (self._main_se.__class__.__name__,self.EVtwinAgeAtSwitch[particle],self.EVtwinException[particle]))
                    # run SSE for just long enough to get data for the RMS search
                    sse_part=particle.as_particle_in_set(self._fallback_se.particles)
                    while not _fb_search_endpoint_reached(sse_part):
                        sse_part.evolve_one_step()
                        self._FBTimeseries[particle].add_timepoint()
                    if self.verbose:
                        print("FallbackStellarEvolution switch: evolved SSE to: %s " % (sse_part.age,))
                    sse_track=self._FBTimeseries[particle].particles[0]
                    self._FB_rms_search(evtwin_part, sse_track)
                    # TODO: Add ModelSwitchFailed exception when RMS statistics is above some threshold?
                    if self.verbose:
                        print(("FallbackStellarEvolution switch parameters: %s %s %s %s" %
                            (sse_track.SSEIndexAtSwitch, sse_track.SSENextStateIndex, sse_track.SSEAgeAtSwitch, sse_track.RMSErrorAtSwitch)))
                    self._evolve_model_FB(particle)
            # model has been switched to SSE
            else:
                self._evolve_model_FB(particle)
        self.model_time=self.particles.age.min()
    # def _plausible_stellar_type_transition(evt_state, sse_state):
    #
    # return ( \
    # # no change / advancement in stellar type
    # (evt_state <= sse_state) or \
    #
    # # no differentiation between MS star and convective low-mass star
    # (evt_state <= 1 and sse_state <= 1)
    # )
    # returns the optimal index for SSE rms
    def _FB_rms_search(self, evtwin_star, sse_track):
        # Weighted RMS match of the last known main-code state against every
        # state in the fallback history; stores the best index/age/error on
        # sse_track.
        sse_track.SSEIndexAtSwitch = float("nan")
        sse_track.SSEAgeAtSwitch = float("nan")
        sse_track.RMSErrorAtSwitch = float("inf")
        # TODO heuristic for fixing non-physical stellar type transitions
        #evtwin_final_known_state = -1
        #for i in range(len(evt_raw['stellar_types'])):
        # if evt_raw['stellar_types'][i] != 16:
        # evtwin_final_known_state = evt_raw['stellar_types'][i]
        for i in range(len(sse_track.age)):
            # TODO
            #if not plausible_stellar_type_transition(evtwin_final_known_state, sse_raw['stellar_types'][i]):
            # continue
            rel_diff_mass = (sse_track.mass[i] - evtwin_star.mass) / evtwin_star.mass
            if self.enforce_monotonic_mass_evolution and rel_diff_mass>0:
                continue
            rel_diff_radius = (sse_track.radius[i] - evtwin_star.radius) / evtwin_star.radius
            rel_diff_luminosity = (sse_track.luminosity[i] - evtwin_star.luminosity) / evtwin_star.luminosity
            rms = ( self.rms_weights[0]*(rel_diff_mass)**2 \
                + self.rms_weights[1]*(rel_diff_radius)**2 \
                + self.rms_weights[2]*(rel_diff_luminosity)**2)
            if (rms < sse_track.RMSErrorAtSwitch):
                sse_track.SSEIndexAtSwitch = i
                sse_track.SSENextStateIndex = i
                sse_track.SSEAgeAtSwitch = sse_track.age[i] #- (10E-3 | units.Myr) # ugly way to "cheat the convergence check"
                sse_track.RMSErrorAtSwitch = rms
        # TODO calculate fudge factors for m, r, L, T?
        #self._transfer_state_FB()
    def _evolve_model_FB(self,star):
        # advance a fallback-evolved star by one recorded history state,
        # extending the history on demand
        sse_part=star.as_particle_in_set(self._fallback_se.particles)
        sse_track=self._FBTimeseries[star].particles[0]
        # advance SSE if necessary
        while (sse_track.SSENextStateIndex >= len(sse_track.age)):
            sse_part.evolve_one_step()
            self._FBTimeseries[star].add_timepoint()
        # update state state
        star.age = sse_track.age[ sse_track.SSENextStateIndex ] - sse_track.SSEAgeAtSwitch + self.EVtwinAgeAtSwitch[star]
        star.mass = sse_track.mass[ sse_track.SSENextStateIndex ]
        star.radius = sse_track.radius[ sse_track.SSENextStateIndex ]
        star.luminosity = sse_track.luminosity[ sse_track.SSENextStateIndex ]
        star.temperature = sse_track.temperature[ sse_track.SSENextStateIndex ]
        star.stellar_type = sse_track.stellar_type[ sse_track.SSENextStateIndex ]
        # advance index
        sse_track.SSENextStateIndex = sse_track.SSENextStateIndex + 1
    def stop(self):
        # stop both underlying community codes
        self._main_se.stop()
        self._fallback_se.stop()
if __name__ == '__main__':
    # Demonstration: evolve four stars (0.5 - 100 MSun) with MESA as the main
    # code (SSE as fallback), printing each star's type and active code.
    stellar_evolution = FallbackStellarEvolution(MESA)
    # stop MESA once its timestep collapses, which triggers the fallback switch
    stellar_evolution._main_se.parameters.min_timestep_stop_condition=1.| units.yr
    stars = datamodel.Particles(4)
    stars.mass = [0.5,1.0,5.,100.] | units.MSun
    stars = stellar_evolution.particles.add_particles(stars)
    stellar_evolution.commit_particles()
    print(stellar_evolution.model_time,'|', end=' ')
    for star in stars:
        print(star.stellar_type,'|', end=' ')
        print(stellar_evolution.ActiveModel[star].__class__.__name__, end=' ')
    print()
    while stellar_evolution.model_time < 13.2 | units.Gyr:
        stellar_evolution.evolve_model()
        print(stellar_evolution.model_time,'|', end=' ')
        for star in stars:
            print(star.stellar_type,',', end=' ')
            print(stellar_evolution.ActiveModel[star].__class__.__name__,'|', end=' ')
        print()
    stellar_evolution.stop()
| 11,720
| 43.06391
| 145
|
py
|
amuse
|
amuse-main/src/amuse/couple/__init__.py
| 0
| 0
| 0
|
py
|
|
amuse
|
amuse-main/src/amuse/couple/encounters.py
|
"""
This module defines the classes that handle close
encounters between particles.
It is used by the multiples module.
"""
from amuse.datamodel import Particle
from amuse.datamodel import Particles
from amuse.datamodel import ParticlesSuperset
from amuse.datamodel import trees
from amuse.units import constants
from amuse.units import nbody_system
from amuse.units import quantities
from amuse.units.quantities import as_vector_quantity
from amuse.units.quantities import zero
from amuse.support import options
from amuse.support import code
from amuse.support import interface
from amuse import io
import logging
import numpy
import logging
import sys
# Module-level loggers: energy bookkeeping and encounter tracing.
LOG_ENERGY = logging.getLogger('energy')
LOG_ENCOUNTER = logging.getLogger('encounter')
# todo to be more compatible with multiples module:
# - not handling the encounter in case of the neighbours
# - splitting a resulting binary in case of a perturber
class AbstractSelectNeighboursMixin(object):
    """Base mixin defining the neighbour-selection interface.

    The base registers no parameters and provides no selection strategy;
    concrete mixins override both hooks.
    """
    def define_neighbours_selection_parameters(self, handler):
        """Register selection parameters on *handler* (none by default)."""
        return None
    def select_neighbours_from_field(self):
        """Subclasses must provide a neighbour-selection strategy."""
        raise NotImplementedError
class EmptySelectNeighboursMixin(AbstractSelectNeighboursMixin):
    """Neighbour-selection strategy that deliberately selects nothing."""
    def select_neighbours_from_field(self):
        """Intentionally a no-op: the encounter keeps no field neighbours."""
        return None
class SelectNeighboursByDistanceMixin(AbstractSelectNeighboursMixin):
    """Select field particles within a scaled distance of the encounter.

    The cut-off radius is the large scale of the interaction multiplied by
    the ``neighbours_factor`` parameter.
    """
    def __init__(self):
        self.neighbours_factor = 1.0
    def get_neighbours_factor(self):
        """Parameter-handler getter for ``neighbours_factor``."""
        return self.neighbours_factor
    def set_neighbours_factor(self, value):
        """Parameter-handler setter for ``neighbours_factor``."""
        self.neighbours_factor = value
    def define_neighbours_selection_parameters(self, handler):
        """Expose ``neighbours_factor`` as a settable method parameter."""
        handler.add_method_parameter(
            "get_neighbours_factor",
            "set_neighbours_factor",
            "neighbours_factor",
            "look for neighbours of the interaction, neighbours_factor * large scale of the interaction",
            default_value = 1.0
        )
    def select_neighbours_from_field(self):
        """Add every field particle inside the cut-off radius to the set of
        particles close to the encounter."""
        if len(self.particles_in_field) == 0:
            return
        com = self.particles_in_encounter.center_of_mass()
        separations = (self.particles_in_field.position - com).lengths()
        cutoff = self.large_scale_of_particles_in_the_encounter * self.neighbours_factor
        selected = self.particles_in_field[separations <= cutoff]
        self.particles_close_to_encounter.add_particles(selected)
        LOG_ENCOUNTER.info("neighbor particles (mutliples or singles): {0}".format(self.particles_close_to_encounter.key))
class SelectNeighboursByPerturbationMixin(AbstractSelectNeighboursMixin):
    """Select neighbours by the perturbation (mass / distance**3) they exert
    on the encounter, and break up binaries that remain perturbed."""
    def __init__(self):
        self.neighbor_perturbation_limit = 0.1
        self.wide_perturbation_limit = 0.01
        # if True use the apocenter as the binary scale, else 2 * semimajor axis
        self.retain_binary_apocenter = True
    def get_neighbor_perturbation_limit(self):
        return self.neighbor_perturbation_limit
    def set_neighbor_perturbation_limit(self, value):
        self.neighbor_perturbation_limit = value
    def get_wide_perturbation_limit(self):
        return self.wide_perturbation_limit
    def set_wide_perturbation_limit(self, value):
        self.wide_perturbation_limit = value
    def define_neighbours_selection_parameters(self, handler):
        # expose both limits as settable method parameters
        handler.add_method_parameter(
            "get_neighbor_perturbation_limit",
            "set_neighbor_perturbation_limit",
            "neighbor_perturbation_limit",
            "look for neighbours of the interaction if these neighbours might perturb the collission elements",
            default_value = 0.1
        )
        handler.add_method_parameter(
            "get_wide_perturbation_limit",
            "set_wide_perturbation_limit",
            "wide_perturbation_limit",
            "split resulting binary in case of a possible perturber",
            default_value = 0.01
        )
    def select_neighbours_from_field(self):
        # pick field particles whose perturbation mass/d**3 on the encounter
        # exceeds the limit, or that simply lie inside the large scale
        if len(self.particles_in_field) == 0:
            return
        center_of_mass = self.particles_in_encounter.center_of_mass()
        distances = (self.particles_in_field.position-center_of_mass).lengths()
        perturbation = self.particles_in_field.mass / distances**3
        # remember the strongest perturber; used later to test binaries
        max_perturber_index = perturbation.argmax()
        self.perturber_in_field = self.particles_in_field[max_perturber_index]
        self.perturber_distance = distances[max_perturber_index]
        factor = 0.5*(self.particles_in_encounter.mass.sum())/self.large_scale_of_particles_in_the_encounter**3
        minimum_perturbation = self.neighbor_perturbation_limit*factor
        print("ENCOUNTERS:", "minimum_perturbation", minimum_perturbation, "radius", self.large_scale_of_particles_in_the_encounter, "factor", factor)
        near_particles = self.particles_in_field[numpy.logical_or(perturbation > minimum_perturbation , distances < self.large_scale_of_particles_in_the_encounter)]
        print("NP:", near_particles)
        LOG_ENCOUNTER.info("perturbations({0}): {1}".format(minimum_perturbation, perturbation[perturbation > minimum_perturbation]))
        self.particles_close_to_encounter.add_particles(near_particles)
        LOG_ENCOUNTER.info("neighbor particles (mutliples or singles): {0}".format(self.particles_close_to_encounter.key))
    def remove_soft_binaries_from_evolved_state(self):
        """
        Remove binaries with a aphelion (largest separation between the
        parts) larger that the small scale of the encounter from the
        resolved component list.
        """
        tree = self.singles_and_multiples_after_evolve.new_binary_tree_wrapper()
        nodes_to_break_up = []
        # a branch in the tree is a node with two children
        # the iter_branches will return only the branches under this node
        roots = list(tree.iter_branches())
        roots_to_check = list(roots)
        # a leaf in the tree is a node with no children (a node can have no children or two children)
        # the iter_leafs will return only the leafs under this node
        singles = Particles()
        for x in tree.iter_leafs():
            singles.add_particle(x.particle)
        while len(roots_to_check)>0:
            root_node = roots_to_check.pop()
            children = root_node.get_children_particles()
            semimajor_axis, eccentricity = self.kepler_orbits.get_semimajor_axis_and_eccentricity_for_binary_components(
                children[0],
                children[1]
            )
            periapsis, apoapsis = self.kepler_orbits.get_periapsis_and_apoapsis(
                semimajor_axis,
                eccentricity
            )
            binary_scale = apoapsis if self.retain_binary_apocenter else semimajor_axis * 2
            # potential perturbers of this binary: all other roots, the
            # singles, and the strongest field perturber found earlier
            others = Particles()
            for x in roots:
                if not x is root_node:
                    others.add_particle(x.particle)
            others.add_particles(singles)
            others.add_particle(self.perturber_in_field)
            distances = (others.position-root_node.particle.position).lengths()
            perturbation = others.mass / distances**3
            max_perturber_index = perturbation.argmax()
            distance = distances[max_perturber_index]
            max_perturbation = perturbation[max_perturber_index]
            # dimensionless perturbation relative to the binary's own scale
            max_perturbation = 2*max_perturbation*binary_scale**3/root_node.particle.mass
            print("max_perturbation:", max_perturbation, self.wide_perturbation_limit)
            if max_perturbation > self.wide_perturbation_limit:
                "break it up!"
                nodes_to_break_up.append(root_node.particle)
                print(roots, roots.index(root_node))
                del roots[roots.index(root_node)]
                print(roots)
                # if we will break up a level in a triple/multiple, we
                # also will check the binaries under that level.
                roots_to_check.extend(root_node.iter_branches())
        # as this is a binary tree with no pointer up the tree
        # we can break up a binary by removing the parent particle
        for root_node in nodes_to_break_up:
            self.singles_and_multiples_after_evolve.remove_particle(root_node)
class AbstractHandleEncounter(object):
"""Abstract base class for all strategies to handle encounters.
We have different scales in the encounter:
1. Small scale of the interaction. This is the smallest distance
between any two particles in the encounter. Only binaries
with an aphelion smaller than this distance (times a factor)
will be handled as a hard binary.
2. Large scale of the interaction. This is the total diameter of
the containing sphere of the interaction. For two body interactions
this is the same as 1. This scale (times a factor)
will be used to find neighbour particles to include in the handling
of the encounter.
3. Initial sphere radius. This is the radius of the sphere containing
all the particles in the interaction (encounter + neighbours).
The sphere itself is centered on the center of mass of the particles.
This radius is used to scale back (or forward) all particles
after the interaction calculation is done.
After an interaction the following should be true of the particles:
1. The particles are moving apart.
    2. The particles on the outside are just inside the initial sphere
radius.
3. The distances between all pairs of particles is larger than the
small scale of interaction.
"""
    def __init__(self,
            kepler_code = None,
            G = constants.G
        ):
        # G is injected so unit systems other than SI/nbody can be used
        self.G = G
        self.kepler_orbits = KeplerOrbits(kepler_code)
        handler = interface.HandleParameters(self)
        self.define_parameters(handler)
        self.parameters = handler.get_attribute('parameters', None)
        self.hard_binary_factor = 3.0
        # NOTE(review): 10.0 here disagrees with define_parameters'
        # default_value of 3.0 for scatter_factor — confirm which is intended
        self.scatter_factor = 10.0
        self.small_scale_factor = 3.0
        self.reset()
def before_set_parameter(self):
pass
def before_get_parameter(self):
pass
    def define_parameters(self, handler):
        # register the mixin-specific neighbour parameters first
        self.define_neighbours_selection_parameters(handler)
        handler.add_method_parameter(
            "get_hard_binary_factor",
            "set_hard_binary_factor",
            "hard_binary_factor",
            "a hard binary is defined as the small scale of the interaction times this factor",
            default_value = 3.0
        )
        # NOTE(review): default_value 3.0 here disagrees with the 10.0
        # assigned in __init__ — confirm which is intended
        handler.add_method_parameter(
            "get_scatter_factor",
            "set_scatter_factor",
            "scatter_factor",
            "Initial separation for the scattering experiment, relative to the small scale of the interaction.",
            default_value = 3.0
        )
    def get_hard_binary_factor(self):
        # parameter-handler getter for ``hard_binary_factor``
        return self.hard_binary_factor
    def set_hard_binary_factor(self, value):
        # parameter-handler setter for ``hard_binary_factor``
        self.hard_binary_factor = value
    def get_scatter_factor(self):
        # parameter-handler getter for ``scatter_factor``
        return self.scatter_factor
    def set_scatter_factor(self, value):
        # parameter-handler setter for ``scatter_factor``
        self.scatter_factor = value
    def reset(self):
        # (Re)create every bookkeeping particle set for a fresh encounter.
        # The supersets below stay live views over their member sets.
        self.particles_in_field = Particles()
        self.particles_in_encounter = Particles()
        self.particles_close_to_encounter = Particles()
        self.multiples_in_encounter = Particles()
        self.all_particles_in_encounter = ParticlesSuperset([self.particles_in_encounter, self.particles_close_to_encounter])
        self.existing_multiples = Particles()
        self.existing_binaries = Particles()
        self.new_binaries = Particles()
        self.new_multiples = Particles()
        self.updated_binaries = Particles()
        self.updated_multiples = Particles()
        self.dissolved_binaries = Particles()
        self.dissolved_multiples = Particles()
        self.captured_singles = Particles()
        self.released_singles = Particles()
        self.all_singles_in_encounter = Particles()
        self.all_singles_close_to_encounter = Particles()
        self.all_singles_in_evolve = ParticlesSuperset([self.all_singles_in_encounter, self.all_singles_close_to_encounter])
        self.singles_and_multiples_after_evolve = Particles()
        self.kepler_orbits.reset()
        self.scatter_energy_error = zero
    def execute(self):
        """Run the full encounter pipeline: scale determination, neighbour
        selection, energy bookkeeping, scattering evolution, multiple
        (re)construction and the final scaling back to the initial sphere."""
        self.determine_scale_of_particles_in_the_encounter()
        self.select_neighbours_from_field()
        self.determine_initial_energies()
        self.determine_initial_sphere_of_particles_in_encounter()
        print("scale_up system")
        self.scale_up_system_if_two_body_scattering()
        print("determine initial")
        self.determine_initial_multiple_energy()
        print("determin singles and energies")
        self.determine_singles_and_energies_from_particles_and_neighbours_in_encounter()
        print("determine initial singles")
        self.determine_initial_singles_energies()
        print("move all singles")
        self.move_all_singles_to_initial_sphere_frame_of_reference()
        print("evolve singles")
        self.evolve_singles_in_encounter_until_end_state()
        #self.particles_before_scaling = self.singles_and_multiples_after_evolve .copy()
        print("determine structure")
        self.determine_structure_of_the_evolved_state()
        self.scale_evolved_state_to_initial_sphere()
        self.remove_soft_binaries_from_evolved_state()
        self.determine_multiples_in_the_evolved_state()
        self.determine_captured_singles_from_the_multiples()
        self.determine_released_singles_from_the_multiples()
        self.determine_particles_after_encounter()
        self.move_evolved_state_to_original_frame_of_reference()
        self.update_positions_of_subsets()
        self.determine_final_multiple_energy()
        self.determine_final_energies()
        self.create_energy_report()
        self.error_on_fly_away()
    def error_on_fly_away(self):
        # Sanity check: report (via prints) any particle that ended up
        # outside the large interaction scale; the hard error is disabled.
        center_of_mass = self.particles_after_encounter.center_of_mass()
        distances = (self.particles_after_encounter.position - center_of_mass).lengths()
        particles_to_far = self.particles_after_encounter[(distances > self.large_scale_of_particles_in_the_encounter)]
        if len(particles_to_far) > 0:
            print("distances:", distances)
            print("lsi:", self.large_scale_of_particles_in_the_encounter)
            print("scaled:", distances / self.large_scale_of_particles_in_the_encounter)
            #raise Exception('a particle is too far!');
    def determine_initial_energies(self):
        # total (kinetic + potential) energy of the encounter members, plus
        # their potential in the remaining field
        self.initial_energy = self.all_particles_in_encounter.kinetic_energy()
        self.initial_energy += self.all_particles_in_encounter.potential_energy(G=self.G)
        # particles_close_to_encounter come from the field,
        # so these have to be removed for the potential calculation
        self.initial_potential_in_field = self.all_particles_in_encounter.potential_energy_in_field(
            self.particles_in_field - self.particles_close_to_encounter,
            G=self.G
        )
        LOG_ENERGY.info("E0={0}, PHI0={1}".format(self.initial_energy, self.initial_potential_in_field))
    def determine_scale_of_particles_in_the_encounter(self):
        # Sets large_scale (containing-sphere diameter) and small_scale
        # (scaled minimum pair separation) for the current encounter.
        # limit all scaling to the sum of radii of two particles
        # the min distance may not be smaller
        # the max distance may not be smaller
        max_sum_radii = None
        radii = self.particles_in_encounter.radius
        for i, radius in enumerate(radii[:-1]):
            max_sum_radii_i = (radius + radii[i+1:]).max()
            if max_sum_radii is None or max_sum_radii_i > max_sum_radii:
                max_sum_radii = max_sum_radii_i
        # determine large scale from the distance of the farthest particle to the center of mass
        center_of_mass = self.particles_in_encounter.center_of_mass()
        distances = (self.particles_in_encounter.position-center_of_mass).lengths()
        max_distance = distances.max() * 2 # times 2 as we are relative to the center of mass
        #max_distance = max(max_sum_radii, max_distance)
        self.large_scale_of_particles_in_the_encounter = max_distance
        # determine small scale from the smallest distance between all pairs in the encounter
        # for two body interaction this scale is the same as the large scale
        positions = self.particles_in_encounter.position
        transpose_positions = positions.reshape((len(self.particles_in_encounter), 1, 3))
        distances_between_all_particles = ((transpose_positions - positions)**2).sum(axis=2).sqrt()
        # filter out the zero self-distances on the diagonal
        distances_between_different_particles = distances_between_all_particles[distances_between_all_particles > 0*max_distance]
        min_distance = distances_between_different_particles.min()
        min_distance = max(max_sum_radii, min_distance)
        self.small_scale_of_particles_in_the_encounter = self.small_scale_factor * min_distance
    def determine_initial_multiple_energy(self):
        # sum the internal energies of all multiples entering the encounter
        self.initial_multiple_energy = zero
        print(self.particles_in_encounter)
        for x in self.particles_in_encounter:
            energy = self.get_energy_of_a_multiple(x)
            self.initial_multiple_energy += energy
        for x in self.particles_close_to_encounter:
            energy = self.get_energy_of_a_multiple(x)
            self.initial_multiple_energy += energy
def determine_final_multiple_energy(self):
self.final_multiple_energy = zero
for x in self.particles_after_encounter:
energy = self.get_final_energy_of_a_multiple(x)
self.final_multiple_energy += energy
def determine_singles_and_energies_from_particles_and_neighbours_in_encounter(self):
for x in self.particles_in_encounter:
components = self.break_up_multiple_and_return_singles_of_a_particle(x)
self.all_singles_in_encounter.add_particles(components)
for x in self.particles_close_to_encounter:
components = self.break_up_multiple_and_return_singles_of_a_particle(x)
self.all_singles_close_to_encounter.add_particles(components)
def determine_initial_singles_energies(self):
self.initial_singles_energy = self.all_singles_in_evolve.kinetic_energy()
self.initial_singles_energy += self.all_singles_in_evolve.potential_energy(G=self.G)
self.delta_phi_1 = self.initial_singles_energy - self.initial_energy - self.initial_multiple_energy
LOG_ENERGY.info("E1={0}, EMul0={1}, DPHI1={2}".format(
self.initial_singles_energy,
self.initial_multiple_energy,
self.delta_phi_1
)
)
    def determine_final_energies(self):
        """Compute the post-encounter energies and the bookkeeping term
        DPHI2 (internal/multiple energy shift plus the scatter error)."""
        self.final_kinetic_energy = self.particles_after_encounter.kinetic_energy()
        self.final_energy = self.final_kinetic_energy + self.particles_after_encounter.potential_energy(G=self.G)
        # potential of the outcome in the rest of the system; the close
        # perturbers are part of the outcome, so exclude them from the field
        self.final_potential_in_field = self.particles_after_encounter.potential_energy_in_field(
            self.particles_in_field - self.particles_close_to_encounter,
            G=self.G
        )
        self.delta_phi_2 = (
            self.initial_singles_energy + self.scatter_energy_error -
            self.final_multiple_energy -
            self.final_energy
        )
        LOG_ENERGY.info("E3={0}, PHI2={1}, EMul1={2}, DPHI2={3}".format(
            self.final_energy,
            self.final_potential_in_field,
            self.final_multiple_energy,
            self.delta_phi_2
        )
        )
def create_energy_report(self):
self.delta_energy = self.final_energy - self.initial_energy
self.delta_potential_in_field = self.final_potential_in_field - self.initial_potential_in_field
self.delta_multiple_energy = self.final_multiple_energy - self.initial_multiple_energy
self.delta_internal_potential = self.delta_phi_2 - self.delta_phi_1
if self.final_kinetic_energy == zero:
return
tidal_factor = self.delta_energy / self.final_kinetic_energy
if abs(tidal_factor) > 1e-2:
LOG_ENERGY.warn("Tidal correction is needed, {0}".format(tidal_factor))
    def break_up_multiple_and_return_singles_of_a_particle(self, particle):
        """Return the single components of *particle*.

        For a known multiple: returns its leaf components shifted to the
        multiple's current position/velocity and registers the multiple in
        ``self.multiples_in_encounter``. For a plain single: returns the
        particle as a one-element set.
        """
        if particle in self.existing_multiples:
            multiple = particle.as_particle_in_set(self.existing_multiples)
            components = multiple.components
            tree = components.new_binary_tree_wrapper()
            result = Particles()
            for node in tree.iter_descendant_leafs():
                result.add_particle(node.particle)
            # components are stored relative to the multiple's own frame
            result.position += particle.position
            result.velocity += particle.velocity
            self.multiples_in_encounter.add_particle(multiple)
            return result
        else:
            return particle.as_set()
def get_energy_of_a_multiple(self, particle):
if particle in self.existing_multiples:
multiple = particle.as_particle_in_set(self.existing_multiples)
components = multiple.components
tree = components.new_binary_tree_wrapper()
singles = Particles()
for node in tree.iter_descendant_leafs():
singles.add_particle(node.particle)
# tree is stored in rest state,
# no energy of central particle
energy = singles.kinetic_energy()
energy += singles.potential_energy(G = self.G)
return energy
else:
return zero
def get_final_energy_of_a_multiple(self, particle):
if particle in self.new_multiples:
multiple = particle.as_particle_in_set(self.new_multiples)
elif particle in self.updated_multiples:
multiple = particle.as_particle_in_set(self.updated_multiples)
else:
return zero
components = multiple.components
tree = components.new_binary_tree_wrapper()
singles = Particles()
for node in tree.iter_descendant_leafs():
singles.add_particle(node.particle)
# tree is stored in rest state,
# no energy of central particle
energy = singles.kinetic_energy()
energy += singles.potential_energy(G = self.G)
return energy
def determine_initial_sphere_of_particles_in_encounter(self):
self.initial_sphere_position = self.all_particles_in_encounter.center_of_mass()
self.initial_sphere_velocity = self.all_particles_in_encounter.center_of_mass_velocity()
distances = (self.all_particles_in_encounter.position-self.initial_sphere_position).lengths()
self.initial_sphere_radius = max(distances.max(), self.small_scale_of_particles_in_the_encounter / 2.0)
    def move_all_singles_to_initial_sphere_frame_of_reference(self):
        """Shift the singles into the rest frame of the initial sphere."""
        self.all_singles_in_evolve.position -= self.initial_sphere_position
        self.all_singles_in_evolve.velocity -= self.initial_sphere_velocity
    def move_evolved_state_to_original_frame_of_reference(self):
        """Shift the evolved particles back to the original frame
        (inverse of ``move_all_singles_to_initial_sphere_frame_of_reference``)."""
        self.particles_after_encounter.position += self.initial_sphere_position
        self.particles_after_encounter.velocity += self.initial_sphere_velocity
def scale_up_system_if_two_body_scattering(self):
if len(self.particles_close_to_encounter) > 0:
return # no two body scattering if close perturbers
if not (len(self.particles_in_encounter) == 2):
return
print("initial_scatter_scale:", self.scatter_factor * self.small_scale_of_particles_in_the_encounter, self.small_scale_of_particles_in_the_encounter, self.scatter_factor)
delta_position, delta_velocity = self.kepler_orbits.expand_binary(
self.particles_in_encounter,
self.scatter_factor * self.small_scale_of_particles_in_the_encounter
)
self.particles_in_encounter.position += delta_position
self.particles_in_encounter.velocity += delta_velocity
def get_potential_energy_of_particles_in_field(self, particles, field):
"""
Returns the potential energy of a set of particles in
a field given by another set of particles. Implemented in python,
subclasses should reimplement this function to use a code.
"""
return particles.potential_energy_in_field(field)
    def evolve_singles_in_encounter_until_end_state(self):
        """
        Resolves the system of the component particles in the encounter
        (the components of the particles in the encounter and
        the components of the neighbouring particles). Fills a new
        set with the resolved particles. Implementation on the abstract
        class is a no-op, need to re-implement this on a subclass
        """
        # no-op resolution: copy the singles through unchanged, zero error
        self.singles_and_multiples_after_evolve.add_particles(self.all_singles_in_evolve)
        self.scatter_energy_error = zero
    def determine_structure_of_the_evolved_state(self):
        """
        Based on the evolved solution determine the hierarchical structure
        of the particles (i.e. binary, triples etc).
        Implementation on the abstract class is a no-op, need to re-implement this on a subclass
        """
        # flat outcome: clear all parent/child links, every particle is a single
        self.singles_and_multiples_after_evolve.child1 = None
        self.singles_and_multiples_after_evolve.child2 = None
    def remove_soft_binaries_from_evolved_state(self):
        """
        Remove binaries with an aphelion (largest separation between the
        parts) larger than the small scale of the encounter from the
        resolved component list.

        NOTE(review): the comparison below actually uses the semimajor
        axis, not the aphelion -- confirm which is intended.
        """
        tree = self.singles_and_multiples_after_evolve.new_binary_tree_wrapper()
        nodes_to_break_up = []
        hard_binary_radius = self.small_scale_of_particles_in_the_encounter * self.hard_binary_factor
        # a branch in the tree is a node with two children
        # the iter_branches will return only the branches under this node
        roots_to_check = list(tree.iter_branches())
        while len(roots_to_check)>0:
            root_node = roots_to_check.pop()
            children = root_node.get_children_particles()
            semimajor_axis, eccentricity = self.kepler_orbits.get_semimajor_axis_and_eccentricity_for_binary_components(
                children[0],
                children[1]
            )
            # hard binary: keep it (and do not descend into it)
            if semimajor_axis < hard_binary_radius:
                continue
            nodes_to_break_up.append(root_node.particle)
            # if we will break up a level in a triple/multiple, we
            # also will check the binaries under that level.
            roots_to_check.extend(root_node.iter_branches())
        # as this is a binary tree with no pointer up the tree
        # we can break up a binary by removing the parent particle
        for root_node in nodes_to_break_up:
            self.singles_and_multiples_after_evolve.remove_particle(root_node)
    def scale_evolved_state_to_initial_sphere(self):
        """
        Scale the system so that all particles are just inside the initial sphere.
        Particles should be moving apart.
        Implementation should be equivalent to moving the system back in time (or forward
        if the system is smaller than the initial scale).

        No-op on the abstract class; subclasses implement the actual scaling.
        """
        pass
    def determine_multiples_in_the_evolved_state(self):
        """
        Called after culling and scaling the evolved state. What is left
        are:
        1. multiples (binaries, triples etc) that need to be handled
        as a single particle
        2. singles
        """
        tree = self.singles_and_multiples_after_evolve.new_binary_tree_wrapper()
        # map: single-particle key -> multiple it belonged to before the encounter
        multiple_lookup_table = {}
        for multiple in self.existing_multiples:
            for particle in self.singles_of_a_multiple(multiple):
                multiple_lookup_table[particle.key] = multiple
        # map: single-particle key -> known binary it is a member of
        binary_lookup_table = {}
        for binary in self.existing_binaries:
            binary_lookup_table[binary.child1.key] = binary
            binary_lookup_table[binary.child2.key] = binary
        # a branch in the tree is a child node with two children
        for root_node in tree.iter_branches():
            root_particle = root_node.particle
            multiple_components = Particles()
            # descendant_leafs are all children and grandchildren and ... without children
            for child in root_node.iter_descendant_leafs():
                component_particle = multiple_components.add_particle(child.particle)
            self.update_binaries(root_node, binary_lookup_table)
            # put all components in frame of reference of the root particle
            multiple_components.position -= root_particle.position
            multiple_components.velocity -= root_particle.velocity
            existing_multiple = self.lookup_existing_multiple_with_components(multiple_components, multiple_lookup_table)
            # create or copy multiple particle and store it
            if existing_multiple is None:
                multiple_particle = root_particle.copy()
                multiples_set = self.new_multiples
            else:
                multiple_particle = existing_multiple.copy()
                multiple_particle.position = root_particle.position
                multiple_particle.velocity = root_particle.velocity
                multiple_particle.mass = root_particle.mass
                multiples_set = self.updated_multiples
            # the stored multiple carries its components, not tree links
            multiple_particle.child1 = None
            multiple_particle.child2 = None
            multiple_particle.components = multiple_components
            multiple_particle.radius = self.determine_radius_from_components(multiple_components)
            multiples_set.add_particle(multiple_particle)
        # multiples that took part but were not updated have dissolved
        for multiple in self.multiples_in_encounter:
            if not multiple in self.updated_multiples:
                self.dissolved_multiples.add_particle(multiple)
        # a leaf in the tree is a child node with no children
        for root_node in tree.iter_leafs():
            self.update_binaries_from_single(root_node.particle, binary_lookup_table)
def determine_radius_from_components(self, components):
return components.position.lengths().max() * 2
def singles_of_a_multiple(self, multiple):
components = multiple.components
tree = components.new_binary_tree_wrapper()
singles = Particles()
for node in tree.iter_descendant_leafs():
singles.add_particle(node.particle)
return singles
def lookup_existing_multiple_with_components(self, components, multiple_lookup_table):
found_multiple = None
if components[0].key in multiple_lookup_table:
found_multiple = multiple_lookup_table[components[0].key]
else:
return None
for x in components[1:]:
if x.key in multiple_lookup_table:
if not found_multiple == multiple_lookup_table[x.key]:
return None
else:
return None
return found_multiple
def determine_captured_singles_from_the_multiples(self):
for particle in self.particles_in_encounter:
if particle in self.existing_multiples:
continue
for multiple in self.new_multiples:
if particle in multiple.components:
self.captured_singles.add_particle(particle)
for particle in self.particles_close_to_encounter:
if particle in self.existing_multiples:
continue
for multiple in self.new_multiples:
if particle in multiple.components:
self.captured_singles.add_particle(particle)
def determine_released_singles_from_the_multiples(self):
tree = self.singles_and_multiples_after_evolve.new_binary_tree_wrapper()
for root_node in tree.iter_leafs():
particle = root_node.particle
if particle in self.particles_in_encounter or particle in self.particles_close_to_encounter:
continue
found = False
for multiple in self.new_multiples:
if particle in multiple.components:
found = True
break
if not found:
self.released_singles.add_particle(particle)
    def determine_particles_after_encounter(self):
        """Assemble the particle set representing the outcome of the
        encounter and copy the evolved positions/velocities into it."""
        particles_after_encounter = Particles()
        particles_after_encounter.add_particles(self.particles_in_encounter)
        particles_after_encounter.add_particles(self.particles_close_to_encounter)
        # drop multiples that fell apart and singles absorbed into new multiples
        particles_after_encounter.remove_particles(self.dissolved_multiples)
        particles_after_encounter.remove_particles(self.captured_singles)
        # add singles freed from multiples and the newly formed multiples
        particles_after_encounter.add_particles(self.released_singles)
        particles_after_encounter.add_particles(self.new_multiples)
        channel = self.singles_and_multiples_after_evolve.new_channel_to(particles_after_encounter)
        channel.copy_attributes(["x","y","z", "vx", "vy","vz"])
        self.particles_after_encounter = particles_after_encounter
def update_positions_of_subsets(self):
channel = self.particles_after_encounter.new_channel_to(self.new_binaries)
channel.copy_attributes(["x","y","z", "vx", "vy","vz"])
channel = self.particles_after_encounter.new_channel_to(self.new_multiples)
channel.copy_attributes(["x","y","z", "vx", "vy","vz"])
channel = self.particles_after_encounter.new_channel_to(self.updated_multiples)
channel.copy_attributes(["x","y","z", "vx", "vy","vz"])
channel = self.particles_after_encounter.new_channel_to(self.released_singles)
channel.copy_attributes(["x","y","z", "vx", "vy","vz"])
def update_binaries_from_single(self, single, binary_lookup_table):
key = single.key
if key in binary_lookup_table:
binary = binary_lookup_table[key]
self.dissolved_binaries.add_particle(binary)
del binary_lookup_table[binary.child1.key]
del binary_lookup_table[binary.child2.key]
    def update_binaries(self, root_node, binary_lookup_table):
        """Reconcile every binary node under *root_node* with the known
        binaries; leafs that are no longer paired dissolve their old
        binary."""
        # a binary tree node is a node with two children
        # the children are leafs (have no children of their own)
        if root_node.is_binary():
            self.lookup_and_update_binary(root_node, binary_lookup_table)
        else:
            # non-binary root: its direct leaf children are now unpaired
            for single in root_node.iter_leafs():
                self.update_binaries_from_single(single.particle, binary_lookup_table)
            # walk all deeper branches, reconciling each binary found
            for branch in root_node.iter_descendant_branches():
                if branch.is_binary():
                    self.lookup_and_update_binary(branch, binary_lookup_table)
                else:
                    for single in branch.iter_leafs():
                        self.update_binaries_from_single(single.particle, binary_lookup_table)
    def lookup_and_update_binary(self, root_node, binary_lookup_table):
        """Match a binary found in the encounter against the known
        binaries, updating ``new_binaries``, ``updated_binaries`` and
        ``dissolved_binaries`` and keeping *binary_lookup_table*
        (single key -> binary) consistent."""
        binary_found_in_encounter = root_node.particle
        children = list(root_node.iter_children())
        key0 = children[0].particle.key
        key1 = children[1].particle.key
        if key0 in binary_lookup_table:
            if key1 in binary_lookup_table:
                binary0 = binary_lookup_table[key0]
                binary1 = binary_lookup_table[key1]
                if binary0 is binary1:
                    # same pair as before: the binary survived, update it
                    binary_known_in_system = binary0
                    self.update_binary(binary_found_in_encounter, binary_known_in_system)
                else:
                    # exchange: both previous binaries dissolve, a new one forms
                    x = self.new_binaries.add_particle(binary_found_in_encounter)
                    self.dissolved_binaries.add_particle(binary0)
                    self.dissolved_binaries.add_particle(binary1)
                    del binary_lookup_table[binary0.child1.key]
                    del binary_lookup_table[binary0.child2.key]
                    del binary_lookup_table[binary1.child1.key]
                    del binary_lookup_table[binary1.child2.key]
                    binary_lookup_table[key0] = x
                    binary_lookup_table[key1] = x
            else:
                # key0's old binary dissolves; new binary with key1 forms
                x = self.new_binaries.add_particle(binary_found_in_encounter)
                binary0 = binary_lookup_table[key0]
                self.dissolved_binaries.add_particle(binary0)
                del binary_lookup_table[binary0.child1.key]
                del binary_lookup_table[binary0.child2.key]
                binary_lookup_table[key0] = x
                binary_lookup_table[key1] = x
        elif key1 in binary_lookup_table:
            # key1's old binary dissolves; new binary with key0 forms
            x = self.new_binaries.add_particle(binary_found_in_encounter)
            binary1 = binary_lookup_table[key1]
            self.dissolved_binaries.add_particle(binary1)
            del binary_lookup_table[binary1.child1.key]
            del binary_lookup_table[binary1.child2.key]
            binary_lookup_table[key0] = x
            binary_lookup_table[key1] = x
        else:
            # completely new binary
            self.new_binaries.add_particle(binary_found_in_encounter)
def update_binary(self, binary_found_in_encounter, binary_known_in_system):
binary_copy = self.updated_binaries.add_particle(binary_known_in_system)
binary_copy.child1 = binary_found_in_encounter.child1.copy()
binary_copy.child2 = binary_found_in_encounter.child2.copy()
binary_copy.position = binary_found_in_encounter.position
binary_copy.velocity = binary_found_in_encounter.velocity
class HandleEncounterWithCollisionCode(AbstractHandleEncounter):
    """Encounter handler that integrates the encounter with a small-N
    collision code.

    The code must provide an ``interaction_over_detection`` stopping
    condition plus ``update_particle_tree``/``update_particle_set`` to
    report the resulting hierarchy.
    """
    def __init__(self,
            kepler_code,
            resolve_collision_code,
            interaction_over_code = None,
            G = nbody_system.G
        ):
        # code used to integrate the encounter itself
        self.resolve_collision_code = resolve_collision_code
        # optional separate code to decide when the interaction is over
        self.interaction_over_code = interaction_over_code
        AbstractHandleEncounter.__init__(
            self,
            kepler_code,
            G
        )
    def reset(self):
        """Reset this handler and the underlying codes for a new encounter."""
        AbstractHandleEncounter.reset(self)
        self.resolve_collision_code.reset()
        if not self.interaction_over_code is None:
            self.interaction_over_code.reset()
    def evolve_singles_in_encounter_until_end_state(self):
        """Integrate the singles until the interaction-over stopping
        condition fires.

        Sets ``self.scatter_energy_error`` and fills
        ``self.singles_and_multiples_after_evolve`` with the resulting
        hierarchy; raises when the interaction is not over before the
        (hard-coded) end time.
        """
        for x in self.all_singles_in_evolve:
            print(x) # NOTE(review): leftover debug output
        code = self.resolve_collision_code
        code.reset()
        code.particles.add_particles(self.all_singles_in_evolve)
        initial_scatter_energy = code.get_total_energy()
        # shorter end time for a plain two-body scattering
        end_time = 10000 | nbody_system.time
        if len(self.all_singles_in_evolve) == 2:
            end_time = 100 | nbody_system.time
        # take a tiny first step before enabling the stopping condition
        code.evolve_model(0.0001 * end_time)
        interaction_over = code.stopping_conditions.interaction_over_detection
        interaction_over.enable()
        LOG_ENCOUNTER.info("evolving singles in encounter")
        print(self.all_singles_in_evolve) # NOTE(review): leftover debug output
        code.evolve_model(end_time)
        LOG_ENCOUNTER.info("evolving singles in encounter finished model_time = {0}".format(code.model_time))
        print("i over:", interaction_over.is_set())
        if interaction_over.is_set():
            # Create a tree in the module representing the binary structure.
            code.update_particle_tree()
            # Return the tree structure.
            code.update_particle_set()
            print(code.particles)
            final_scatter_energy = code.get_total_energy()
            self.scatter_energy_error = final_scatter_energy - initial_scatter_energy
            self.singles_and_multiples_after_evolve.add_particles(code.particles)
            self.particles_before_scaling = code.particles.copy()
            LOG_ENERGY.info('scatter_energy_error={0}'.format(self.scatter_energy_error))
            return
        raise Exception(
            "Did not finish the small-N simulation before end time {0}".
            format(end_time)
        )
    def determine_structure_of_the_evolved_state(self):
        """
        Based on the evolved solution determine the hierarchical structure
        of the particles (i.e. binary, triples etc).
        Structure is determined during evolve singles....
        """
        pass
    def scale_evolved_state_to_initial_sphere(self):
        """
        Scale the system so that all particles are just inside the initial sphere.
        Particles should be moving apart.
        Implementation should be equivalent to moving the system back in time (or forward
        if the system is smaller than the initial scale).
        """
        # scale the top-level nodes (roots of multiples plus free singles)
        self.scale_code = ScaleSystem(self.kepler_orbits, self.G)
        tree = self.singles_and_multiples_after_evolve.new_binary_tree_wrapper()
        roots_and_singles = tree.get_children_subset()
        self.scale_code.scale_particles_to_sphere(roots_and_singles, 1.01 * self.initial_sphere_radius)
class HandleEncounterWithSmallN(AbstractHandleEncounter):
    """Encounter handler that resolves the internal dynamics with a
    smallN-style code providing an ``is_over`` query.

    The integration loop evolves with a growing outer step, probing
    ``is_over`` after every inner sub-step.
    """
    # when True: smaller steps and extra diagnostic output
    debug_encounters = False
    def __init__(self,
            kepler_code,
            resolve_collision_code,
            interaction_over_code = None,
            G = nbody_system.G
        ):
        # code used to integrate the encounter itself
        self.resolve_collision_code = resolve_collision_code
        # optional separate "is over" code (not used in this class)
        self.interaction_over_code = interaction_over_code
        AbstractHandleEncounter.__init__(
            self,
            kepler_code,
            G
        )
    def evolve_singles_in_encounter_until_end_state(self):
        """Integrate the singles until ``is_over`` reports completion and
        store the resulting hierarchy in
        ``self.singles_and_multiples_after_evolve``.

        NOTE(review): unlike HandleEncounterWithCollisionCode this method
        *returns* the scatter energy error instead of assigning
        ``self.scatter_energy_error`` -- confirm callers handle this.
        """
        pre = 'encounter:' # identifier for all output here
        # Take the system described by particles and evolve it forward
        # in time until it is over. Don't update global quantities,
        # don't interpret the outcome. Return the energy error due to
        # the smallN integration.
        # Temporarily avoid "is_over" problems. If we allow
        # collisions to stop early -- when they become too large or
        # last too long -- then we need will logic to manage the
        # intermediate state that results. TODO
        final_scatter_scale = 1.e30 | nbody_system.length
        # base the step and end time on the shortest two-body timescale
        timescale = self.get_timescale()
        print("time_scale =", timescale)
        if self.resolve_collision_code.unit_converter is None:
            end_time = 1.e4 * abs(timescale) # nbody_system.time
            delta_t = min(10*abs(timescale), 1.0 | nbody_system.time)
        else:
            end_time = 1.e4 * abs(timescale)
            delta_t = 10*abs(timescale)
        print("end_time =", end_time)
        print("delta_t =", delta_t)
        resolve_collision_code = self.resolve_collision_code
        resolve_collision_code.reset()
        time = 0 * end_time
        resolve_collision_code.set_time(time)
        resolve_collision_code.particles.add_particles(self.all_singles_in_evolve)
        resolve_collision_code.commit_particles()
        #self.particles_before_scaling = self.all_singles_in_evolve.copy()
        # Channel to copy values from the code to the set in memory.
        # channel = resolve_collision_code.particles.new_channel_to(particles)
        initial_scatter_energy = self.get_total_energy(resolve_collision_code)
        print(pre, 'number_of_stars =', len(self.all_singles_in_evolve), ' ', self.all_singles_in_evolve.key)
        print(pre, 'initial energy =', initial_scatter_energy)
        #print particles
        # let the outer step grow up to delta_t_max (at least end_time/10)
        delta_t_max = 64*delta_t
        while delta_t_max < end_time/10:
            delta_t_max *= 2
        if self.debug_encounters:
            delta_t *= 0.1
        initial_delta_t = delta_t
        print(pre, 'evolving to time', end_time)
        print(pre, 'initial step =', initial_delta_t)
        # if self.debug_encounters:
        #     print(pre, '### START ENCOUNTER ###')
        #     print(pre, '### snapshot at time %f' % 0.0)
        #     for p in particles:
        #         print(pre, '### id=%d, x=%f, y=%f, z=%f,'\
        #               'vx=%f, vy=%f, vz=%f' % \
        #                 (p.id, p.x.number, p.y.number, p.z.number,
        #                  p.vx.number, p.vy.number, p.vz.number))
        resolve_collision_code.set_break_scale(final_scatter_scale)
        while time < end_time:
            tt = time
            time += delta_t
            print(pre, '...to time', time)
            # Work with internal steps of initial_delta_t to allow
            # checks for quasi-stable motion.
            while tt < time:
                tt += initial_delta_t
                if tt > time:
                    tt = time
                print(pre, '  ...', time, tt, \
                    'model_time =', resolve_collision_code.model_time)
                resolve_collision_code.evolve_model(tt)
                print(pre, '  ...back:', \
                    ': model_time =', resolve_collision_code.model_time)
                tt = resolve_collision_code.model_time
                # Note: Return with tt != time means we have exceeded
                # the size limit and don't need to check is_over().
                # DEBUGGING:
                # if self.debug_encounters:
                #     print(pre, '### snapshot at time %f' % time.number)
                #     #resolve_collision_code.update_particle_tree()
                #     #resolve_collision_code.update_particle_set()
                #     resolve_collision_code.particles.synchronize_to(particles)
                #     channel.copy()
                #     for p in particles:
                #         print(pre, '### id=%d, x=%f, y=%f, z=%f,'\
                #               'vx=%f, vy=%f, vz=%f' % \
                #                 (p.id, p.x.number, p.y.number, p.z.number,
                #                  p.vx.number, p.vy.number, p.vz.number))
                # The argument final_scatter_scale is used to limit
                # the size of the system. It has to be supplied again
                # because the code that determines if the scattering
                # is over isn't necessarily the same as
                # resolve_collision_code. Currently, only smallN has
                # an "is_over()" function. TODO
                #
                # Return values: 0 - not over
                # 1 - over
                # 2 - quasi-stable system
                # 3 - not over, but size exceeded limit
                #
                # Note that this is really a stopping condition, and
                # should eventually be handled that way. TODO
                # We are currently ignoring any possibility of a
                # physical collision during the multiples encounter.
                # TODO
                over = resolve_collision_code.is_over(final_scatter_scale,
                                                      0) # verbose = 0
                if over:
                    final_scatter_energy = self.get_total_energy(resolve_collision_code)
                    scatter_energy_error = final_scatter_energy - initial_scatter_energy
                    print(pre, 'over =', over, 'at time', tt)
                    #print pre, 'initial energy =', initial_scatter_energy
                    #print pre, 'final energy =', final_scatter_energy
                    #print pre, 'energy error =', scatter_energy_error
                    print(pre, 'fractional energy error =', scatter_energy_error/initial_scatter_energy)
                    if self.debug_encounters:
                        print(pre, '### END ENCOUNTER ###')
                    # Create a tree in the module representing the binary structure.
                    resolve_collision_code.update_particle_tree()
                    # TODO: what happens if we reach over = 2 or 3?
                    # Note that center of mass particles are now part
                    # of the particle set...
                    # Return the tree structure to AMUSE. Children
                    # are identified by get_children_of_particle in
                    # interface.??, and the information is returned in
                    # the copy operation.
                    resolve_collision_code.update_particle_set()
                    self.singles_and_multiples_after_evolve.add_particles(resolve_collision_code.particles)
                    self.particles_before_scaling = resolve_collision_code.particles.copy()
                    return scatter_energy_error
                if tt >= 0.9999999*time:
                    break
            time = resolve_collision_code.model_time
            if not self.debug_encounters:
                # double the outer step once the system has settled in
                if delta_t < delta_t_max and time > 0.999999*4*delta_t:
                    delta_t *= 2
                    print(pre, 'setting delta_t =', delta_t)
        raise Exception(
            pre + "Did not finish the small-N simulation before end time {0}".
            format(end_time)
        )
    def get_total_energy(self, code):
        """Return kinetic + potential (+ internal binary) energy of *code*."""
        # ??? from Steve: what is get_binary_energy()?
        try:
            binaries_energy = code.get_binary_energy() # include binaries
        except: # if code understands
            binaries_energy = zero
        total_energy = code.potential_energy + code.kinetic_energy \
            + binaries_energy
        return total_energy
    def determine_structure_of_the_evolved_state(self):
        """
        Based on the evolved solution determine the hierarchical structure
        of the particles (i.e. binary, triples etc).
        Structure is determined during evolve singles....
        """
        pass
    def scale_evolved_state_to_initial_sphere(self):
        """
        Scale the system so that all particles are just inside the initial sphere.
        Particles should be moving apart.
        Implementation should be equivalent to moving the system back in time (or forward
        if the system is smaller than the initial scale).
        """
        # scale the top-level nodes (roots of multiples plus free singles)
        self.scale_code = ScaleSystem(self.kepler_orbits, self.G)
        tree = self.singles_and_multiples_after_evolve.new_binary_tree_wrapper()
        roots_and_singles = tree.get_children_subset()
        self.scale_code.scale_particles_to_sphere(roots_and_singles, 1.01 * self.initial_sphere_radius)
        #self.particles_before_scaling = roots_and_singles.copy()
    def get_timescale(self):
        """Return the shortest periastron-passage time over all pairs."""
        # NOTE(review): the two bare expressions below are no-ops
        self.kepler_orbits
        self.all_singles_in_evolve
        min_period = None
        for i, iparticle in enumerate(self.all_singles_in_evolve[:-1]):
            for j, jparticle in enumerate(self.all_singles_in_evolve[i+1:]):
                period = self.kepler_orbits.get_period(iparticle, jparticle)
                print("period =", period)
                # 'period' is overwritten: the periastron time is what is used
                period = self.kepler_orbits.get_periastron_time(iparticle, jparticle)
                print("time =", period)
                if min_period is None:
                    min_period = period
                else:
                    min_period = min_period.min(period)
        print("tperi =", min_period)
        return min_period
class HandleEncounter(HandleEncounterWithCollisionCode, SelectNeighboursByDistanceMixin):
    """Default encounter handler: collision-code resolution combined with
    distance-based neighbour selection."""
    def __init__(self,
            kepler_code,
            resolve_collision_code,
            interaction_over_code = None,
            G = nbody_system.G
        ):
        HandleEncounterWithCollisionCode.__init__(
            self,
            kepler_code,
            resolve_collision_code,
            interaction_over_code,
            G
        )
        SelectNeighboursByDistanceMixin.__init__(self)
class StickyHandleEncounter(AbstractHandleEncounter, EmptySelectNeighboursMixin):
    """Encounter handler that simply sticks particles together.

    No dynamics is integrated: the closest pair is repeatedly joined
    under a parent node until one hierarchy remains, giving a pure
    'sticky' outcome with zero scatter energy error.
    """
    def __init__(self, G = nbody_system.G):
        # no kepler code is needed, encounters are not integrated
        AbstractHandleEncounter.__init__(
            self,
            None,
            G
        )
    def evolve_singles_in_encounter_until_end_state(self):
        """Build a binary-tree hierarchy by repeatedly joining the closest
        pair of nodes."""
        self.scatter_energy_error = quantities.zero
        particles = self.all_singles_in_evolve.copy()
        working_set = particles.copy()
        parents = Particles()
        self.particles_before_scaling = particles.copy()
        counter = len(working_set)
        while counter > 1:
            number_of_particles = len(working_set)
            # indices of the upper triangle: all unordered pairs (i < j)
            indices1, indices2 = numpy.triu_indices(number_of_particles, 1)
            dd = lambda x: x[indices1] - x[indices2]
            dx = dd(working_set.x)
            dy = dd(working_set.y)
            dz = dd(working_set.z)
            distances_squared = (dx**2 + dy**2 + dz**2)
            minindex = distances_squared.argmin()
            index1 = indices1[minindex]
            index2 = indices2[minindex]
            partner1 = working_set[index1]
            partner2 = working_set[index2]
            mass1 = partner1.mass
            mass2 = partner2.mass
            total_mass = mass1 + mass2
            parent = Particle()
            # BUGFIX: was 'mass1 + mass1', which dropped partner2's mass
            parent.mass = total_mass
            # NOTE(review): position is the geometric midpoint, not the
            # center of mass -- confirm this is intended
            parent.position = (partner1.position + partner2.position) / 2.0
            # momentum-conserving (center-of-mass) velocity
            parent.velocity = (
                (mass1 / total_mass) * partner1.velocity +
                (mass2 / total_mass) * partner2.velocity
            )
            parent.child1 = partner1
            parent.child2 = partner2
            parents.add_particle(parent)
            working_set.remove_particle(partner1)
            working_set.remove_particle(partner2)
            working_set.add_particle(parent)
            counter -= 1
        # merge singles and parent nodes into one set and re-point the
        # child references into that set
        result = Particles()
        result.add_particles(particles)
        parents = result.add_particles(parents)
        for x in parents:
            x.child1 = x.child1.as_particle_in_set(result)
            x.child2 = x.child2.as_particle_in_set(result)
        self.singles_and_multiples_after_evolve.add_particles(result)
    def determine_structure_of_the_evolved_state(self):
        # structure is already built while evolving
        pass
    def scale_evolved_state_to_initial_sphere(self):
        # sticky particles are not moved
        pass
    def remove_soft_binaries_from_evolved_state(self):
        # all bound structure is kept
        pass
    def scale_up_system_if_two_body_scattering(self):
        # no pre-scattering expansion needed
        pass
    def determine_radius_from_components(self, components):
        """Radius of a merged multiple: farthest component offset plus the
        largest component radius."""
        return components.position.lengths().max() + components.radius.max()
class KeplerOrbits(object):
    def __init__(self, kepler_code):
        # the wrapped AMUSE kepler solver used for all orbit queries below
        self.kepler_code = kepler_code
    def reset(self):
        # the kepler code is (re)initialized per query; nothing to reset here
        pass
def get_semimajor_axis_and_eccentricity_for_binary_components(self, particle1, particle2):
particles = Particles()
particles.add_particle(particle1)
particles.add_particle(particle2)
self.kepler_code.initialize_from_particles(particles)
return self.kepler_code.get_elements()
def get_period(self, particle1, particle2):
particles = Particles()
particles.add_particle(particle1)
particles.add_particle(particle2)
self.kepler_code.initialize_from_particles(particles)
return self.kepler_code.get_period()
    def get_periastron_time(self, particle1, particle2):
        """Return the kepler-code time of the periastron passage of the pair.

        NOTE(review): ``get_angles`` is unpacked as (true_anomaly,
        mean_anomaly) elsewhere in this class, so 'M' below is the first
        of those angles -- confirm which angle is intended.
        """
        particles = Particles()
        particles.add_particle(particle1)
        particles.add_particle(particle2)
        self.kepler_code.initialize_from_particles(particles)
        M,_ = self.kepler_code.get_angles()
        # negative angle: approaching periastron, move forward to it;
        # otherwise go back to the previous periastron passage
        if M < 0:
            self.kepler_code.advance_to_periastron()
        else:
            self.kepler_code.return_to_periastron()
        return self.kepler_code.get_time()
def move_binary(self, scale, true_anomaly, receeding):
if receeding:
# Note: Always end up on an outgoing orbit. If
# periastron > scale, we will be just past periapsis.
if true_anomaly < 0:
self.kepler_code.advance_to_periastron()
self.kepler_code.advance_to_radius(scale)
else:
if self.kepler_code.get_separation() < scale:
self.kepler_code.advance_to_radius(scale)
else:
self.kepler_code.return_to_radius(scale)
else:
print("true_anomaly:", true_anomaly, self.kepler_code.get_separation() , scale)
if true_anomaly > 0:
self.kepler_code.return_to_periastron()
self.kepler_code.return_to_radius(scale)
else:
if self.kepler_code.get_separation() < scale:
self.kepler_code.return_to_radius(scale)
else:
self.kepler_code.advance_to_radius(scale)
def compress_binary(self, particles, scale, receeding = True):
"""
Returns the change in positions and velocities for
the two-body system consisting of 'particle1' and 'particle2'.
After applying the change the particles will lie
inside distance 'scale' of one another.
The final orbit will be receding (moving away from each other).
"""
separation = (particles[1].position - particles[0].position).length()
print("MULTI-POSITIONS:")
print(particles[0].position)
print(particles[1].position)
print("scaling:",separation, scale, separation < scale)
print("scaling2:", (particles[1].position - particles[0].position).length_squared(), scale ** 2)
if separation <= scale:
# particles are already close together, no scaling done
# AVE is this correct, will the particle(s) be receding?
# or should some movement always happen
return particles.position * 0, particles.velocity * 0
self.kepler_code.initialize_from_particles(particles)
true_anomaly, mean_anomaly = self.kepler_code.get_angles()
semimajor_axis, eccentricity = self.kepler_code.get_elements()
periapsis, apoapsis = self.get_periapsis_and_apoapsis(semimajor_axis, eccentricity)
# closest distance plus 1% of the distance between peri and apo
limit = periapsis + 0.01*(apoapsis-periapsis)
if periapsis < scale and limit > scale:
limit = scale
# we cannot scale to smaller than the periapsis distance
if scale < limit:
scale = limit
self.move_binary(scale, true_anomaly, receeding)
rel_position = as_vector_quantity(self.kepler_code.get_separation_vector())
rel_velocity = as_vector_quantity(self.kepler_code.get_velocity_vector())
print("REL POS:", rel_position)
return self.deltas_to_update_binary(particles, rel_position, rel_velocity)
def expand_binary(self, particles, scale, receeding = False):
    """
    Returns the change in positions and velocities for
    the two-body system consisting of 'particle1' and 'particle2'.
    After applying the change the particles will lie
    close to distance 'scale' of one another.
    The particles will be moving towards each other.

    :param particles: two-body particle set (exactly two particles)
    :param scale: target separation to expand the pair towards
    :param receeding: branch of the orbit to place the pair on
    :returns: (delta_positions, delta_velocities) to add to the particles
    """
    # NOTE(review): despite its name, 'separation' here holds the SQUARED
    # distance (length_squared), hence the comparisons against scale**2
    separation = (particles[1].position - particles[0].position).length_squared()
    print("separation:", separation, scale, scale**2,separation > scale**2)
    if separation > scale**2:
        # already further apart than requested; nothing to do
        return particles.position * 0, particles.velocity * 0
    self.kepler_code.initialize_from_particles(particles)
    true_anomaly, mean_anomaly = self.kepler_code.get_angles()
    semimajor_axis, eccentricity = self.kepler_code.get_elements()
    periapsis, apoapsis = self.get_periapsis_and_apoapsis(semimajor_axis, eccentricity)
    # largest distance minus 1% of the distance between peri and apo
    limit = apoapsis - 0.01*(apoapsis-periapsis)
    print("limit:", scale ** 2, scale, limit, apoapsis, periapsis, eccentricity)
    # we cannot scale to larger than the apoapsis distance
    # but if eccentricity > 1 we can!
    # TURNED OFF TO COMPARE WITH MULTIPLES:
    # if eccentricity <= 1 and scale > limit:
    if scale > limit and receeding:
        return particles.position * 0, particles.velocity * 0
    if scale > limit:
        scale = limit
    print("INPUT:", as_vector_quantity(self.kepler_code.get_separation_vector()))
    # advances the kepler solver to the requested radius on the orbit
    self.move_binary(scale, true_anomaly, receeding)
    rel_position = as_vector_quantity(self.kepler_code.get_separation_vector())
    rel_velocity = as_vector_quantity(self.kepler_code.get_velocity_vector())
    print("REL POS:", as_vector_quantity(self.kepler_code.get_separation_vector()))
    return self.deltas_to_update_binary(particles, rel_position, rel_velocity)
def deltas_to_update_binary(self, particles, relative_position, relative_velocity):
    """
    Convert a kepler relative separation / velocity into per-particle
    deltas that place the pair at the new relative state while keeping
    the center of mass position and velocity unchanged.

    :param particles: two-body particle set
    :param relative_position: new separation vector of the pair
    :param relative_velocity: new relative velocity vector of the pair
    :returns: (delta_positions, delta_velocities), one row per particle
    """
    com_position = particles.center_of_mass()
    com_velocity = particles.center_of_mass_velocity()
    # mass fraction of the second particle; column-shaped weights split the
    # relative vector over the two bodies with opposite signs
    mass_fraction = particles[1].mass / particles.mass.sum()
    weights = numpy.asarray([mass_fraction, -(1 - mass_fraction)]).reshape(2, 1)
    delta_positions = relative_position * weights - (particles.position - com_position)
    delta_velocities = relative_velocity * weights - (particles.velocity - com_velocity)
    print("DELTA 1:" , delta_positions[0])
    print("DELTA 2:" , delta_positions[1])
    return delta_positions, delta_velocities
def get_periapsis_and_apoapsis(self, semimajor_axis, eccentricity):
    """
    Return the (smallest, largest) separation of an orbit.

    For an open orbit (eccentricity >= 1) the true apoapsis is infinite;
    a finite stand-in is returned because callers only use it as a limit.
    """
    if eccentricity < 1:
        # closed (elliptic) orbit
        smallest = semimajor_axis * (1-eccentricity)
        largest = semimajor_axis * (1+eccentricity)
    else:
        # parabolic or hyperbolic orbit
        smallest = semimajor_axis * (eccentricity-1)
        largest = semimajor_axis + smallest
    return smallest, largest
def sample_binary(self, particles, end_time, number_of_points):
    """
    Sample the kepler orbit of a two-body system at evenly spaced times.

    :param particles: two-body particle set defining the orbit
    :param end_time: last sample time (first sample is at t = 0)
    :param number_of_points: number of samples to take
    :returns: (positions, velocities) quantity lists, one entry per
        sample time, each holding both particles' absolute state
    """
    self.kepler_code.initialize_from_particles(particles)
    sample_points = quantities.linspace(0.0 * end_time, end_time, number_of_points)
    # empty quantity lists in the units of the input particle set
    positions = [] | particles.x.unit
    velocities = [] | particles.vx.unit
    for time in sample_points:
        self.kepler_code.transform_to_time(time)
        rel_position = as_vector_quantity(self.kepler_code.get_separation_vector())
        rel_velocity = as_vector_quantity(self.kepler_code.get_velocity_vector())
        # convert the relative kepler state to absolute per-particle state
        dpos, dvel = self.deltas_to_update_binary(particles, rel_position, rel_velocity)
        positions.append(particles.position + dpos)
        velocities.append(particles.velocity + dvel)
    return positions, velocities
class ScaleSystem(object):
    """
    Rescales a (sub)system of particles to fit within (or expand to) a
    target sphere. Two-body configurations are handled exactly with the
    kepler solver; larger systems fall back to energy-conserving
    position/velocity scaling.
    """
    def __init__(self, kepler_orbits, G = nbody_system.G):
        # kepler_orbits: KeplerOrbits helper used for two-body scaling
        self.kepler_orbits = kepler_orbits
        self.G = G

    def move_particle(self, particle, delta_position, delta_velocity):
        """
        Move a particle and all of its descendants by delta position
        and velocity.
        """
        particle.position += delta_position
        particle.velocity += delta_velocity
        # shift the whole subtree below this particle by the same offsets
        tree = trees.BinaryTreeOnParticle(particle)
        descendants = tree.get_descendants_subset()
        descendants.position += delta_position
        descendants.velocity += delta_velocity

    def get_particles_with_minimum_separation(self, particles):
        """
        Return the pair of particles with the smallest surface-to-surface
        separation (center distance minus the sum of their radii).

        Requires at least two particles.
        """
        positions = particles.position
        radii = particles.radius
        minimum_separation = None
        for i in range(len(particles) - 1):
            i_position = positions[i]
            j_positions = positions[i+1:]
            # BUGFIX: this used to be ``radii[i+1:]`` (a copy-paste of the
            # j_radii line below), which ignored particle i's radius and
            # doubled the other radii in sum_radii.
            i_radius = radii[i]
            j_radii = radii[i+1:]
            delta_positions = i_position - j_positions
            dr = delta_positions.lengths()
            sum_radii = i_radius + j_radii
            delta = dr - sum_radii
            index = delta.argmin()
            min_delta = delta[index]
            if (
                minimum_separation is None
                or
                min_delta < minimum_separation
            ):
                minimum_separation = min_delta
                particle_i = particles[i]
                particle_j = particles[i+1+index]
        return particle_i, particle_j

    def scale_particles_to_sphere(self, particles, radius):
        """
        Rescale the system of particles to lie within a sphere
        of the given radius.
        System is moved to the center of mass.
        System may be compressed or expanded.

        note::
            radius can be zero -> the system will be scaled to minimum
            separation (using the radii); the radii of the particles can
            be zero -> the system will be scaled to radius.
            Note this is not implemented for 2 body yet!!!
        """
        print("scale_particles_to_sphere", radius)
        center_of_mass_position = particles.center_of_mass()
        center_of_mass_velocity = particles.center_of_mass_velocity()
        print(center_of_mass_position)
        print(center_of_mass_velocity)
        # work in the center-of-mass frame
        particles.position -= center_of_mass_position
        particles.velocity -= center_of_mass_velocity
        print(particles.position)
        print(particles.velocity)
        # special case, 1 body (a multiple root); its position and
        # velocity are now zero, scale its two children if it has them
        if len(particles) == 1:
            tree = trees.ChildTreeOnParticle(particles[0])
            children = tree.get_children_subset()
            if len(children) == 2:
                # special case, scale the binary with kepler
                scale = 2 * radius
                print("scale:", scale)
                delta_p, delta_v = self.kepler_orbits.compress_binary(children, scale, receeding = True)
                print(delta_p, delta_v)
                for particle, dp, dv in zip(children, delta_p, delta_v):
                    self.move_particle(particle, dp, dv + center_of_mass_velocity)
            particles.velocity += center_of_mass_velocity
            return
        kinetic_energy = particles.kinetic_energy()
        potential_energy = particles.potential_energy(G = self.G)
        particle0, particle1 = self.get_particles_with_minimum_separation(particles)
        distance = (particle0.position - particle1.position).length()
        sum_of_radii = particle0.radius + particle1.radius
        separation = distance - sum_of_radii
        # special case, 2 bodies, we can use kepler to
        # do the scaling in a consistent, energy preserving way
        if len(particles) == 2:
            if distance < sum_of_radii:
                # overlapping: push the pair apart
                scale = max(2*radius, sum_of_radii)
                delta_p, delta_v = self.kepler_orbits.expand_binary(particles, scale, receeding = True)
            elif separation > radius:
                # too far apart: pull the pair in
                scale = max(2 * radius, sum_of_radii)
                delta_p, delta_v = self.kepler_orbits.compress_binary(particles, scale, receeding = True)
            else:
                print("AA:",separation, 2 * radius,sum_of_radii)
                delta_p, delta_v = self.kepler_orbits.expand_binary(particles, 2 * radius, receeding = True)
            for particle, dp, dv in zip(particles, delta_p, delta_v):
                self.move_particle(particle, dp, dv + center_of_mass_velocity)
            return
        # for all other situations, we revert to scaling
        # where we preserve energy by scaling
        # the velocities
        print("DD:", distance, sum_of_radii, distance < sum_of_radii, radius, distance < 2 * radius)
        # we need to scale up, as the separation between particles is less than zero
        if distance < sum_of_radii:
            # use the largest scaling factor
            factor_position = max(sum_of_radii / distance, (2 * radius) / distance)
        # we need to scale up, as the minimum distance is less than the sphere diameter
        elif distance < 2 * radius:
            factor_position = (2.0 * radius) / distance
        # we need to scale down, the minimum distance is larger than the radius
        else:
            # we have room to scale down
            if distance > sum_of_radii:
                if sum_of_radii > (0.0 * radius):
                    factor_position = sum_of_radii / distance
                else:
                    factor_position = (2.0 * radius) / distance
            # we have no room available for any scaling
            else:
                factor_position = 1.0
        # scale velocities so total energy is conserved:
        # U scales as 1/factor_position, the kinetic energy absorbs the rest
        factor_velocity_squared = 1.0 - (1.0/factor_position-1.0) * potential_energy/kinetic_energy
        if factor_velocity_squared < 0.0:
            # not enough kinetic energy to compensate the potential change
            from amuse.units import units
            print(particles.position)
            print(particles.velocity)
            print(particles.radius)
            print("radius", radius)
            print("distance", distance.as_quantity_in(units.AU))
            print("sum_of_radii", sum_of_radii)
            print("factor_position", factor_position)
            raise Exception("cannot scale the velocities")
        print(particles)
        factor_velocity = numpy.sqrt(factor_velocity_squared)
        # NOTE(review): positions were already shifted into the com frame
        # above, yet center_of_mass_position is subtracted again here —
        # confirm whether this double subtraction is intended.
        delta_position = factor_position*(particles.position-center_of_mass_position) - particles.position
        delta_velocity = center_of_mass_velocity + factor_velocity*(particles.velocity-center_of_mass_velocity) - particles.velocity
        for particle, dp, dv in zip(particles, delta_position, delta_velocity):
            self.move_particle(particle, dp, dv + center_of_mass_velocity)
        #print "MINIMUM:",(2 * radius) , sum_of_radii, (particle0.position - particle1.position).length()
class Binaries(Particles):
    """
    Particle set of binaries whose 'child1'/'child2' attributes are links
    into a given set of single stars.
    """
    def __init__(self, singles):
        Particles.__init__(self)
        self._private.singles = singles
        # expose a .components() function attribute on each binary
        self.add_particle_function_attribute('components', self.get_children_subset)

    def add_particles_to_store(self, keys, attributes = [], values = []):
        """Add binaries, retargeting child links into the singles set."""
        if len(keys) == 0:
            return
        # both child links are mandatory for a binary
        present = set(attributes)
        if "child1" not in present:
            raise Exception("a binary must always have a child1 attribute")
        if "child2" not in present:
            raise Exception("a binary must always have a child2 attribute")
        stored_attributes = list(attributes)
        stored_values = []
        for name, value in zip(attributes, values):
            if name in ('child1', 'child2'):
                # rebind the link so it points into our singles set
                value = value.copy_with_link_transfer(None, self._private.singles)
            stored_values.append(value)
        return super(Binaries, self).add_particles_to_store(keys, stored_attributes, stored_values)

    def remove_particles_from_store(self, keys):
        if len(keys):
            return super(Binaries, self).remove_particles_from_store(keys)

    def get_children_subset(self, binaries, particle):
        """Return the two component singles of the given binary."""
        component_keys = (particle.child1.key, particle.child2.key)
        return binaries._private.singles._subset(keys = component_keys)
class MultiplesStoppingConditions(object):
    """Bundle of the stopping conditions the Multiples driver can raise."""
    def __init__(self):
        self.multiples_change_detection = code.StoppingCondition('multiples_change_detection')
        self.binaries_change_detection = code.StoppingCondition('binaries_change_detection')
        self.encounter_detection = code.StoppingCondition('encounter_detection')

    def _all_conditions(self):
        # internal helper: every condition managed by this bundle
        return (
            self.multiples_change_detection,
            self.binaries_change_detection,
            self.encounter_detection,
        )

    def unset(self):
        """Clear the set-state of all conditions."""
        for condition in self._all_conditions():
            condition.unset()

    def disable(self):
        """Disable all conditions."""
        for condition in self._all_conditions():
            condition.disable()

    def is_set(self):
        """True when any of the conditions is currently set."""
        return any(condition.is_set() for condition in self._all_conditions())
class Multiples(options.OptionalAttributes):
    """
    Evolves a gravitational system in which tight groups (binaries,
    ternaries, ...) are replaced by single center-of-mass particles and
    resolved separately when encounters occur.

    Data model:
    1) particles -> multiples (binaries, ternaries etc.) + singles
       are evolved by gravity code
    2) multiples -> subset of particles with components
       have a list of components
    3) component_singles -> singles part of a multiple
       are part of a multiple, are stored relative
       to the center of mass position and velocity of the multiple
    4) binaries -> separate list of particles
       have 2 components (in component_singles list)
    """
    def __init__(self,
        gravity_code = None,
        handle_encounter_code = None,
        G = nbody_system.G,
        **opts
    ):
        # gravity_code: N-body integrator with collision detection
        # handle_encounter_code: code that resolves close encounters
        options.OptionalAttributes.__init__(self, **opts)
        self.gravity_code = gravity_code
        self.handle_encounter_code = handle_encounter_code
        self.G = G
        self.stopping_conditions = MultiplesStoppingConditions()
        self.reset()
        self.number_of_collisions = 0
        # when True only the first reported pair is handled per detection
        self.must_handle_one_encounter_per_stopping_condition = True

    def reset(self):
        """(Re)create all particle bookkeeping sets and reset the codes."""
        self.components_of_multiples = Particles()
        self.multiples = Particles()
        self.singles = Particles()
        # singles + multiples, new particles default into the singles set
        self.particles = ParticlesSuperset(
            [self.singles, self.multiples],
            index_to_default_set = 0
        )
        self.singles_in_binaries = Particles()
        self.binaries = Binaries(self.singles_in_binaries)
        self.singles_in_binaries_previous = None
        self.gravity_code.reset()
        self.stopping_condition = self.gravity_code.stopping_conditions.collision_detection
        self.stopping_condition.enable()
        self.channel_from_code_to_model = self.gravity_code.particles.new_channel_to(self.particles)
        self.channel_from_model_to_code = self.particles.new_channel_to(self.gravity_code.particles)
        # energy bookkeeping accumulators
        self.multiples_external_tidal_correction = zero
        self.multiples_internal_tidal_correction = zero
        self.multiples_integration_energy_error = zero
        self.all_multiples_energy = zero
        self.stopping_conditions.disable()

    def commit_particles(self):
        """
        Turn pre-defined binaries into multiples (components stored
        relative to the center of mass) and push everything to the
        gravity code.
        """
        if len(self.multiples) == 0:
            if not len(self.binaries) == 0:
                for binary in self.binaries:
                    multiple = self.multiples.add_particle(binary)
                    components = self.components_of_multiples.add_particles(binary.components())
                    components.child1 = None
                    components.child2 = None
                    multiple.components = components
                    multiple.mass = components.mass.sum()
                    # TODO radius!
                    multiple.radius = (binary.child1.position - binary.child2.position).length() * 2
                    multiple.position = components.center_of_mass()
                    multiple.velocity = components.center_of_mass_velocity()
                    # store the components relative to the multiple
                    components.position -= multiple.position
                    components.velocity -= multiple.velocity
                    #self.multiples.add_particle(multiple)
        #if len(self.singles) == 0:
        #    self.singles.add_particles(self.particles)
        # relink the components so these are in the right set
        if len(self.multiples) > 0:
            for x in self.multiples:
                x.components = x.components.get_intersecting_subset_in(self.components_of_multiples)
        #if len(self.particles) == 0:
        #    self.particles.add_particles(self.singles)
        #    self.particles.add_particles(self.multiples)
        self.gravity_code.particles.add_particles(self.particles)
        self.singles_in_binaries_previous = self.singles_in_binaries.copy()
        self.all_multiples_energy = self.get_total_energy_of_all_multiples()

    def evolve_model(self, time):
        """
        Evolve the system to the given time, resolving collision
        detections as encounters along the way. Stops early when one of
        our own stopping conditions fires or no progress is made.
        """
        self.stopping_conditions.unset()
        attributes_to_update = ['mass', 'x', 'y', 'z', 'vx', 'vy', 'vz', 'radius']
        self.channel_from_model_to_code.copy_attributes(attributes_to_update)
        self.particles.synchronize_to(self.gravity_code.particles)
        self.model_time = self.gravity_code.model_time
        previous_time = None
        while self.model_time < time:
            self.gravity_code.evolve_model(time)
            self.model_time = self.gravity_code.model_time
            self.channel_from_code_to_model.copy()
            if self.stopping_condition.is_set():
                LOG_ENCOUNTER.info("found collision at time: {0}".format(self.gravity_code.model_time))
                initial_energy = self.gravity_code.get_total_energy()
                self.gravity_code.synchronize_model()
                self.channel_from_code_to_model.copy()
                self.handle_stopping_condition()
                self.particles.synchronize_to(self.gravity_code.particles)
                for i,k in enumerate(self.gravity_code.particles.key):
                    print(i, k)
                self.channel_from_model_to_code.copy()
                final_energy = self.gravity_code.get_total_energy()
                self.update_energy_bookkeeping(initial_energy, final_energy)
            # bail out when the integrator made no progress
            if not previous_time is None and previous_time == self.model_time:
                break
            if self.stopping_conditions.is_set():
                break
            previous_time = self.model_time
            if len(self.particles) == 1:
                break

    def synchronize_model(self):
        """
        updates the singles to the right position
        """

    def get_total_energy_of_all_multiples(self):
        """Sum of the internal energies of all multiples."""
        result = zero
        for x in self.multiples:
            result += self.get_energy_of_a_multiple(x)
        return result

    def get_energy_of_a_multiple(self, multiple):
        """Internal (kinetic + potential) energy of one multiple."""
        components = multiple.components
        tree = components.new_binary_tree_wrapper()
        singles = Particles()
        for node in tree.iter_descendant_leafs():
            singles.add_particle(node.particle)
        # tree is stored in rest state,
        # no energy of central particle
        energy = singles.kinetic_energy()
        energy += singles.potential_energy(G = self.G)
        return energy

    def update_energy_bookkeeping(self, initial_energy, final_energy):
        """Update energy-error accounting after an encounter was handled."""
        dE_gravity_code = final_energy - initial_energy
        self.local_energy_error = (
            dE_gravity_code
            - self.handle_encounter_code.delta_energy
            - self.handle_encounter_code.delta_potential_in_field
        )
        self.internal_local_energy_error = (
            dE_gravity_code
            + self.handle_encounter_code.delta_multiple_energy
            - self.handle_encounter_code.delta_potential_in_field
        )
        self.corrected_internal_local_energy_error = (
            dE_gravity_code
            + self.handle_encounter_code.delta_multiple_energy
            - self.handle_encounter_code.delta_potential_in_field
            + self.handle_encounter_code.delta_internal_potential
            - self.handle_encounter_code.scatter_energy_error
        )
        LOG_ENERGY.info('net local error = {0}'.format(self.local_energy_error))
        LOG_ENERGY.info('net local internal error = {0}'.format(self.internal_local_energy_error))
        LOG_ENERGY.info('corrected local internal error = {0}'.format(self.corrected_internal_local_energy_error))
        self.multiples_external_tidal_correction += self.handle_encounter_code.delta_potential_in_field
        self.multiples_internal_tidal_correction -= self.handle_encounter_code.delta_internal_potential
        self.multiples_integration_energy_error += self.handle_encounter_code.scatter_energy_error
        self.all_multiples_energy = self.get_total_energy_of_all_multiples()
        self.total_energy = final_energy + self.all_multiples_energy
        self.corrected_total_energy = (
            self.total_energy
            - self.multiples_external_tidal_correction
            - self.multiples_internal_tidal_correction
            - self.multiples_integration_energy_error
        )
        LOG_ENERGY.info('total energy (top+mul) = {0}'.format(self.total_energy))
        LOG_ENERGY.info('corrected_total energy (top+mul) = {0}'.format(self.corrected_total_energy))

    @property
    def all_singles(self):
        """
        All single stars: free singles plus the leaf components of every
        multiple, converted to absolute positions and velocities.
        """
        result = self.singles.copy()
        for multiple in self.multiples:
            components = multiple.components
            tree = components.new_binary_tree_wrapper()
            subset = Particles()
            for node in tree.iter_descendant_leafs():
                subset.add_particle(node.particle)
            # components are stored relative to the multiple
            subset.position += multiple.position
            subset.velocity += multiple.velocity
            delattr(subset, 'child1')
            delattr(subset, 'child2')
            result.add_particles(subset)
        return result

    def handle_stopping_condition(self):
        """
        Resolve every detected encounter and raise the corresponding
        change-detection stopping conditions.
        """
        encounters = self.determine_encounters()
        # accumulators filled by handle_encounter for each encounter
        new_binaries = Particles()
        dissolved_binaries = Particles()
        updated_binaries = Particles()
        new_multiples = Particles()
        dissolved_multiples = Particles()
        updated_multiples = Particles()
        for particles_in_encounter in encounters:
            self.handle_encounter(
                particles_in_encounter,
                new_binaries,
                dissolved_binaries,
                updated_binaries,
                new_multiples,
                dissolved_multiples,
                updated_multiples
            )
        if self.stopping_conditions.multiples_change_detection.is_enabled():
            if len(new_multiples) > 0 or len(dissolved_multiples) > 0 or len(updated_multiples) > 0:
                self.stopping_conditions.multiples_change_detection.set(
                    new_multiples,
                    dissolved_multiples,
                    updated_multiples
                )
        if self.stopping_conditions.binaries_change_detection.is_enabled():
            if len(new_binaries) > 0 or len(dissolved_binaries) > 0 or len(updated_binaries) > 0 :
                self.stopping_conditions.binaries_change_detection.set(
                    new_binaries.get_intersecting_subset_in(self.binaries),
                    dissolved_binaries,
                    updated_binaries.get_intersecting_subset_in(self.binaries)
                )

    def handle_encounter(
        self,
        particles_in_encounter,
        new_binaries,
        dissolved_binaries,
        updated_binaries,
        new_multiples,
        dissolved_multiples,
        updated_multiples
    ):
        """
        Resolve one encounter with the encounter-handling code and fold
        the outcome back into the singles / multiples / binaries
        bookkeeping. The six particle-set arguments are output
        accumulators owned by the caller.
        """
        code = self.handle_encounter_code
        code.reset()
        before = particles_in_encounter.copy()
        LOG_ENCOUNTER.info("found encounter with particles {0}".format(particles_in_encounter.key))
        code.particles_in_encounter.add_particles(particles_in_encounter)
        print(self.particles , particles_in_encounter)
        code.particles_in_field.add_particles(self.particles - particles_in_encounter)
        code.existing_binaries.add_particles(self.binaries)
        code.existing_multiples.add_particles(self.multiples)
        LOG_ENCOUNTER.info("handling encounter, {0} particles in encounter".format(len(code.particles_in_encounter)))
        code.execute()
        LOG_ENCOUNTER.info(
            "handling encounter, finished, {0} new multiples, {1} dissolved multiples, {2} updated multiples".format(
                len(code.new_multiples),
                len(code.dissolved_multiples),
                len(code.updated_multiples
            ))
        )
        new_multiples.add_particles(code.new_multiples)
        dissolved_multiples.add_particles(code.dissolved_multiples)
        for x in dissolved_multiples:
            x.components = x.components.copy()
        updated_multiples.add_particles(code.updated_multiples)
        for x in updated_multiples:
            x.components = x.components.copy()
        LOG_ENCOUNTER.info("captured singles: {0}".format(code.captured_singles.key))
        LOG_ENCOUNTER.info("released singles: {0}".format(code.released_singles.key))
        # update the singles (will have singles and multiples)
        self.singles.remove_particles(code.captured_singles)
        self.singles.add_particles(code.released_singles)
        LOG_ENCOUNTER.info("dissolved multiples: {0}".format(code.dissolved_multiples.key))
        LOG_ENCOUNTER.info("new multiples: {0}".format(code.new_multiples.key))
        # update multiples
        self.multiples.remove_particles(code.dissolved_multiples)
        for x in code.dissolved_multiples:
            self.components_of_multiples.remove_particles(x.components)
        # NOTE(review): this rebinds the 'new_multiples' output parameter to
        # a different set (the accumulator was already filled above) — the
        # local loop below uses the rebound set; confirm intent.
        new_multiples = self.multiples.add_particles(code.new_multiples)
        for x in new_multiples:
            x.components = self.components_of_multiples.add_particles(x.components)
        self.number_of_collisions += 1
        #io.write_set_to_file((code.all_particles_in_encounter, code.particles_after_encounter, code.particles_before_scaling), "encounter-{0}.h5".format(self.number_of_collisions), "amuse", names=('before', 'after', 'after_smalln'), version="2.0", append_to_file=False)
        # code.all_particles_in_encounter
        # update binaries
        for x in code.dissolved_binaries:
            self.singles_in_binaries.remove_particle(x.child1)
            self.singles_in_binaries.remove_particle(x.child2)
        for x in code.new_binaries:
            self.singles_in_binaries.add_particle(x.child1)
            self.singles_in_binaries.add_particle(x.child2)
        for x in code.updated_binaries:
            child1 = x.child1.as_particle_in_set(self.singles_in_binaries)
            child1.position = x.child1.position
            child1.velocity = x.child1.velocity
            child2 = x.child2.as_particle_in_set(self.singles_in_binaries)
            child2.position = x.child2.position
            child2.velocity = x.child2.velocity
        self.binaries.remove_particles(code.dissolved_binaries)
        self.binaries.add_particles(code.new_binaries)
        self.singles_in_binaries_previous = self.singles_in_binaries.copy()
        if self.stopping_conditions.encounter_detection.is_enabled():
            # package before/after snapshots for the stopping condition
            model = Particles()
            particles_before_encounter = Particles()
            particles_before_encounter.add_particles(code.all_particles_in_encounter)
            particles_after_encounter = Particles()
            particles_after_encounter.add_particles(code.particles_after_encounter)
            particle = Particle()
            particle.particles_before_encounter = particles_before_encounter
            particle.particles_after_encounter = particles_after_encounter
            model.add_particle(particle)
            self.stopping_conditions.encounter_detection.set(model)
        # NOTE(review): this channel is created but never copied — looks
        # like dead code or a missing copy_attributes call; confirm.
        channel = code.updated_binaries.new_channel_to(self.binaries)
        new_binaries.add_particles(code.new_binaries)
        dissolved_binaries.add_particles(code.dissolved_binaries)
        updated_binaries.add_particles(code.updated_binaries)
        if 0:
            print("before:", particles_in_encounter)
            self.particles.remove_particles(particles_in_encounter)
            print("after:", code.particles_after_encounter.key)
            self.particles.add_particles(code.particles_after_encounter)
        elif 0:
            self.gravity_code.particles.remove_particles(particles_in_encounter)
        # copy the post-encounter phase-space coordinates into the model
        channel = code.particles_after_encounter.new_channel_to(self.particles)
        channel.copy_attributes(["x","y","z", "vx", "vy","vz"])

    def determine_encounters(self):
        """
        Group the collision-detection pairs into disjoint encounters
        (connected components over shared particles).
        """
        particles0 = self.stopping_condition.particles(0).copy()
        particles1 = self.stopping_condition.particles(1).copy()
        if self.must_handle_one_encounter_per_stopping_condition:
            particles0 = particles0[:1]
            particles1 = particles1[:1]
        encounters = []
        from_key_to_encounter = {}
        for particle0, particle1 in zip(particles0, particles1):
            key0 = particle0.key
            key1 = particle1.key
            if key0 in from_key_to_encounter:
                if key1 in from_key_to_encounter:
                    # both already seen: merge the two encounters if distinct
                    encounter0 = from_key_to_encounter[key0]
                    encounter1 = from_key_to_encounter[key1]
                    if not encounter0 is encounter1:
                        encounter0.add_particles(encounter1)
                        encounter1.remove_particles(encounter1)
                        for x in encounter0:
                            from_key_to_encounter[x.key] = encounter0
                else:
                    encounter = from_key_to_encounter[key0]
                    encounter.add_particle(particle1)
                    from_key_to_encounter[key1] = encounter
            elif key1 in from_key_to_encounter:
                encounter = from_key_to_encounter[key1]
                encounter.add_particle(particle0)
                from_key_to_encounter[key0] = encounter
            else:
                encounter = Particles()
                encounter.add_particle(particle0)
                encounter.add_particle(particle1)
                encounters.append(encounter)
                from_key_to_encounter[key0] = encounter
                from_key_to_encounter[key1] = encounter
        # merged-away encounters are empty; drop them
        return [x for x in encounters if len(x) > 0]

    def get_total_energy(self):
        """Corrected total energy: gravity code + multiples - corrections."""
        self.total_energy = self.gravity_code.get_total_energy() + self.all_multiples_energy
        self.corrected_total_energy = (
            self.total_energy
            - self.multiples_external_tidal_correction
            - self.multiples_internal_tidal_correction
            - self.multiples_integration_energy_error
        )
        return self.corrected_total_energy

    def update_model(self):
        """
        Fold externally changed binary component states into the
        components of the multiples, recenter every multiple, and push
        the result to the gravity code.
        """
        # we do all the work on the singles_in_binaries_previous set
        # this makes sure all keys match attributes
        self.singles_in_binaries_previous.delta_position = self.singles_in_binaries.position - self.singles_in_binaries_previous.position
        self.singles_in_binaries_previous.delta_velocity = self.singles_in_binaries.velocity - self.singles_in_binaries_previous.velocity
        # take not yet updated positions
        channel = self.components_of_multiples.new_channel_to(self.singles_in_binaries_previous)
        channel.copy_attributes(['x', 'y', 'z', 'vx', 'vy', 'vz'])
        # update these
        self.singles_in_binaries_previous.position += self.singles_in_binaries_previous.delta_position
        self.singles_in_binaries_previous.velocity += self.singles_in_binaries_previous.delta_velocity
        # copy back
        channel = self.singles_in_binaries_previous.new_channel_to(self.components_of_multiples)
        channel.copy_attributes(['x', 'y', 'z', 'vx', 'vy', 'vz'])
        channel = self.singles_in_binaries.new_channel_to(self.components_of_multiples)
        channel.copy_attribute('mass')
        for multiple in self.multiples:
            components = self.get_singles_of_a_multiple(multiple)
            multiple.mass = components.mass.sum()
            center_of_mass = components.center_of_mass()
            center_of_mass_velocity = components.center_of_mass_velocity()
            # components are stored relative to the multiple, so any
            # residual center-of-mass offset is drift added to the multiple
            multiple.position += center_of_mass
            multiple.velocity += center_of_mass_velocity
            components.position -= center_of_mass
            components.velocity -= center_of_mass_velocity
        attributes_to_update = ['mass', 'x', 'y', 'z', 'vx', 'vy', 'vz', 'radius']
        channel = self.particles.new_channel_to(self.gravity_code.particles)
        channel.copy_attributes(attributes_to_update)
        self.singles_in_binaries_previous = self.singles_in_binaries.copy()

    def get_singles_of_a_multiple(self, multiple):
        """Return the leaf (single-star) components of a multiple."""
        components = multiple.components
        tree = components.new_binary_tree_wrapper()
        singles = Particles()
        for node in tree.iter_descendant_leafs():
            singles.add_particle(node.particle)
        return singles
| 95,615
| 41.723861
| 270
|
py
|
amuse
|
amuse-main/src/amuse/couple/parallel_stellar_evolution.py
|
import sys
import traceback
import numpy
import threading
from amuse.units import units
from amuse.datamodel import ParticlesSuperset
from amuse.support.exceptions import AmuseException
from amuse.community.interface.se import merge_colliding_in_stellar_evolution_code
from amuse.support.options import option, OptionalAttributes
class ParallelStellarEvolution(OptionalAttributes):
    """
    Runs several instances of a stellar evolution code side by side,
    distributing particles round-robin over the instances and driving the
    instances from one thread each.
    """
    def __init__(
        self,
        stellar_evolution_class,
        number_of_workers=1,
        individual_options=None,
        **options
    ):
        # stellar_evolution_class: factory used to create each worker
        # individual_options: optional per-instance option dicts, merged on
        # top of the shared **options
        OptionalAttributes.__init__(self, **options)
        self.code_factory = stellar_evolution_class
        self.number_of_workers = number_of_workers
        self.model_time = 0.0 | units.Myr
        if individual_options is None:
            # all instances share the same options dict
            options_list = [options] * number_of_workers
        else:
            # per-instance copies updated with the individual overrides
            options_list = [options.copy() for i in range(number_of_workers)]
            for individual, shared in zip(individual_options, options_list):
                shared.update(individual)
        # instantiate all workers concurrently (construction can be slow)
        threads = [ThreadWithResult(target=stellar_evolution_class, kwargs=options_list[i]) for i in range(number_of_workers)]
        self.code_instances = self._execute_all_threads(threads)
        self.particles = ParallelParticlesSuperset(
            [code.particles for code in self.code_instances],
            execute_all_threads_func=self._execute_all_threads)
        self.parameters = ParallelParameters(self.code_instances)

    @option(type='boolean', sections=('code'))
    def must_run_threaded(self):
        # NOTE(review): ('code') is a plain string, not a one-element
        # tuple; presumably ('code',) was intended — confirm against the
        # options framework before changing.
        return True

    def _execute_all_threads(self, threads):
        # dispatch either threaded or serial, per the option above
        if self.must_run_threaded:
            return self._execute_all_threads_parallel(threads)
        else:
            return self._execute_all_threads_serial(threads)

    def _execute_all_threads_parallel(self, threads):
        # start all threads, wait for completion, collect results;
        # returns None when no thread produced a result
        for x in threads:
            x.start()
        for x in threads:
            x.join()
        result = [x.get_result() for x in threads]
        if not result == [None]*len(threads):
            return result

    def _execute_all_threads_serial(self, threads):
        # run the thread targets inline, in order (for debugging)
        for x in threads:
            x.run()
        result = [x.get_result() for x in threads]
        if not result == [None]*len(threads):
            return result

    def _run_threaded(self, function_name, args=()):
        # call the named method with args on every code instance in parallel
        threads = [ThreadWithResult(target=getattr(code, function_name), args=args) for code in self.code_instances]
        return self._execute_all_threads(threads)

    def initialize_code(self):
        self._run_threaded("initialize_code")

    def commit_parameters(self):
        self._run_threaded("commit_parameters")

    def recommit_parameters(self):
        self._run_threaded("recommit_parameters")

    def commit_particles(self):
        self._run_threaded("commit_particles")

    def recommit_particles(self):
        self._run_threaded("recommit_particles")

    def evolve_model(self, end_time):
        """Evolve every worker to end_time, then advance our model_time."""
        self._run_threaded("evolve_model", args=(end_time,))
        self.model_time = end_time

    def cleanup_code(self):
        self._run_threaded("cleanup_code")

    def stop(self):
        self._run_threaded("stop")

    def merge_colliding(self, *args, **kwargs):
        return merge_colliding_in_stellar_evolution_code(self, *args, **kwargs)

    def new_particle_from_model(self, internal_structure, current_age, key=None):
        # route the new particle to the next instance in round-robin order
        index = self.particles.next_index_of_code_instance_for_new_particle_from_model()
        return self.code_instances[index].new_particle_from_model(internal_structure, current_age, key=key)
class ThreadWithResult(threading.Thread):
    """
    Thread that remembers the return value of its target callable and,
    on get_result(), re-raises any exception the target raised (with the
    original traceback).
    """
    def __init__(self, target=None, args=(), kwargs=dict()):
        # keep private (name-mangled) copies: threading.Thread itself
        # stores the target as self._target, which we must not clash with
        self.__target = target
        self.__args = args
        self.__kwargs = kwargs
        self.result = None
        self.caught_exception = False
        threading.Thread.__init__(self, target=target, args=args, kwargs=kwargs)

    def run(self):
        try:
            outcome = self.__target(*self.__args, **self.__kwargs)
        except Exception:
            # stash the full exc_info so get_result can re-raise it
            self.caught_exception = True
            self.result = sys.exc_info()
        else:
            self.result = outcome

    def get_result(self):
        """Return the target's result, or re-raise its exception."""
        if not self.caught_exception:
            return self.result
        exc_type, exc_value, exc_traceback = self.result
        raise exc_value.with_traceback(exc_traceback)
class ParallelParticlesSuperset(ParticlesSuperset):
    """
    Superset over the particle sets of several code instances that
    distributes newly added particles round-robin over the sets, adding
    to all sets concurrently.
    """
    def __init__(self, particle_sets, execute_all_threads_func=None):
        # execute_all_threads_func: callable that runs a list of
        # ThreadWithResult objects (threaded or serial)
        ParticlesSuperset.__init__(self, particle_sets)
        self._private.number_of_particles = 0
        self._private.number_of_sets = len(particle_sets)
        self._private.execute_all_threads_func = execute_all_threads_func

    def add_particles_to_store(self, keys, attributes = [], values = []):
        # One stride-slice per set: the offset accounts for how many
        # particles were distributed previously, so the round-robin
        # continues where the last add left off.
        slices = [slice((i-self._private.number_of_particles) % self._private.number_of_sets, len(keys),
            self._private.number_of_sets) for i in range(self._private.number_of_sets)]
        # add each set's share concurrently; skip sets that get no keys
        threads = [
            ThreadWithResult(
                target=particle_set.add_particles_to_store,
                args=(keys[one_slice], attributes, [v[one_slice] for v in values])
            ) for particle_set, one_slice in zip(self._private.particle_sets, slices) if len(keys[one_slice])]
        self._private.execute_all_threads_func(threads)
        self._private.number_of_particles += len(keys)

    def next_index_of_code_instance_for_new_particle_from_model(self):
        # index of the set that would receive the next particle in the
        # round-robin; counts the particle as distributed
        next_index = -self._private.number_of_particles % self._private.number_of_sets
        self._private.number_of_particles += 1
        return next_index
class ParallelParameters(object):
    """
    Parameter proxy for a group of code instances: reads come from the
    first instance, writes are broadcast to every instance.
    """
    def __init__(self, code_instances):
        # bypass our own __setattr__, which broadcasts to the codes
        object.__setattr__(self, "code_instances", code_instances)

    def _instances(self):
        # raw attribute access, again bypassing the overridden hooks
        return object.__getattribute__(self, "code_instances")

    def __getattr__(self, attribute_name):
        return getattr(self._instances()[0].parameters, attribute_name)

    def __setattr__(self, attribute_name, value):
        for code in self._instances():
            setattr(code.parameters, attribute_name, value)
| 6,255
| 35.584795
| 126
|
py
|
amuse
|
amuse-main/src/amuse/data/__init__.py
|
# Data directory for AMUSE
| 27
| 13
| 26
|
py
|
amuse
|
amuse-main/src/amuse/ext/composition_methods.py
|
"""
selection of composition integration methods
always of the form:
name(EVOLVEA,EVOLVEB,dt)
in case EVOLVEA and EVOLVEB are symplectic, the compositions
are symplectic
included:
LEAPFROG
SPLIT_4TH_S_M6
SPLIT_4TH_S_M5
SPLIT_4TH_S_M4
SPLIT_6TH_SS_M11
SPLIT_6TH_SS_M13
SPLIT_8TH_SS_M21
SPLIT_10TH_SS_M35
"""
def LEAPFROG(EVOLVEA, EVOLVEB, dt):
    """Second-order leapfrog composition: half A-step, full B-step, half A-step."""
    half_step = dt/2
    EVOLVEA(half_step)
    EVOLVEB(dt)
    EVOLVEA(half_step)
def SPLIT_4TH_S_M6(EVOLVEA, EVOLVEB, dt):
    """4th-order symmetric splitting, m=6.

    The step is a palindromic alternation of A- and B-substeps; the mirrored
    coefficient lists below reproduce the time-symmetric sequence exactly.
    """
    K1 = 0.0792036964311957
    K2 = 0.353172906049774
    K3 = -.0420650803577195
    K4 = 1. - 2*(K1 + K2 + K3)
    D1 = 0.209515106613362
    D2 = -.143851773179818
    D3 = 0.5 - D1 - D2
    a_coeffs = [K1, K2, K3, K4, K3, K2, K1]
    b_coeffs = [D1, D2, D3, D3, D2, D1]
    for a, b in zip(a_coeffs, b_coeffs):
        EVOLVEA(a*dt)
        EVOLVEB(b*dt)
    EVOLVEA(a_coeffs[-1]*dt)
def SPLIT_4TH_S_M5(EVOLVEA, EVOLVEB, dt):
    """4th-order symmetric splitting, m=5 (palindromic A/B substeps)."""
    K1 = (14 - 19.**0.5)/108
    K2 = (20 - 7*(19.**0.5))/108
    K3 = (1/2.) - (K1 + K2)
    D1 = 2/5.
    D2 = -1/10.
    D3 = 1 - (2*D1 + 2*D2)
    a_coeffs = [K1, K2, K3, K3, K2, K1]
    b_coeffs = [D1, D2, D3, D2, D1]
    for a, b in zip(a_coeffs, b_coeffs):
        EVOLVEA(a*dt)
        EVOLVEB(b*dt)
    EVOLVEA(a_coeffs[-1]*dt)
def SPLIT_4TH_S_M4(EVOLVEA, EVOLVEB, dt):
    """4th-order symmetric splitting, m=4 (palindromic A/B substeps)."""
    K1 = (642 + 471.**0.5)/3924
    K2 = 121*(12 - 471.**0.5)/3924
    K3 = 1. - 2*(K1 + K2)
    D1 = 6/11.
    D2 = 0.5 - D1
    a_coeffs = [K1, K2, K3, K2, K1]
    b_coeffs = [D1, D2, D2, D1]
    for a, b in zip(a_coeffs, b_coeffs):
        EVOLVEA(a*dt)
        EVOLVEB(b*dt)
    EVOLVEA(a_coeffs[-1]*dt)
def SPLIT_6TH_SS_M11(EVOLVEA, EVOLVEB, dt):
    """Symmetric composition of symmetric maps, 6th order, m=11.

    The full palindromic weight sequence C1..C6..C1 is built from the six
    base coefficients; consecutive A half-steps of neighbouring leapfrogs
    are merged, giving the A(w/2) B(w) A((w+w')/2) ... pattern.
    """
    C1 = 0.21375583945878254555518066964857
    C2 = 0.18329381407425713911385974425217
    C3 = 0.17692819473098943794898811709929
    C4 = -0.44329082681170215849622829626258
    C5 = 0.11728560432865935385403585669136
    C6 = 0.50405474843802736404832781714239
    base = [C1, C2, C3, C4, C5, C6]
    weights = base + base[-2::-1]
    EVOLVEA(weights[0]*dt/2)
    for previous, current in zip(weights, weights[1:]):
        EVOLVEB(previous*dt)
        EVOLVEA((previous+current)*dt/2)
    EVOLVEB(weights[-1]*dt)
    EVOLVEA(weights[-1]*dt/2)
def SPLIT_6TH_SS_M13(EVOLVEA, EVOLVEB, dt):
    """Symmetric composition of symmetric maps, 6th order, m=13.

    Palindromic weights C1..C7..C1 with merged A half-steps between
    consecutive leapfrog substeps.
    """
    C1 = 0.13861930854051695245808013042625
    C2 = 0.13346562851074760407046858832209
    C3 = 0.13070531011449225190542755785015
    C4 = 0.12961893756907034772505366537091
    C5 = -0.35000324893920896516170830911323
    C6 = 0.11805530653002387170273438954049
    C7 = 0.39907751534871587459988795520665
    base = [C1, C2, C3, C4, C5, C6, C7]
    weights = base + base[-2::-1]
    EVOLVEA(weights[0]*dt/2)
    for previous, current in zip(weights, weights[1:]):
        EVOLVEB(previous*dt)
        EVOLVEA((previous+current)*dt/2)
    EVOLVEB(weights[-1]*dt)
    EVOLVEA(weights[-1]*dt/2)
def SPLIT_8TH_SS_M21(EVOLVEA, EVOLVEB, dt):
    """Symmetric composition of symmetric maps, 8th order, m=21.

    Palindromic weights C1..C11..C1 with merged A half-steps between
    consecutive leapfrog substeps.
    """
    base = [
        0.10647728984550031823931967854896,
        0.10837408645835726397433410591546,
        0.35337821052654342419534541324080,
        -0.23341414023165082198780281128319,
        -0.24445266791528841269462171413216,
        0.11317848435755633314700952515599,
        0.11892905625000350062692972283951,
        0.12603912321825988140305670268365,
        0.12581718736176041804392391641587,
        0.11699135019217642180722881433533,
        -0.38263596012643665350944670744040,
    ]
    weights = base + base[-2::-1]
    EVOLVEA(weights[0]*dt/2)
    for previous, current in zip(weights, weights[1:]):
        EVOLVEB(previous*dt)
        EVOLVEA((previous+current)*dt/2)
    EVOLVEB(weights[-1]*dt)
    EVOLVEA(weights[-1]*dt/2)
def SPLIT_10TH_SS_M35(EVOLVEA, EVOLVEB, dt):
    """Symmetric composition of symmetric maps, 10th order, m=35.

    Palindromic weights C1..C18..C1 with merged A half-steps between
    consecutive leapfrog substeps.
    """
    base = [
        0.078795722521686419263907679337684,
        0.31309610341510852776481247192647,
        0.027918383235078066109520273275299,
        -0.22959284159390709415121339679655,
        0.13096206107716486317465685927961,
        -0.26973340565451071434460973222411,
        0.074973343155891435666137105641410,
        0.11199342399981020488957508073640,
        0.36613344954622675119314812353150,
        -0.39910563013603589787862981058340,
        0.10308739852747107731580277001372,
        0.41143087395589023782070411897608,
        -0.0048663605831352617621956593099771,
        -0.39203335370863990644808193642610,
        0.051942502962449647037182904015976,
        0.050665090759924496335874344156866,
        0.049674370639729879054568800279461,
        0.049317735759594537917680008339338,
    ]
    weights = base + base[-2::-1]
    EVOLVEA(weights[0]*dt/2)
    for previous, current in zip(weights, weights[1:]):
        EVOLVEB(previous*dt)
        EVOLVEA((previous+current)*dt/2)
    EVOLVEB(weights[-1]*dt)
    EVOLVEA(weights[-1]*dt/2)
| 7,430
| 23.524752
| 62
|
py
|
amuse
|
amuse-main/src/amuse/ext/halogen_model.py
|
from amuse.community.halogen.interface import Halogen
from amuse.datamodel import ParticlesWithUnitsConverted
def new_halogen_model(number_of_particles, convert_nbody = None, do_scale = False,
        redirection = 'null', **keyword_arguments):
    """
    Create an alpha-beta-gamma-model using Halogen with the given number of
    particles. Returns a set of equal-mass particles self-consistently sampled
    from the spherically symmetric density distribution defined by the alpha,
    beta, and gamma parameters. The model is centered around the origin.
    Positions and velocities are optionally scaled such that the kinetic and
    potential energies are 0.25 and -0.5 in nbody-units, respectively.

    The alpha, beta, and gamma parameters are (of course) required, but all
    other Halogen parameters can be used too, e.g.
    new_halogen_model(..., black_hole_mass = 1.0e6 | units.MSun)
    will set halogen.parameters.black_hole_mass to this value. See
    help(Halogen().parameters) for an overview of the Halogen parameters.

    :argument number_of_particles: Number of particles to generate in the model
    :argument convert_nbody: When given will convert the resulting set to SI units
    :argument do_scale: scale the result to exact nbody units (M=1, K=0.25, U=-0.5)
    :argument alpha: alpha parameter in density profile (see amuse/community/halogen/src/doc for details)
    :argument beta: beta parameter in density profile (see amuse/community/halogen/src/doc for details)
    :argument gamma: gamma parameter in density profile (see amuse/community/halogen/src/doc for details)
    """
    instance = Halogen(unit_converter=convert_nbody, redirection=redirection)
    try:
        instance.parameters.number_of_particles = number_of_particles
        for key, value in keyword_arguments.items():
            setattr(instance.parameters, key, value)
        instance.generate_particles()
        result = instance.particles.copy()
    finally:
        # Always stop the worker, also when parameter setting or particle
        # generation raises, so no code instance is leaked.
        instance.stop()
    result.move_to_center()
    if do_scale:
        result.scale_to_standard()
    if convert_nbody is not None:
        result = ParticlesWithUnitsConverted(result, convert_nbody.as_converter_from_si_to_generic())
        result = result.copy()
    return result
| 2,247
| 48.955556
| 105
|
py
|
amuse
|
amuse-main/src/amuse/ext/static_potentials.py
|
"""
The analytic potential of the galaxy so it can be used in Bridge as an external potential.
Most equations are taken from B&T:
Binney and Tremaine, Galactic Dynamics, Second Edition
"""
import numpy
from amuse.units import units, constants, quantities
from amuse.datamodel import Particle, Particles
from amuse.support.exceptions import AmuseException
from io import StringIO
class Abstract_Potential(object):
    """Base class for analytic potentials.

    Subclasses implement get_potential_at_point; gravity is then derived
    numerically from the potential by finite differences. The helper methods
    implement common textbook potentials (B&T = Binney & Tremaine).
    """
    def get_gravity_at_point(self, eps, x,y,z):
        """ derive the gravity from the potential """
        # One-sided finite differences of the potential; the step size is
        # 0.1% of the radial distance of the evaluation point.
        phi_0 = self.get_potential_at_point(eps, x,y,z)
        dpos = 0.001*(x**2+y**2+z**2).sqrt()
        phi_dx = self.get_potential_at_point(0,x+dpos,y,z) - phi_0
        phi_dy = self.get_potential_at_point(0,x,y+dpos,z) - phi_0
        phi_dz = self.get_potential_at_point(0,x,y, z+dpos) - phi_0
        return -phi_dx/dpos, -phi_dy/dpos, -phi_dz/dpos
    def get_potential_at_point(self, eps, x, y, z):
        """ Abstract function, to be overwritten by subclass """
        pass
    def flattened_potential(self, x, y, z, a, b, mass):
        """
        Following eq. 2.69a of B&T
        a=0 gives plummer potential
        b=0 gives Kuzmin's potential for razor-thin disc
        """
        r_squared = x**2+y**2
        return -constants.G * mass / (r_squared + (a + (z**2 + b**2).sqrt())**2).sqrt()
    def power_law_potential(self, r, alpha, r_0, mass_0):
        """ Following eq. 2.62 of B&T """
        rho_0 = mass_0 / (4./3. * numpy.pi * r_0**3)
        phi_0 = - constants.G * mass_0 / r_0
        v_circ_squared = 4 * numpy.pi * constants.G * rho_0 * r_0**alpha / (3 - alpha)
        if alpha == 2:
            # logarithmic potential in the alpha == 2 limit
            phi_minus_phi_0 = - v_circ_squared * numpy.log(r/r_0)
        else:
            phi_minus_phi_0 = - v_circ_squared * (r_0**(2-alpha) - r**(2-alpha))/(alpha-2)
        return -(phi_minus_phi_0 + phi_0)
    def point_mass_potential(self, r, mass):
        """ See eq. 2.34 of B&T """
        return -constants.G * mass / r
    def point_mass_gravity(self, r, mass, unit_vector):
        """ See eq. 2.27a of B&T """
        return -constants.G * mass / r**2 * unit_vector
class Disc_Bulge_Halo_Potential(Abstract_Potential):
    """Three-component Galaxy model: flattened disc + flattened bulge + halo.

    Component parameters are hard-coded Milky Way values (masses in MSun,
    scale lengths in kpc).
    """
    def halo_potential(self, x,y,z, Mc, Rc):
        """ TODO: Find the source for this potential -> McMillan & Portegies Zwart 2000?"""
        r=(x**2+y**2+z**2).sqrt()
        rr = (r/Rc)
        return constants.G * (Mc/Rc)*(0.5*numpy.log(1 +rr**2) + numpy.arctan(rr)/rr)
    def get_potential_at_point(self, eps, x, y, z):
        # disc and bulge both use the flattened potential of eq. 2.69a of B&T
        # (see Abstract_Potential.flattened_potential for the a/b meaning)
        disk = self.flattened_potential(x,y,z,
            0.0|units.kpc, 0.277|units.kpc, 1.12E+10|units.MSun)
        bulge = self.flattened_potential(x,y,z,
            3.7|units.kpc, 0.20|units.kpc, 8.07E+10|units.MSun)
        halo = self.halo_potential(x,y,z,
            Mc=5.0E+10|units.MSun, Rc=6.0|units.kpc)
        return disk + bulge + halo
class Galactic_Center_Potential_Kruijssen(Abstract_Potential):
    """
    Following Kruijssen et al 2014, which uses the enclosed mass
    profile from Launhardt et al 2002.

    Note that the mass profile only extends to 487.9 parsec from
    the galactic center, (and only to 487.9 * 0.63 parsec in the z direction).
    Outside this range, a different potential should be used.
    """
    def __init__(self, q=0.63):
        # q is the flattening of the potential along the z axis
        self.q = q
        self.load_table()
    def load_table(self, rescale=True):
        table = """
        # enclosed mass profile from Launhardt et al 2002,
        # recreated and provided by Kruijssen 2014
        # radius   enclosed mass
        # (parsec) (MSun)
        0.0        0.0
        0.6261     3298000
        0.6945     3429000
        0.7751     3636000
        0.8756     3855000
        0.9712     4088000
        1.077      4335000
        1.187      4552000
        1.293      4826000
        1.408      5019000
        1.534      5374000
        1.671      5754000
        1.820      6101000
        1.994      6469000
        2.198      6995000
        2.424      7563000
        2.705      8178000
        2.964      8842000
        3.268      9561000
        3.625      1.033E7
        3.996      1.128E7
        4.406      1.232E7
        4.798      1.332E7
        5.131      1.413E7
        5.487      1.498E7
        6.013      1.558E7
        6.752      1.635E7
        7.489      1.717E7
        8.409      1.803E7
        9.327      1.894E7
        10.28      2.028E7
        11.54      2.214E7
        13.04      2.441E7
        14.91      2.718E7
        17.16      3.026E7
        19.98      3.402E7
        22.99      3.939E7
        26.13      4.516E7
        29.35      5.229E7
        32.35      5.995E7
        35.02      6.806E7
        38.61      8.036E7
        43.09      9.865E7
        47.51      1.199E8
        51.74      1.429E8
        57.75      1.789E8
        64.84      2.329E8
        71.92      2.973E8
        81.25      3.947E8
        91.79      5.188E8
        99.97      6.246E8
        109.5      7.743E8
        120.0      9.142E8
        133.9      1.068E9
        152.2      1.237E9
        179.5      1.489E9
        206.5      1.741E9
        243.4      2.056E9
        283.5      2.289E9
        332.3      2.573E9
        382.3      2.893E9
        413.8      3.098E9
        456.2      3.449E9
        487.9      3.694E9
        1e6        3.694E9
        """
        stream = StringIO(table)
        radius, enclosed_mass = numpy.loadtxt(stream, unpack=True)
        if rescale:
            # See footnote 18 at the bottom of page 1076 of Kruijssen:
            # rescale to R0 = 8.3 kpc; mass scales with the square.
            factor = 8.3/8.5
            radius *= factor
            enclosed_mass *= factor**2
        self.radius = radius | units.parsec
        self.enclosed_mass_profile = enclosed_mass | units.MSun
    def enclosed_mass(self, r):
        """Interpolate the enclosed mass at radius r (log-linear in the table)."""
        try:
            index = quantities.searchsorted(self.radius, r)
        except ValueError:
            # This error is usually thrown when r has dimension > 1;
            # search on the flattened array and restore the shape after.
            shape = r.shape
            r_flat = r.flatten()
            index = quantities.searchsorted(self.radius, r_flat)
            # BUG FIX: reshape returns a new array instead of working in
            # place; previously the reshaped index was discarded, leaving a
            # flat index for multi-dimensional input.
            index = index.reshape(shape)
        mass_below = self.enclosed_mass_profile[index-1]
        mass_above = self.enclosed_mass_profile[index]
        radius_below = self.radius[index-1]
        radius_above = self.radius[index]
        # Linear interpolation in log space
        log_m_over_mb = numpy.log(mass_above/mass_below) * numpy.log(r/radius_below) / numpy.log(radius_above/radius_below)
        enclosed_mass = numpy.nan_to_num(numpy.exp(log_m_over_mb)) * mass_below
        return enclosed_mass
    def get_potential_at_point(self, eps, x, y, z):
        """
        Note that this potential is not entirely consistent with
        get_gravity_at_point (which should be used) because
        there a second coordinate transformation is used to flatten
        the "potential".
        """
        r = (x**2 + y**2 + z**2/self.q**2).sqrt()
        mass = self.enclosed_mass(r)
        # missing a term with the integrated contribution from outside shells
        return self.point_mass_potential(r, mass)
    def get_gravity_at_point(self, eps, x,y,z):
        """
        Overwrites the default to add a second coordinate transformation.
        """
        r = (x**2 + y**2 + z**2/self.q**2).sqrt()
        mass = self.enclosed_mass(r)
        unit_vector = []|x.unit
        for var in (x, y, z):
            unit_vector.append(var)
        unit_vector = unit_vector / (x**2 + y**2 + z**2).sqrt()
        # flatten the force in the z direction
        unit_vector[2] *= 1./self.q**2
        return self.point_mass_gravity(r, mass, unit_vector)
class Position_In_Potential(Abstract_Potential):
    """
    Wrapper around any other potential that has a test particle.

    Any call to get_potential_at_point will shift the coordinates to
    put the center on the location of that test particle.

    The particle is put in a Particles set to allow channels to be used,
    however, only a single particle is allowed at a time.
    """
    def __init__(self, potential, particle=None):
        self.potential = potential
        if particle is None:
            # default test particle at rest in the origin
            particle = Particle()
            particle.position = [0., 0., 0.] | units.parsec
            particle.velocity = [0., 0., 0.] | units.kms
        self.particles = Particles()
        self.particles.add_particle(particle)
    @property
    def particle(self):
        # the single test particle held by this wrapper
        return self.particles[0]
    @particle.setter
    def particle(self, particle):
        # replace the stored particle (the set always holds exactly one)
        self.particles.remove_particles(self.particles)
        self.particles.add_particle(particle)
    def get_potential_at_point(self, eps, x, y, z):
        # evaluate the wrapped potential relative to the particle position
        px, py, pz = self.particle.position
        return self.potential.get_potential_at_point(eps, x+px, y+py, z+pz)
| 8,764
| 34.630081
| 123
|
py
|
amuse
|
amuse-main/src/amuse/ext/boss_bodenheimer.py
|
import numpy
from amuse.ext.evrard_test import uniform_unit_sphere
from amuse.units import nbody_system
from amuse.units import generic_unit_system
from amuse.units import units
from amuse import datamodel
class bb79_cloud(object):
    """
    spherical uniformly rotating cloud of particles with density perturbation (m=2)
    -- initial conditions for the 'standard isothermal test case'
    Boss & Bodenheimer (1979, http://adsabs.harvard.edu/abs/1979ApJ...234..289B)
    -> binary fragmentation during isothermal collapse

    arguments:
    targetN -- intended number of particles
    omega -- angular velocity (cloud rotates as a rigid body around the z-axis),
      given in units of base rad/s if a converter is given, in 1./nbody_system.time if no converter is given
    rho_perturb -- amplitude of the density perturbation
    ethep_ratio -- ratio between total thermal and potential energy
    convert_nbody -- to set the Nbody units
    base_grid -- base grid

    Default values set as in Boss & Bodenheimer (1979).

    In the first step, uniform sphere of particles is generated; then the azimuth of
    particles is changed (while radius is kept constant) to achieve the cosine density
    perturbation. See Kitsionas (2003, sec. 3.1,
    http://adsabs.harvard.edu/abs/2003PhDT.......219K)
    """
    def __init__(self, targetN=10000, omega=0.775066020047 | nbody_system.time**-1,
            rho_perturb=0.5, ethep_ratio=0.25, convert_nbody=None, base_grid=None):
        self.targetN=targetN
        if convert_nbody is not None:
            omega=convert_nbody.to_nbody(omega)
        self.omega=omega.value_in(1./nbody_system.time)
        self.rho_peturb=rho_perturb
        self.ethep_ratio=ethep_ratio
        self.convert_nbody=convert_nbody
        self.base_grid=base_grid
    def new_model(self):
        # start from a uniform unit sphere; actualN may differ from targetN
        base_sphere=uniform_unit_sphere(self.targetN,base_grid=self.base_grid)
        x_uni,y_uni,z=base_sphere.make_xyz()
        self.actualN=len(x_uni)
        rad=numpy.sqrt(x_uni**2 + y_uni**2)
        phi=numpy.arctan2(y_uni,x_uni)
        # Remap the azimuth (radius kept fixed) so the particle density gets
        # an m=2 cosine perturbation: invert phi_old(phi_new) numerically by
        # tabulating it on a fine grid and interpolating.
        n_vec=2000
        phi_new_vec=numpy.linspace(-numpy.pi, numpy.pi, n_vec)
        phi_old_vec=phi_new_vec + self.rho_peturb*(numpy.sin(2.*phi_new_vec)/2.)
        phi_new=numpy.interp(phi,phi_old_vec,phi_new_vec)
        x=rad*numpy.cos(phi_new)
        y=rad*numpy.sin(phi_new)
        rad=numpy.sqrt(x**2 + y**2)
        phi=numpy.arctan2(y,x)
        # rigid-body rotation around the z axis
        vel=self.omega*rad
        vx=-vel*numpy.sin(phi)
        vy= vel*numpy.cos(phi)
        vz=0.
        # equal-mass particles normalized to total mass 1
        mass=numpy.ones_like(x)/self.actualN
        # potential energy of a uniform unit sphere is 3/5 (nbody units);
        # set the internal energy from the requested thermal/potential ratio
        Ep=3./5
        self.internalE=Ep*self.ethep_ratio
        internal_energy=numpy.ones_like(x)*self.internalE
        return (mass,x,y,z,vx,vy,vz,internal_energy)
    @property
    def result(self):
        # build the particle set (in nbody units; converted to SI if a
        # converter was given)
        mass,x,y,z,vx,vy,vz,u = self.new_model()
        result = datamodel.Particles(self.actualN)
        result.mass = nbody_system.mass.new_quantity(mass)
        result.x = nbody_system.length.new_quantity(x)
        result.y = nbody_system.length.new_quantity(y)
        result.z = nbody_system.length.new_quantity(z)
        result.vx = nbody_system.speed.new_quantity(vx)
        result.vy = nbody_system.speed.new_quantity(vy)
        result.vz = nbody_system.speed.new_quantity(vz)
        result.u = (nbody_system.speed**2).new_quantity(u)
        if not self.convert_nbody is None:
            result = datamodel.ParticlesWithUnitsConverted(result, self.convert_nbody.as_converter_from_si_to_generic())
            result = result.copy()
        return result
| 3,386
| 36.21978
| 116
|
py
|
amuse
|
amuse-main/src/amuse/ext/ClusterCore.py
|
"""
Stefan Umbreit, python version
.. [#] Casertano, S., Hut, P., *The Astrophysical Journal*, **298**, 80-94 (1985)
"""
import numpy as N
def density_estimators(n_points, r, mass, dims=3):
    """
    Calculate the density estimators that are used for the calculation of the
    core quantities (Casertano & Hut 1985).

    n_points - number of points to average over
    r, mass  - radial positions and masses of the stars
               (both arrays must be sorted radially)
    dims     - 3 for volume densities, 2 for surface densities

    Returns an (nave, 2) array with radius in column 0 and the local density
    estimate in column 1, where nave is the index of the star enclosing half
    the total mass.
    """
    # calculate the total mass of all stars
    m_tot = N.sum(mass)
    # only build estimators inside the half-mass radius
    nave = N.argmin(N.abs(0.5*m_tot - N.cumsum(mass)))
    rhoj = N.zeros((nave, 2), N.float64)
    for i in range(nave):
        # BUG FIX: use integer floor division; ``n_points/2`` yields a float
        # under Python 3 and float slice/array indices fail below
        jmin = max(i - n_points//2, 0)
        jmax = min(jmin + n_points, nave - 1)
        # this is equivalent to their J-1 factor for the case of equal masses,
        # and seems like a good generalization for unequal masses
        mrho = N.sum(mass[jmin+1:jmax])
        if dims == 3:
            Vrj = 4.0/3.0 * N.pi * (r[jmax]**3 - r[jmin]**3)
        elif dims == 2:
            Vrj = N.pi*(r[jmax]**2 - r[jmin]**2)
        rhoj[i, 1] = mrho/Vrj
        rhoj[i, 0] = r[i]
    return rhoj
def core_radius(rhoj):
    """
    Calculates the core size as the density-weighted mean of the radius
    estimators.

    rhoj - Density estimators (use density_estimator function)
    """
    radii = rhoj[:, 0]
    densities = rhoj[:, 1]
    return N.sum(radii*densities)/N.sum(densities)
| 1,290
| 23.826923
| 81
|
py
|
amuse
|
amuse-main/src/amuse/ext/job_server.py
|
"""
JobServer: class for job farming using amuse communication channels
usage:
start jobserver
jobserver=JobServer(hosts=<list of hostnames> [ ,channel_type="mpi", preamble="<commands>", retry_jobs=True/ False] )
submit job
job=jobserver.submit_job(somework, (args,))
wait for one result (encounters all):
jobserver.wait()
job=jobserver.last_finished_job
wait for all to finish, loop over all:
jobserver.waitall()
for job in jobserver.finished_jobs:
print job.result
it is essential that the functions which are to be executed remotely are pickleable, i.e. they must not be
derived from the main module. An easy way to achieve this is to import them from a separate file.
2 issues to be fixed:
- blocking startup of hosts may prevent threads shutting down, leading to freeze at end of script
(so manual kill necessary)
- thread function _startup contains references to JobServer, hence del jobserver will actually not be called
until the situation of issue 1 is resolved (so the warning given there is useless)
"""
from amuse.rfi.core import *
import pickle
from amuse.rfi.async_request import AsyncRequestsPool
import inspect
from collections import deque
import threading
from time import sleep
import warnings
import base64
def dump_and_encode(x):
    """Serialize *x* with pickle and return it as a base64 text string."""
    raw = pickle.dumps(x)
    return base64.b64encode(raw).decode()
def decode_and_load(x):
    """Inverse of dump_and_encode: base64-decode *x* and unpickle it.

    NOTE: unpickling executes arbitrary code; only use on trusted data.
    """
    raw = base64.b64decode(x.encode())
    return pickle.loads(raw)
class RemoteCodeException(Exception):
    """Wraps an exception raised on the remote side of a code."""
    def __init__(self, ex=None):
        self.ex = ex
    def __str__(self):
        return "[{0}] {1}".format(self.ex.__class__.__name__, self.ex)
class RemoteCodeImplementation(object):
    """Remote-side worker: executes received code/functions in one
    persistent namespace (``self.scope``).

    All methods return the encoded error state (encoded None on success,
    an encoded RemoteCodeException on failure); values are passed back
    through the framework's OUT parameters (``argout.value``).

    SECURITY NOTE: this exec's/unpickles whatever it receives -- only use
    between trusted peers.
    """
    def __init__(self):
        self.scope={}
        # make the codec helpers callable from inside the remote namespace
        self.scope['dump_and_encode']=dump_and_encode
        self.scope['decode_and_load']=decode_and_load
    def _exec(self,arg):
        # execute a statement string in the persistent scope
        try:
            exec(arg, self.scope)
            return dump_and_encode(None)
        except Exception as ex:
            return dump_and_encode(RemoteCodeException(ex))
    def _eval(self,arg,argout):
        # evaluate an expression; the encoded value goes to argout.value
        try:
            self.scope.update(dict(arg=arg))
            exec("argout="+arg, self.scope)
            argout.value=eval("dump_and_encode(argout)",self.scope)
            return dump_and_encode(None)
        except Exception as ex:
            # on failure still set a (dummy) encoded value for the OUT param
            argout.value=dump_and_encode("")
            return dump_and_encode(RemoteCodeException(ex))
    def _assign(self,lhs,argin):
        # bind the decoded value of argin to the name(s) given by lhs
        try:
            self.scope.update(dict(argin=argin))
            exec(lhs+"=decode_and_load(argin)", self.scope)
            return dump_and_encode(None)
        except Exception as ex:
            return dump_and_encode(RemoteCodeException(ex))
    def _func(self,func,argin,kwargin,argout):
        # decode a function plus args/kwargs, call it, and encode the result
        try:
            self.scope.update(dict(func=func,argin=argin,kwargin=kwargin))
            exec("func=decode_and_load(func)", self.scope)
            exec("arg=decode_and_load(argin)", self.scope)
            exec("kwarg=decode_and_load(kwargin)", self.scope)
            exec("result=func(*arg,**kwarg)", self.scope)
            argout.value=eval("dump_and_encode(result)",self.scope)
            return dump_and_encode(None)
        except Exception as ex:
            argout.value=dump_and_encode(None)
            return dump_and_encode(RemoteCodeException(ex))
class RemoteCodeInterface(PythonCodeInterface):
    """Client-side interface to RemoteCodeImplementation.

    The @legacy_function stubs describe the wire protocol (string in/out);
    the high-level execute/assign/evaluate/func methods en/decode arguments
    and re-raise remote errors locally.
    """
    def __init__(self, **options):
        PythonCodeInterface.__init__(self, RemoteCodeImplementation, **options)
    @legacy_function
    def _func():
        function = LegacyFunctionSpecification()
        function.addParameter('func', dtype='string', direction=function.IN)
        function.addParameter('argin', dtype='string', direction=function.IN)
        function.addParameter('kwargin', dtype='string', direction=function.IN)
        function.addParameter('argout', dtype='string', direction=function.OUT)
        function.result_type = 'string'
        return function
    @legacy_function
    def _exec():
        function = LegacyFunctionSpecification()
        function.addParameter('arg', dtype='string', direction=function.IN)
        function.result_type = 'string'
        return function
    @legacy_function
    def _eval():
        function = LegacyFunctionSpecification()
        function.addParameter('arg', dtype='string', direction=function.IN)
        function.addParameter('argout', dtype='string', direction=function.OUT)
        function.result_type = 'string'
        return function
    @legacy_function
    def _assign():
        function = LegacyFunctionSpecification()
        function.addParameter('lhs', dtype='string', direction=function.IN)
        function.addParameter('argin', dtype='string', direction=function.IN)
        function.result_type = 'string'
        return function
    def execute(self,express):
        """Execute a statement remotely; re-raise any remote error."""
        err=decode_and_load( self._exec(express) )
        if err:
            raise err
    def assign(self,lhs,arg):
        """Assign a (picklable) value to a remote name."""
        err=decode_and_load( self._assign(lhs, dump_and_encode(arg)) )
        if err:
            raise err
    def evaluate(self,express):
        """Evaluate a remote expression and return its decoded value."""
        result,err=self._eval(express)
        err=decode_and_load( err)
        if err :
            raise err
        return decode_and_load(result)
    def func(self,f,*args,**kwargs):
        """Call a picklable function remotely and return its result."""
        result,err=self._func( dump_and_encode(f),
                               dump_and_encode(args),
                               dump_and_encode(kwargs) )
        err=decode_and_load( err)
        if err :
            raise err
        return decode_and_load(result)
    def async_func(self,f,*args,**kwargs):
        """Call a picklable function remotely, returning an async request.

        The attached result handler decodes the result (or re-raises the
        remote error) when the request is harvested.
        """
        request=self._func.asynchronous(dump_and_encode(f),
                                dump_and_encode(args),
                                dump_and_encode(kwargs) )
        def f(x):
            result,err=x()
            err=decode_and_load( err)
            if err :
                raise err
            return decode_and_load(result)
        request.add_result_handler( f )
        return request
class Job(object):
    """Record of one remotely executed function call.

    Holds the callable and its arguments, and after completion either the
    result or the error; ``retries`` counts how often the job was requeued.
    """
    def __init__(self, f, args, kwargs,retries=0):
        self.f=f
        self.args=args
        self.kwargs=kwargs
        self.result=None     # set by JobServer._finalize_job on success
        self.request=None    # the pending async request while running
        self.err=None        # set by JobServer._finalize_job on failure
        self.retries=retries
class JobServer(object):
    """Farms jobs out over remote hosts via RemoteCodeInterface workers.

    Jobs are queued and dispatched to idle code instances; completion is
    collected through an AsyncRequestsPool. Jobs that fail with a
    communication error (anything but RemoteCodeException) can be retried
    up to ``max_retries`` times on another host.
    """
    def __init__(self,hosts=[],channel_type="mpi",preamble=None, retry_jobs=True,
                 no_wait=True,verbose=True,max_retries=2, use_threading=False):
        # NOTE: hosts=[] is a mutable default, but it is only read here,
        # never mutated, so it is harmless in practice.
        self.hosts=[]
        self.job_list=deque()
        self.idle_codes=[]
        self.retry_jobs=retry_jobs
        self.max_retries=max_retries
        self._finished_jobs=deque()
        self.preamble=preamble
        self.pool=AsyncRequestsPool()
        self.number_available_codes=0
        self.number_starting_codes=0
        self.no_wait=no_wait
        self.last_finished_job=None
        self.use_threading=use_threading
        self.verbose=verbose
        if self.verbose:
            print("AMUSE JobServer launching")
        self.add_hosts(hosts=hosts,channel_type=channel_type)
    def no_hosts(self):
        # True when no code instance is available nor still starting up
        if self.number_available_codes==0 and self.number_starting_codes==0:
            return True
        return False
    def add_hosts(self,hosts=[],channel_type="mpi"):
        self.hosts.append(hosts)
        if self.verbose:
            print("JobServer: connecting %i hosts"%len(hosts))
        if not self.use_threading:
            # start the remote codes one after another (blocking)
            for host in hosts:
                self.number_starting_codes+=1
                self._startup( channel_type=channel_type,hostname=host,label=host,
                               copy_worker_code=True,redirection="none" )
        else:
            # start the remote codes concurrently, one daemon thread per host
            threads=[]
            for host in hosts:
                kwargs=dict( channel_type=channel_type,hostname=host,label=host,
                             copy_worker_code=True,redirection="none" )
                threads.append( threading.Thread(target=self._startup,kwargs=kwargs) )
            for thread in threads:
                self.number_starting_codes+=1
                thread.daemon=True
                thread.start()
            if not self.no_wait:
                if self.verbose:
                    print("... waiting")
                for thread in threads:
                    thread.join()
            else:
                if self.verbose:
                    print("... waiting for first available host")
                # busy-wait until at least one host is up (or all failed)
                while self.number_available_codes==0 and self.number_starting_codes>0:
                    sleep(0.1)
        if self.no_wait:
            if self.verbose:
                print("JobServer: launched")
        else:
            if self.verbose:
                print("JobServer: launched with", len(self.idle_codes),"hosts")
    def _startup(self, *args,**kwargs):
        # Start one RemoteCodeInterface; on success run the preamble and
        # either pick up a queued job immediately or register as idle.
        try:
            code=RemoteCodeInterface(*args,**kwargs)
        except Exception as ex:
            self.number_starting_codes-=1
            print("JobServer: startup failed on", kwargs['hostname'] or "default")
            print(ex)
        else:
            if self.preamble is not None:
                code.execute(self.preamble)
            self.number_available_codes+=1
            self.number_starting_codes-=1
            if self.no_wait:
                # progress report at powers of two, plus a final summary
                if self.number_available_codes & (self.number_available_codes-1) ==0:
                    if self.verbose:
                        print("JobServer: hosts now available:",self.number_available_codes)
                if self.number_starting_codes==0:
                    if self.verbose:
                        print("JobServer: hosts in total:", self.number_available_codes)
            if self.job_list:
                self._add_job(self.job_list.popleft(), code)
            else:
                self.idle_codes.append(code)
    def exec_(self,arg):
        # Execute a statement on every idle host, after all hosts have
        # started and all pending jobs have finished.
        while self.number_starting_codes>0:
            sleep(0.1)
        self.waitall()
        for code in self.idle_codes:
            code.execute(arg)
    def submit_job(self,f,args=(),kwargs={}):
        """Queue a function call; it is dispatched as soon as a host is idle."""
        if len(self.pool)==0 and not self.job_list:
            if self.verbose:
                print("JobServer: submitting first job on queue")
        job=Job(f,args,kwargs)
        self.job_list.append( job)
        if self.idle_codes:
            self._add_job(self.job_list.popleft(), self.idle_codes.pop())
        return job
    def wait(self):
        """Wait for one job to finish (becomes last_finished_job).

        Returns False when nothing is queued or running anymore.
        """
        if self._finished_jobs:
            self.last_finished_job=self._finished_jobs.popleft()
            return True
        elif len(self.pool)==0 and not self.job_list:
            if self.verbose:
                print("JobServer: no more jobs on queue or running")
            return False
        else:
            # busy-wait until a starting host picks up the queued work;
            # fail loudly if that can never happen
            while len(self.pool)==0 and self.job_list:
                if self.number_available_codes>0:
                    raise Exception("JobServer: this should not happen")
                if self.number_starting_codes==0:
                    raise Exception("JobServer: no codes available")
            self.pool.wait()
            self.last_finished_job=self._finished_jobs.popleft()
            return True
    def waitall(self):
        """Block until every submitted job has finished."""
        while len(self.pool)==0 and self.job_list:
            if self.number_available_codes>0:
                raise Exception("JobServer: this should not happen")
            if self.number_starting_codes==0:
                raise Exception("JobServer: no codes available")
        while len(self.pool)>0 or self.job_list:
            self.pool.wait()
            self.last_finished_job=self._finished_jobs[-1]
    @property
    def finished_jobs(self):
        # generator draining the finished-jobs queue
        while self._finished_jobs:
            yield self._finished_jobs.popleft()
    def _finalize_job(self,request,job,code):
        # Pool callback: record result or error, recycle or drop the code
        # instance, requeue failed jobs, and dispatch the next queued job.
        try:
            job.result=request.result()
            job.err=None
        except Exception as ex:
            job.result=None
            job.err=ex
        if job.err and not isinstance(job.err,RemoteCodeException):
            # communication failure: discard this code instance and retry
            # the job on another host (up to max_retries times)
            del code
            self.number_available_codes-=1
            if self.retry_jobs and job.retries<self.max_retries:
                retry=Job(job.f,job.args,job.kwargs,job.retries+1)
                self.job_list.append(retry)
        else:
            self.idle_codes.append(code)
        if self.job_list and self.idle_codes:
            self._add_job( self.job_list.popleft(), self.idle_codes.pop())
            if not self.job_list:
                if self.verbose:
                    print("JobServer: last job dispatched")
        self._finished_jobs.append(job)
    def _add_job(self,job,code):
        # dispatch one job on one code instance, registering the request
        job.request=code.async_func(job.f,*job.args,**job.kwargs)
        self.pool.add_request(job.request,self._finalize_job, [job,code])
    def __del__(self):
        if not self.no_hosts():
            self.waitall()
        if self.job_list:
            warnings.warn("JobServer: Warning: shutting down with unfinished jobs")
        for code in self.idle_codes:
            code.stop()
        if self.number_starting_codes>0:
            warnings.warn("JobServer: Warning: some hosts startup threads possibly blocking")
| 12,419
| 34.084746
| 119
|
py
|
amuse
|
amuse-main/src/amuse/ext/sph_to_star.py
|
import numpy
from amuse.units import constants, units
from amuse.datamodel import Grid
class SPH2StellarModel(object):
    """
    Converts a set of SPH particles to a 1D stellar evolution model. If the SPH
    model of the star included a (non-SPH) 'core' particle, supply it via the
    optional core_particle keyword argument.
    SPH particles are sorted using the pressure

    Useful for continuing the stellar evolution of merged stars.

    :argument sph_particles: The SPH particles to be converted to a stellar model
    :argument core_particle: Gravitational particle representing the stellar core (optional)
    :argument particles_per_zone: number of SPH particles binned into each radial zone
    """
    def __init__(self, sph_particles, core_particle = None, particles_per_zone=1):
        self.sph_particles = sph_particles
        self.core_particle = core_particle
        self.particles_per_zone = particles_per_zone
    def derive_stellar_structure(self):
        # Sort particles by decreasing pressure (center first) and bin them
        # into zones of particles_per_zone particles each; len(sph_particles)
        # must be a multiple of particles_per_zone for the reshape to work.
        sorted = self.sph_particles.pressure.argsort()[::-1]
        binned = sorted.reshape((-1, self.particles_per_zone))
        stellar_model = Grid(binned.shape[0])
        stellar_model.dmass = self.sph_particles.mass[binned].sum(axis=1)
        stellar_model.mass = stellar_model.dmass.accumulate()
        stellar_model.pressure= self.sph_particles.pressure[binned].sum(axis=1)
        # zone density = zone mass over summed particle volumes (m/rho)
        stellar_model.rho = stellar_model.dmass / (self.sph_particles.mass / self.sph_particles.density)[binned].sum(axis=1)
        # cumulative shell volumes give the outer radius of each zone
        # (the trailing ``* 1`` looks redundant -- presumably forces a copy
        # or unit evaluation; TODO confirm before removing)
        stellar_model.radius = ((3 / (4 * numpy.pi)) * stellar_model.dmass / stellar_model.rho).accumulate()**(1.0/3.0) * 1
        # temperature from mean internal energy per particle (ideal gas)
        stellar_model.temperature = ((self.sph_particles.mass * self.sph_particles.u * self.sph_particles.mu)[binned].sum(axis=1) /
            (1.5 * constants.kB * stellar_model.dmass)).as_quantity_in(units.K)
        zeros = numpy.zeros(len(stellar_model.dmass))
        # luminosity is unknown here; -1 LSun marks it as "to be computed"
        stellar_model.luminosity = zeros - 1 | units.LSun
        # mass-weighted composition per zone, for every species present on
        # the SPH particles; absent species default to zero abundance
        attribute_names = self.sph_particles.get_attribute_names_defined_in_store()
        for attribute, name in [("h1", "X_H"), ("he4", "X_He"), ("c12", "X_C"), ("n14", "X_N"),
                ("o16", "X_O"), ("ne20", "X_Ne"), ("mg24", "X_Mg"), ("si28", "X_Si"), ("fe56", "X_Fe")]:
            if attribute in attribute_names:
                setattr(stellar_model, name, (self.sph_particles.mass * getattr(self.sph_particles, attribute))[binned].sum(axis=1) / stellar_model.dmass)
            else:
                setattr(stellar_model, name, zeros)
        return stellar_model
def convert_SPH_to_stellar_model(sph_particles, **keyword_arguments):
    """
    Convert a set of SPH particles into a 1D stellar-evolution model.

    The particles are sorted by pressure and binned into radial zones; this is
    useful for continuing the stellar evolution of merged stars. If the SPH
    model included a (non-SPH) 'core' particle, pass it via the core_particle
    keyword argument (not yet supported).

    :argument sph_particles: The SPH particles to be converted to a stellar model
    :argument core_particle: Gravitational particle representing the stellar core (optional)
    :argument particles_per_zone: The number of sph particles within each mesh cell (default=1)
    """
    return SPH2StellarModel(sph_particles, **keyword_arguments).derive_stellar_structure()
| 3,314
| 49.227273
| 154
|
py
|
amuse
|
amuse-main/src/amuse/ext/stellar_tidal_evolution.py
|
# Equations for tidal evolution of binary/planetary orbits
# From Hansen 2010, based on Eggleton et al. 1998
import threading
import numpy
import math
from scipy import integrate
from amuse.lab import *
from amuse.support import literature
#2010ApJ...723..285H
def sigma_planet():
    """Tidal dissipation constant for the planet (Hansen 2010, 2010ApJ...723..285H)."""
    calibration = 3.4e-7  # dimensionless calibration factor sigma_p
    return 5.9e-54 * calibration | units.g**-1 * units.cm**-2 * units.s**-1
#2010ApJ...723..285H
def sigma_star():
    """Tidal dissipation constant for the star (Hansen 2010, 2010ApJ...723..285H)."""
    calibration = 7.8e-8  # dimensionless calibration factor sigma_s
    # (calibration = 1e+9 would correspond to the Earth)
    return 6.4e-59 * calibration | units.g**-1 * units.cm**-2 * units.s**-1
def angular_frequency(Ms, Mp, a):
    """Keplerian mean motion of a binary with total mass Ms+Mp at semi-major axis a."""
    total_mass = Ms + Mp
    return (constants.G * total_mass / a**3)**0.5
def J_orb(Ms, Mp, a, e):
    """Orbital angular momentum of a binary with masses (Ms, Mp) and elements (a, e)."""
    total_mass = Ms + Mp
    return Ms * Mp * (constants.G * a * (1 - e**2) / total_mass)**0.5
def interp(x, *args, **kwds):
    """Wrap numpy.interp so a scalar abscissa yields a plain Python scalar.

    :param x: abscissa value(s) at which to interpolate; scalar or array-like
    :param args, kwds: passed straight through to numpy.interp (xp, fp, ...)
    :returns: a Python float for scalar x, else the array from numpy.interp

    Fixed: the original tested ``type(x) in (float, int)``, which rejects
    subclasses; isinstance is the idiomatic (and more general) check.
    """
    if isinstance(x, (float, int)):
        # Wrap the scalar in a list so numpy.interp returns an array,
        # then unwrap it back to a Python scalar with .item().
        return numpy.interp([x], *args, **kwds).item()
    return numpy.interp(x, *args, **kwds)
class TidalEvolution(literature.LiteratureReferencesMixIn):
    """
    Tidal evolution between a planet(esimal) and a star.

    Orbit-averaged equilibrium-tide equations with the dissipation constants
    of Hansen (2010), based on Eggleton et al. (1998).

    Based on

    .. [#] ** 2010ApJ...723..285H
    .. [#] **
    """

    def __init__(self, central_particle=None):
        """Set up the integrator around *central_particle* (a one-star set).

        Fixed: the original signature used a shared mutable default
        (``central_particle=Particles()``); a fresh empty set is now created
        per instance instead, which is backward compatible.
        """
        literature.LiteratureReferencesMixIn.__init__(self)
        if central_particle is None:
            central_particle = Particles()
        self.current_time = 0 | units.s
        # Orbits with pericenter closer than this many stellar radii are
        # considered tidally interacting.
        self.pericenter_interaction_factor = 4
        self.central_particle = central_particle
        if not hasattr(self.central_particle, "gyration_radius_sq"):
            self.central_particle.gyration_radius_sq = 0.2
        if hasattr(self.central_particle, "radius"):
            self.central_particle.old_radius = self.central_particle.radius
        self.orbiters = Particles(0)
        if not hasattr(self.central_particle, "Omega"):
            # Default stellar spin: roughly the solar rotation rate.
            self.central_particle.Omega = 2.6e-6|units.s**-1
        self.all_merged_orbiters = Particles()

    @property
    def particles(self):
        """All particles known to the module: central star plus orbiters."""
        return ParticlesSuperset([self.central_particle, self.orbiters])

    def set_current_time(self, time):
        self.current_time = time

    def get_semimajor_axis(self):
        return self.semimajor_axis

    def get_eccentricity(self):
        return self.eccentricity

    def get_pericenter_interaction_factor(self):
        return self.pericenter_interaction_factor

    def orbital_evolution_time_scale(self):
        """Eccentricity-damping timescale |e/edot| for the first orbiter."""
        e = self.orbiters[0].eccentricity
        a = self.orbiters[0].semimajor_axis
        m = self.orbiters[0].mass
        r = self.orbiters[0].radius
        M = self.central_particle[0].mass
        R = self.central_particle[0].radius
        O = self.central_particle[0].Omega
        Op = angular_frequency(M, m, a)
        dt = abs(e/self.edot_star(a, M, R, m, r, O, Op, e))
        return dt

    def add_particles(self, p):
        self.orbiters.add_particles(p)

    def delete_particles(self, p):
        # Fixed typo: was self.orbiters.removel_particles(p), which raised
        # AttributeError on every call.
        self.orbiters.remove_particles(p)

    def evolve_model(self, time):
        """Advance all tidally interacting orbiters to *time*.

        Side effects: updates orbital elements and the stellar spin, and
        fills self.orbiters_with_error and self.all_merged_orbiters for the
        caller to inspect.
        """
        M = self.central_particle.mass
        R = self.central_particle.radius
        Os = self.central_particle.Omega
        # Only bodies whose pericenter comes within a few stellar radii
        # experience significant tides.
        interacting_bodies = self.orbiters.select(lambda a, e: a*(1-e)<self.pericenter_interaction_factor*R,["semimajor_axis", "eccentricity"])
        if len(interacting_bodies):
            print("N tidal:", len(self.orbiters), "of which N=", len(interacting_bodies), "tidally interacting")
        self.orbiters_with_error = Particles()
        # NOTE: a threaded version of this loop was removed because
        # scipy's integrate.quadpack is not thread-safe (AvanE & SPZ, 7 Jan 2014).
        for pi in interacting_bodies:
            self.evolve_individual_orbiter(pi, time, M, R, Os)
        print("Central_particle:", self.central_particle.Omega)
        self.current_time = time
        if len(interacting_bodies):
            print("Post tidal interaction", len(interacting_bodies))
        if len(self.orbiters_with_error)>0:
            print("Error in N=", len(self.orbiters_with_error), "orbiters.")
            print(self.orbiters_with_error)
        # Bodies whose pericenter dips below the stellar surface have merged.
        merged_orbiters = interacting_bodies.select(lambda a, e, r: a*(1-e)<R+r,["semimajor_axis", "eccentricity", "radius"])
        self.all_merged_orbiters = Particles()
        if len(merged_orbiters)>0:
            print("Merged orbiters N= ", len(merged_orbiters))
            print(merged_orbiters)
            self.all_merged_orbiters.add_particles(merged_orbiters-self.orbiters_with_error)

    def contains_nan(self, dO):
        """Return True if any entry of *dO* is NaN (detects failed quadrature)."""
        return any(math.isnan(xi) for xi in dO)

    def evolve_multiple_orbiters(self, interacting_bodies, time, M, R, Os):
        # Helper kept for the (disabled) threaded code path.
        for pi in interacting_bodies:
            print("ev=", pi.key)
            self.evolve_individual_orbiter(pi, time, M, R, Os)

    def evolve_individual_orbiter(self, oi, time, M, R, Os):
        """Integrate (a, e) of orbiter *oi* up to *time* and accumulate the
        resulting change of the stellar spin.

        The step size is capped at the instantaneous |e/edot| so the rates,
        held constant over each step, remain approximately valid.
        """
        dOmega = zero
        m = oi.mass
        r = oi.radius
        current_time = self.current_time
        while current_time<time:
            a = oi.semimajor_axis
            e = oi.eccentricity
            Op = angular_frequency(M, m, a)
            dt = time-current_time
            dt = min(dt, abs(e/self.edot_star(a, M, R, m, r, Os, Op, e)) )
            t_end = current_time + dt
            # The integrands ignore x: the rates are frozen during the step,
            # so each quad effectively evaluates rate * dt (with error est.).
            da = integrate.quad(lambda x: self.adot_star(a, M, R, m, r, Os, Op, e).value_in(units.RSun/units.s), current_time.value_in(units.s), t_end.value_in(units.s))
            de = integrate.quad(lambda x: self.edot_star(a, M, R, m, r, Os, Op, e).value_in(units.s**-1), current_time.value_in(units.s), t_end.value_in(units.s))
            dO = integrate.quad(lambda x: self.Omegadot_star(a, M, R, m, r, Os, Op, e).value_in(units.s**-2), current_time.value_in(units.s), t_end.value_in(units.s))
            if self.contains_nan(da):
                print("NAN's detected in da", da)
                self.orbiters_with_error.add_particles(oi.as_set())
                break
            if self.contains_nan(de):
                print("NAN's detected, de", de)
                self.orbiters_with_error.add_particles(oi.as_set())
                break
            if self.contains_nan(dO):
                print("NAN's detectedm dO", dO)
                self.orbiters_with_error.add_particles(oi.as_set())
                break
            oi.semimajor_axis += da[0] | units.RSun
            oi.eccentricity += de[0]
            if oi.eccentricity<0:
                # Clamp: tides cannot drive the eccentricity below zero.
                oi.eccentricity = 0
            dOmega += dO[0] | units.s**-1
            current_time += dt
        self.central_particle.Omega += dOmega

    def J_star(self, Ms, Rs, Omega_s):
        """Stellar spin angular momentum I*Omega with I = k^2 M R^2."""
        k2s = self.central_particle[0].gyration_radius_sq
        Is = k2s*Ms*Rs*Rs
        return Is*Omega_s

    def J_planet(self, Mp, Rp, k2p, Omega_p):
        """Planetary spin angular momentum I*Omega.

        NOTE(review): the k2p argument is ignored and overwritten with the
        constant 0.2 below; kept as-is to preserve behavior.
        """
        k2p = 0.2 # depends on the planet
        Ip = k2p*Mp*Rp*Rp
        return Ip*Omega_p

    # timescale for particle with mass Mb
    def tidal_timescale(self, Ma, Mb, Rb, a, sigma):
        """Equilibrium-tide timescale; infinite when dissipation vanishes."""
        denominator = (9*Ma*(Ma + Mb)*(Rb**10)*sigma)
        t_tidal = float("infinity") | units.s
        if not denominator==zero:
            t_tidal = Mb*(a**8) / denominator
        return t_tidal

    def Tp(self, Ms, Mp, Rp, a):
        """Tidal timescale of the planet."""
        return self.tidal_timescale(Ms, Mp, Rp, a, sigma_planet())

    def Ts(self, Ms, Mp, Rs, a):
        """Tidal timescale of the star."""
        return self.tidal_timescale(Mp, Ms, Rs, a, sigma_star())

    def adot_planet(self, a, Ms, Rs, Mp, Rp, Omega_s, Omega_p, e):
        """da/dt from tides raised on the planet (Hansen 2010)."""
        T_p = self.Tp(Ms, Mp, Rp, a)
        omega = angular_frequency(Ms, Mp, a)
        return -(a/T_p) * (self.f1(e) - Omega_p/omega * self.f2(e))

    def adot_star(self, a, Ms, Rs, Mp, Rp, Omega_s, Omega_p, e):
        """da/dt from tides raised on the star (Hansen 2010)."""
        T_s = self.Ts(Ms, Mp, Rs, a)
        omega = angular_frequency(Ms, Mp, a)
        adot = -(a/T_s) * (self.f1(e) - Omega_s/omega * self.f2(e))
        return adot

    def edot_planet(self, a, Ms, Rs, Mp, Rp, Omega_s, Omega_p, e):
        """de/dt from tides raised on the planet (Hansen 2010)."""
        T_p = self.Tp(Ms, Mp, Rp, a)
        omega = angular_frequency(Ms, Mp, a)
        return -9./2. * e/T_p * (self.f3(e) - 11./18. * Omega_p/omega * self.f4(e))

    def edot_star(self, a, Ms, Rs, Mp, Rp, Omega_s, Omega_p, e):
        """de/dt from tides raised on the star (Hansen 2010)."""
        T_s = self.Ts(Ms, Mp, Rs, a)
        omega = angular_frequency(Ms, Mp, a)
        edot = -9./2. * e/T_s * (self.f3(e) - 11./18. * Omega_s/omega * self.f4(e))
        return edot

    def Omegadot_planet(self, a, Ms, Rs, Mp, Rp, Omega_s, Omega_p, e):
        """dOmega/dt of the planetary spin (Hansen 2010).

        Fixed: the original called self.J_planet(Mp, Rp, Omega_p) with only
        three arguments, a guaranteed TypeError; since J_planet ignores k2p
        anyway, the constant 0.2 is passed explicitly.
        """
        T_p = self.Tp(Ms, Mp, Rp, a)
        omega = angular_frequency(Ms, Mp, a)
        gamma = J_orb(Ms, Mp, a, e)/self.J_planet(Mp, Rp, 0.2, Omega_p)
        return gamma/2. * Omega_p/T_p * (self.f5(e) - Omega_p/omega * self.f6(e))

    def Omegadot_star(self, a, Ms, Rs, Mp, Rp, Omega_s, Omega_p, e):
        """dOmega/dt of the stellar spin (Hansen 2010)."""
        T_s = self.Ts(Ms, Mp, Rs, a)
        omega = angular_frequency(Ms, Mp, a)
        gamma = J_orb(Ms, Mp, a, e)/self.J_star(Ms, Rs, Omega_s)
        return gamma/2. * Omega_s/T_s * (self.f5(e) - Omega_s/omega * self.f6(e))

    # Eccentricity functions f1..f6 of Eggleton et al. (1998); all reduce
    # to 1 for a circular orbit.
    def f1(self, e):
        res = 1
        if (e > 0):
            res = (1 + 31./2. *e*e + 255./8. *(e**4) + 185./16. *(e**6) + 25./64. *(e**8)) \
                /math.pow((1 - e*e), 7.5)
        return res

    def f2(self, e):
        res = 1
        if (e > 0):
            res = (1 + 15./2. *e*e + 45./8. *math.pow(e,4) + 5./16. *math.pow(e, 6)) \
                /math.pow((1 - e*e), 6)
        return res

    def f3(self, e):
        res = 1
        if (e > 0):
            res = (1 + 15./4. * e*e + 15./8. *math.pow(e,4) + 5./64. *math.pow(e, 6)) \
                /math.pow((1 - e*e), 6.5)
        return res

    def f4(self, e):
        res = 1
        if (e > 0):
            res = (1 + 3./2. *e*e + 1./8. *math.pow(e,4)) /math.pow((1 - e*e), 5)
        return res

    def f5(self, e):
        res = 1
        if (e > 0):
            res = (1 + 15./2. * e*e + 45./8. *math.pow(e,4) + 5./16. *math.pow(e, 6)) \
                /math.pow((1 - e*e), 6.5)
        return res

    def f6(self, e):
        res = 1
        if (e > 0):
            res = (1 + 3 * e*e + 3./8. *math.pow(e,4)) /math.pow((1 - e*e), 5)
        return res
#import unittest
from amuse.test.amusetest import TestCase
class TestTidalInteraction(TestCase):
    # Integration test: evolve a 1 MSun star to a helium white dwarf while
    # coupling SeBa (stellar evolution) to TidalEvolution. The very eccentric
    # inner planet should be removed (merged or errored), leaving one orbiter.
    def test_remove_orbiters(self):
        M = 1 | units.MSun
        m = 1 | units.MJupiter
        a = 1 | units.AU
        e = 0.99
        Omega_s = 2.6e-6|units.s**-1
        star = Particles(1)
        star.mass = M
        star.Omega = Omega_s
        star.stellar_type = 1|units.stellar_type
        stellar = SeBa()
        stellar.particles.add_particles(star)
        channel_from_se_to_framework = stellar.particles.new_channel_to(star)
        channel_from_se_to_framework.copy_attributes(["age", "mass", "radius", "luminosity", "temperature", "stellar_type"])
        tidal = TidalEvolution(star)
        # Two planets: a highly eccentric Jupiter at 1 AU and a heavier,
        # near-circular one at 5.2 AU.
        planet = Particles(2)
        planet.mass = [1, 100] * m
        planet.radius = [0.001, 0.001] |units.RSun
        planet.semimajor_axis = [1, 5.2] * a
        planet.eccentricity = [e, 0.1]
        tidal.add_particles(planet)
        channel_from_tc_to_framework = tidal.central_particle.new_channel_to(star)
        channel_from_to_to_framework = tidal.orbiters.new_channel_to(planet)
        channel_from_framework_to_tc = star.new_channel_to(tidal.central_particle)
        channel_from_framework_to_to = planet.new_channel_to(tidal.orbiters)
        dt = 1|units.Myr
        time = 0*dt
        He_WD = 10 | units.stellar_type
        # Step until the star becomes a helium white dwarf (stellar type 10).
        while star.stellar_type<He_WD:
            print("T=", stellar.model_time)
            stellar.particles.evolve_one_step()
            time = stellar.particles.age
            # Adiabatic orbit expansion from stellar mass loss, and spin change
            # from radius change at (approximately) constant angular momentum.
            adiabatic_expansion_factor = star[0].mass/stellar.particles[0].mass
            tidal.central_particle.mass = stellar.particles[0].mass
            star[0].Omega = star[0].Omega * (star[0].radius/stellar.particles[0].radius)**2
            planet.semimajor_axis *= adiabatic_expansion_factor
            channel_from_se_to_framework.copy_attributes(["age", "mass", "radius", "luminosity", "temperature", "stellar_type"])
            channel_from_framework_to_tc.copy_attributes(["mass", "radius", "Omega"])
            channel_from_framework_to_to.copy_attributes(["semimajor_axis"])
            tidal.central_particle.Omega = star[0].Omega
            tidal.central_particle.radius = star[0].radius
            tidal.evolve_model(time)
            channel_from_to_to_framework.copy_attributes(["semimajor_axis", "eccentricity"])
            if len(tidal.orbiters_with_error)>0:
                print("Remove orbiter with error:", len(tidal.orbiters_with_error))
                tidal.orbiters.remove_particles(tidal.orbiters_with_error)
                # NOTE(review): remove_particle (singular) is handed a particle
                # set here -- confirm AMUSE accepts a set, else this should be
                # remove_particles.
                planet.remove_particle(tidal.orbiters_with_error)
            if len(tidal.all_merged_orbiters)>0:
                print("Merged planets/asteroids: N=", len(tidal.all_merged_orbiters))
                print("removed:", tidal.all_merged_orbiters)
                tidal.orbiters.remove_particles(tidal.all_merged_orbiters)
                planet.remove_particle(tidal.all_merged_orbiters)
            print("Remaining orbiters:", tidal.orbiters)
            print("N=", len(tidal.orbiters))
        self.assertEqual(len(tidal.orbiters), 1)
def tidal_interaction(M, m, a, e, Omega_s, tend):
    # Driver: couple SeBa stellar evolution to TidalEvolution for a single
    # planet (mass m, elements a, e) around a star of mass M spinning at
    # Omega_s, until tend. Prints the evolving orbital elements.
    star = Particles(1)
    star.mass = M
    star.Omega = Omega_s
    star.stellar_type = 1|units.stellar_type
    stellar = SeBa()
    stellar.particles.add_particles(star)
    channel_from_se_to_framework = stellar.particles.new_channel_to(star)
    channel_from_se_to_framework.copy_attributes(["age", "mass", "radius", "luminosity", "temperature", "stellar_type"])
    tidal = TidalEvolution(star)
    planet = Particles(1)
    planet.mass = 1*m
    planet.radius = 0.001 |units.RSun
    planet.semimajor_axis = 1.*a
    planet.eccentricity = e
    tidal.add_particles(planet)
    # Channels to keep the in-memory sets and the two codes synchronized.
    channel_from_tc_to_framework = tidal.central_particle.new_channel_to(star)
    channel_from_to_to_framework = tidal.orbiters.new_channel_to(planet)
    channel_from_framework_to_tc = star.new_channel_to(tidal.central_particle)
    channel_from_framework_to_to = planet.new_channel_to(tidal.orbiters)
    # bodies = ParticlesSuperset([star, planet])
    # dt = 1|units.Myr
    dt = tidal.orbital_evolution_time_scale()
    time = zero
    while time<tend:
        # Time step: minimum of the tidal and stellar-evolution timescales,
        # but never smaller than 1 Myr.
        dt_se = stellar.particles[0].time_step
        dt = min(dt, dt_se)
        dt = max(1|units.Myr, dt)
        print("dt_tidal=", dt)
        time += dt
        stellar.evolve_model(time)
        # Adiabatic orbit expansion from stellar mass loss; spin response to
        # the radius change at (approximately) constant angular momentum.
        adiabatic_expansion_factor = star[0].mass/stellar.particles[0].mass
        tidal.central_particle.mass = stellar.particles[0].mass
        star[0].Omega = star[0].Omega * (star[0].radius/stellar.particles[0].radius)**2
        #expand planetary orbit due to stellar mass loss
        planet.semimajor_axis *= adiabatic_expansion_factor
        channel_from_se_to_framework.copy_attributes(["age", "mass", "radius", "luminosity", "temperature", "stellar_type"])
        channel_from_framework_to_tc.copy_attributes(["mass", "radius", "Omega"])
        channel_from_framework_to_to.copy_attributes(["semimajor_axis"])
        tidal.central_particle.Omega = star[0].Omega
        tidal.central_particle.radius = star[0].radius
        tidal.evolve_model(time)
        channel_from_to_to_framework.copy_attributes(["semimajor_axis", "eccentricity"])#, "merged_with_central_star"])
        if len(tidal.all_merged_orbiters)>0:
            print("Merged planets/asteroids: N=", len(tidal.all_merged_orbiters))
            print("removed:", tidal.all_merged_orbiters)
            tidal.orbiters.remove_particles(tidal.all_merged_orbiters)
            # NOTE(review): remove_particle (singular) receives a particle
            # set here -- confirm AMUSE accepts this.
            planet.remove_particle(tidal.all_merged_orbiters)
            print("Remaining orbiters:", tidal.orbiters)
            print("N=", len(tidal.orbiters))
            if len(tidal.orbiters)==0:
                return
        print("current time=", tidal.current_time, tidal.orbiters.semimajor_axis, tidal.orbiters.eccentricity)
def new_option_parser():
    """Build the command-line parser (AMUSE OptionParser with unit support)."""
    from amuse.units.optparse import OptionParser
    parser = OptionParser()
    parser.add_option("-M", unit=units.MSun,
                      dest="M", type="float", default = 1|units.MSun,
                      help="stellar mass [%default]")
    parser.add_option("-m", unit=units.MJupiter,
                      dest="m", type="float", default = 0.001|units.MJupiter,
                      help="planet mass [%default]")
    parser.add_option("-a", unit=units.RSun,
                      dest="a", type="float", default = 1|units.RSun,
                      help="planet semi major axis [%default]")
    parser.add_option("-e",
                      dest="e", type="float", default = 0.6,
                      help="planet eccentricity [%default]")
    parser.add_option("-t", unit=units.Myr,
                      dest="tend", type="float", default = 1|units.Myr,
                      help="end time of integration [%default]")
    parser.add_option("-O", unit=units.s**-1,
                      dest="Omega_sun", type="float", default = 2.6e-6|units.s**-1,
                      help="Stellar angular something [%default]")
    return parser
if __name__ in ('__main__', '__plot__'):
    # Pretty-print AMUSE quantities in solar-system-friendly units.
    set_printing_strategy("custom", #nbody_converter = converter,
                          preferred_units = [units.MSun, units.RSun, units.Myr],
                          precision = 11, prefix = "",
                          separator = " [", suffix = "]")
    o, arguments = new_option_parser().parse_args()
    tidal_interaction(o.M, o.m, o.a, o.e, o.Omega_sun, o.tend)
| 18,470
| 39.417943
| 169
|
py
|
amuse
|
amuse-main/src/amuse/ext/plummer.py
|
import warnings
from amuse.ic.plummer import *

# Backwards-compatibility shim: re-export everything from the new location
# and warn users who still import from amuse.ext.plummer.
warnings.warn("amuse.ext.plummer has moved to amuse.ic.plummer", DeprecationWarning)
| 134
| 21.5
| 84
|
py
|
amuse
|
amuse-main/src/amuse/ext/hydro_collision.py
|
import numpy
from amuse.units import units, nbody_system, constants
from amuse.datamodel import Particles
from amuse.plot import plot, scatter, xlabel, ylabel, native_plot
from amuse.ext.star_to_sph import convert_stellar_model_to_SPH
from amuse.ext.sph_to_star import convert_SPH_to_stellar_model
from amuse.community.kepler.interface import Kepler
from amuse.community.hop.interface import Hop
class StellarEncounterInHydrodynamics(object):
"""
Resolves collisions between stars by converting them to SPH models, let them
collide in an SPH code, and converting the resulting SPH particle distribution
back to a 1D stellar evolution model.
Requires a stellar evolution code to supply the internal structure of the
stars for the convert_stellar_model_to_SPH routine.
Requires a gravity code to set up the initial configuration. The stars in the
gravity code have typically already collided, so they are first "evolved" back
in time up to a certain separation, assuming Keplerian motion.
:argument number_of_particles: Total number of gas particles in the SPH simulation
:argument hydrodynamics: SPH code class for the simulation
:argument initial_separation: a factor relative to the sum of the radii (1 means in contact, default: 5)
"""
stellar_evolution_code_required = True
gravity_code_required = True
def __init__(
        self,
        number_of_particles,
        hydrodynamics,
        initial_separation = 5,
        relax_sph_models = True,
        verbose = False,
        debug = False,
        hydrodynamics_arguments = dict(),
        hydrodynamics_parameters = dict(),
        star_to_sph_arguments = dict(),
        sph_to_star_arguments = dict(),
    ):
    """Configure the collision handler.

    :param number_of_particles: total SPH particles, split over both stars
    :param hydrodynamics: SPH code *class* (instantiated per collision)
    :param initial_separation: factor times the sum of radii (1 = contact)
    :param relax_sph_models: relax each SPH star before the encounter
    :param hydrodynamics_arguments/parameters: forwarded to the SPH code
    :param star_to_sph_arguments/sph_to_star_arguments: forwarded converters

    NOTE(review): the dict() defaults are mutable default arguments; safe
    only as long as callers never mutate them in place.
    """
    self.number_of_particles = number_of_particles
    self.hydrodynamics = hydrodynamics
    self.initial_separation = initial_separation
    if not relax_sph_models:
        # Swap in the pass-through so convert_stars skips relaxation.
        self.relax = self.no_relax
    self.verbose = verbose
    self.debug = debug
    self.hydrodynamics_arguments = hydrodynamics_arguments
    self.hydrodynamics_parameters = hydrodynamics_parameters
    self.star_to_sph_arguments = star_to_sph_arguments
    self.sph_to_star_arguments = sph_to_star_arguments
    self.dynamical_timescales_per_step = 1.0 # encounter_is_over check is performed at this interval
    self.extra_steps_when_encounter_is_over = 3
    self.continue_with_kepler = False
def handle_collision(self, primary, secondary, stellar_evolution_code=None, gravity_code=None):
    """Resolve the collision between *primary* and *secondary*.

    Pipeline: copy the two stars, gather their attributes from the codes,
    rewind the orbit to the initial separation, convert both stars to SPH,
    run the SPH encounter, and convert each surviving clump back to a 1D
    stellar model. Returns the post-encounter stars (with structure).
    """
    particles = self.local_copy_of_particles(primary, secondary)
    self.collect_required_attributes(particles, gravity_code, stellar_evolution_code)
    self.backtrack_particles(particles)
    gas_particles = self.convert_stars(particles, stellar_evolution_code)
    self.simulate_collision(gas_particles)
    self.models = [convert_SPH_to_stellar_model(group, **self.sph_to_star_arguments) for group in self.groups_after_encounter]
    return self.new_particles_with_internal_structure_from_models()
def new_particles_with_internal_structure_from_models(self):
    """Wrap self.models into a Particles set, back in the original frame.

    Each particle exposes get_internal_structure() returning its 1D model;
    mass/radius come from the model, position/velocity are shifted back by
    the pre-encounter center of mass.
    """
    def get_internal_structure(set, particle=None):
        # Look up the model matching this particle's key.
        return self.models[(set.key == particle.key).nonzero()[0]]
    result = Particles(len(self.models))
    result.add_function_attribute("get_internal_structure", None, get_internal_structure)
    result.mass = [model.dmass.sum().as_quantity_in(self.mass_unit) for model in self.models]
    result.radius = [model.radius[-1].as_quantity_in(self.radius_unit) for model in self.models]
    result.position = (self.original_center_of_mass + self.stars_after_encounter.position).as_quantity_in(self.position_unit)
    result.velocity = (self.original_center_of_mass_velocity + self.stars_after_encounter.velocity).as_quantity_in(self.velocity_unit)
    return result
def local_copy_of_particles(self, primary, secondary):
    """Return an in-memory Particles set holding copies of both colliders."""
    local_set = Particles(0)
    for collider in (primary, secondary):
        local_set.add_particle(collider)
    return local_set
def collect_required_attributes(self, particles, gravity_code, stellar_evolution_code):
    """Pull mass/position/velocity/radius from whichever code has them, and
    record the units plus the collision's dynamical timescale."""
    # Collect the required attributes and copy to the particles in memory
    required_attributes = set(["mass", "x","y","z", "vx","vy","vz", "radius"])
    required_attributes -= set(particles.get_attribute_names_defined_in_store())
    for code in [stellar_evolution_code, gravity_code]:
        # Copy whatever this code can provide, stop asking for it afterwards.
        attrs_in_code = required_attributes & set(code.particles.get_attribute_names_defined_in_store())
        if len(attrs_in_code) > 0:
            code.particles.copy_values_of_attributes_to(list(attrs_in_code), particles)
            required_attributes -= attrs_in_code
    # Remember units so the result can be expressed in the caller's units.
    self.mass_unit = particles.mass.unit
    self.radius_unit = particles.radius.unit
    self.position_unit = particles.position.unit
    self.velocity_unit = particles.velocity.unit
    # Free-fall-like timescale of the pair, used to size the SPH time steps.
    self.dynamical_timescale = numpy.pi * (particles.radius.sum()**3 / (8 * constants.G * particles.total_mass())).sqrt()
def start_kepler(self, mass_unit, length_unit):
    """Start a Kepler two-body solver scaled to the given mass and length."""
    unit_converter = nbody_system.nbody_to_si(mass_unit, length_unit)
    # Kepler output only shown when debugging.
    self.kepler = Kepler(unit_converter, redirection = "none" if self.debug else "null")
    self.kepler.initialize_code()
def initialize_binary_in_kepler(self, star_a, star_b):
    """Load the relative orbit of (star_a, star_b) into Kepler; returns it."""
    self.kepler.initialize_from_dyn(
        star_a.mass + star_b.mass,
        star_a.x - star_b.x, star_a.y - star_b.y, star_a.z - star_b.z,
        star_a.vx-star_b.vx, star_a.vy-star_b.vy, star_a.vz-star_b.vz
    )
    return self.kepler
def backtrack_particles(self, particles):
    """Rewind the (already collided) pair to *initial_separation* along their
    Keplerian orbit, and re-center on the center of mass.

    Also sets self.begin_time to (negative) time before periastron at which
    the SPH simulation starts.
    """
    self.original_center_of_mass = particles.center_of_mass()
    self.original_center_of_mass_velocity = particles.center_of_mass_velocity()
    initial_separation = self.initial_separation * particles.radius.sum()
    if self.verbose:
        print("Particles at collision:")
        print(particles)
        print("Backtrack particles to initial separation", initial_separation.as_string_in(units.RSun))
    self.start_kepler(particles.total_mass(), initial_separation)
    kepler = self.initialize_binary_in_kepler(particles[0], particles[1])
    # Move backwards along the orbit until the stars are this far apart.
    kepler.return_to_radius(initial_separation)
    self.begin_time = kepler.get_time()
    particles[1].position = kepler.get_separation_vector()
    particles[1].velocity = kepler.get_velocity_vector()
    # Express begin_time relative to periastron passage.
    kepler.advance_to_periastron()
    self.begin_time -= kepler.get_time()
    particles[0].position = [0, 0, 0] | units.m
    particles[0].velocity = [0, 0, 0] | units.m / units.s
    particles.move_to_center()
    if self.verbose:
        print("Backtracking particles done. Initial conditions:")
        print(particles)
def convert_stars(self, particles, stellar_evolution_code):
    """Convert both stars to (relaxed) SPH models placed on their orbits.

    Returns one Particles set containing the gas of both stars.
    """
    n_particles = self.divide_number_of_particles(particles)
    # Fetch the matching stars from the stellar evolution code, which holds
    # the internal structure needed by convert_stellar_model_to_SPH.
    se_colliders = particles.get_intersecting_subset_in(stellar_evolution_code.particles)
    if self.verbose:
        print("Converting stars of {0} to SPH models of {1} particles, respectively.".format(particles.mass, n_particles))
    sph_models = (
        self.relax(convert_stellar_model_to_SPH(se_colliders[0], n_particles[0], **self.star_to_sph_arguments)),
        self.relax(convert_stellar_model_to_SPH(se_colliders[1], n_particles[1], **self.star_to_sph_arguments))
    )
    gas_particles = Particles()
    for particle, sph_model in zip(particles, sph_models):
        # Shift each SPH star onto its star's orbital position/velocity.
        sph_model.position += particle.position
        sph_model.velocity += particle.velocity
        gas_particles.add_particles(sph_model)
    if self.verbose:
        print("Converting stars to SPH particles done")
    if self.debug:
        print(gas_particles)
    return gas_particles
def divide_number_of_particles(self, particles):
    """Split self.number_of_particles over the two stars, proportional to mass."""
    mass_fraction = particles[0].mass / particles.total_mass()
    primary_count = int(0.5 + self.number_of_particles * mass_fraction)
    return (primary_count, self.number_of_particles - primary_count)
def relax(self, sph_model):
    """Relax one SPH star for 2.5 dynamical timescales with damped velocities,
    returning the relaxed gas particles."""
    if self.debug:
        monitor = dict(time=[]|units.day, kinetic=[]|units.J, potential=[]|units.J, thermal=[]|units.J)
    gas_particles = sph_model.gas_particles
    hydro = self.new_hydrodynamics(gas_particles)
    hydro.parameters.artificial_viscosity_alpha = 0.0 # Viscous damping doesn't seem to be very important, but turned off just in case...
    channel_from_hydro = hydro.gas_particles.new_channel_to(gas_particles)
    channel_to_hydro = gas_particles.new_channel_to(hydro.gas_particles)
    dynamical_timescale = numpy.pi * (gas_particles.total_radius()**3 / (8 * constants.G * gas_particles.total_mass())).sqrt()
    t_end_in_t_dyn = 2.5 # Relax for this many dynamical timescales
    n_steps = 100
    velocity_damp_factor = 1.0 - (2.0*numpy.pi*t_end_in_t_dyn)/n_steps # Critical damping
    if self.verbose:
        print("Relaxing SPH model with {0} for {1} ({2} dynamical timescales).".format(
            self.hydrodynamics.__name__,
            (t_end_in_t_dyn*dynamical_timescale).as_string_in(units.day),
            t_end_in_t_dyn))
    for i_step, time in enumerate(t_end_in_t_dyn*dynamical_timescale * numpy.linspace(1.0/n_steps, 1.0, n_steps)):
        hydro.evolve_model(time)
        channel_from_hydro.copy_attributes(["mass","x","y","z","vx","vy","vz","u"])
        # Recenter and damp the velocities towards equilibrium each step.
        gas_particles.position -= gas_particles.center_of_mass()
        gas_particles.velocity = velocity_damp_factor * (gas_particles.velocity - gas_particles.center_of_mass_velocity())
        channel_to_hydro.copy_attributes(["x","y","z","vx","vy","vz"])
        if self.debug:
            # Track energies so the relaxation can be plotted afterwards.
            K, U, Q = hydro.kinetic_energy, hydro.potential_energy, hydro.thermal_energy
            print("t, K, U, Q:", time, K, U, Q)
            monitor["time"].append(time)
            monitor["kinetic"].append(K)
            monitor["potential"].append(U)
            monitor["thermal"].append(Q)
    hydro.stop()
    if self.debug:
        energy_evolution_plot(monitor["time"], monitor["kinetic"], monitor["potential"], monitor["thermal"])
    return gas_particles
def no_relax(self, sph_model):
    """Pass-through used instead of relax() when relax_sph_models is False."""
    return sph_model.gas_particles
def new_hop(self, particles):
    """Start a Hop group finder instance tuned for clump detection."""
    converter = nbody_system.nbody_to_si(particles.total_mass(), 1.0 | units.RSun)
    if self.debug:
        print("Output of Hop is redirected to hop_out.log")
        options = dict(redirection="file", redirect_file="hop_out.log")
    else:
        options = dict()
    hop = Hop(unit_converter=converter, **options)
    hop.parameters.number_of_neighbors_for_hop = 100
    hop.parameters.saddle_density_threshold_factor = 0.8
    hop.parameters.relative_saddle_density_threshold = True
    return hop
def new_hydrodynamics(self, gas_particles):
    """Start the SPH code, apply the configured parameters, and load the gas."""
    unit_converter = nbody_system.nbody_to_si(gas_particles.total_mass(), self.dynamical_timescale)
    hydro = self.hydrodynamics(unit_converter, **self.hydrodynamics_arguments)
    hydro.initialize_code()
    for par, value in self.hydrodynamics_parameters.items():
        setattr(hydro.parameters, par, value)
    hydro.commit_parameters()
    hydro.gas_particles.add_particles(gas_particles)
    hydro.commit_particles()
    return hydro
def simulate_collision(self, gas_particles):
    """Run the SPH encounter until encounter_is_over() holds, plus a few
    extra steps; results end up in self.groups_after_encounter."""
    self.hop = self.new_hop(gas_particles)
    hydro = self.new_hydrodynamics(gas_particles)
    channel = hydro.gas_particles.new_channel_to(gas_particles)
    if self.verbose:
        print("Simulating collision with {0} from {1} to {2}.".format(
            self.hydrodynamics.__name__,
            self.begin_time.as_string_in(units.day),
            (self.dynamical_timescales_per_step * self.dynamical_timescale).as_string_in(units.day)))
    # First step: from begin_time (before periastron) up to one check interval.
    hydro.evolve_model(self.dynamical_timescales_per_step * self.dynamical_timescale - self.begin_time)
    channel.copy_attributes(["x","y","z","vx","vy","vz","pressure","density","u"])
    extra_steps_counter = 0
    while True:
        if self.encounter_is_over(gas_particles):
            # Keep integrating a few extra steps to let things settle.
            extra_steps_counter += 1
            if extra_steps_counter > self.extra_steps_when_encounter_is_over:
                print("Encounter is over and finished extra steps.")
                break
            else:
                print("Encounter is over. Now performing step {0} out of {1} extra steps".format(
                    extra_steps_counter, self.extra_steps_when_encounter_is_over))
        else:
            extra_steps_counter = 0
            print("Continuing to {0}.".format((hydro.model_time + self.next_dt + self.begin_time).as_string_in(units.day)))
        if self.continue_with_kepler:
            # Long quiescent stretch: skip ahead analytically with Kepler.
            self.evolve_with_kepler(hydro)
        hydro.evolve_model(hydro.model_time + self.next_dt)
        channel.copy_attributes(["x","y","z","vx","vy","vz","pressure","density","u"])
    hydro.stop()
    self.hop.stop()
    self.kepler.stop()
def encounter_is_over(self, gas_particles):
    """True when the bound clumps will no longer (re)collide.

    Side effects: sets self.next_dt, self.groups_after_encounter and
    self.stars_after_encounter.
    """
    self.next_dt = self.dynamical_timescales_per_step * self.dynamical_timescale
    groups = self.group_bound_particles(gas_particles)
    stars = self.convert_groups_to_stars(groups)
    self.groups_after_encounter = groups
    self.stars_after_encounter = stars
    if len(stars) > 1:
        # Should do full check for stable binaries, triple, multiples, two escapers,
        # escaping star + binary, etc.
        # For now we only check whether the two most massive groups will (re)collide
        a, b = stars.sorted_by_attribute("mass")[-2:]
        if self.debug: print("System consists of {0} groups. The two most massive are: {1} and {2}.".format(len(stars), a.mass.as_string_in(units.MSun), b.mass.as_string_in(units.MSun)))
        if self.binary_will_collide(a, b):
            return False
    if self.verbose:
        print("Encounter is over, {0} stars after encounter.".format(len(groups)))
    return True
def group_bound_particles(self, gas_particles):
    """Cluster the gas into clumps with Hop, then iteratively attach the
    'lost' (ungrouped) particles that are bound to a clump; iteration stops
    when a pass attaches nothing new."""
    groups, lost = self.analyze_particle_distribution(gas_particles)
    while len(lost) > 0:
        if self.debug:
            group_plot(groups, lost)
        previous_number_of_lost_particles = len(lost)
        groups, lost = self.select_bound_particles(groups, lost)
        if len(lost) == previous_number_of_lost_particles:
            # Fixed point reached: remaining particles are unbound.
            break
    return groups
def convert_groups_to_stars(self, groups):
    """Condense each gas clump into a single star-like particle.

    The radius is the clump's 90% Lagrangian radius about its own center.
    """
    stars = Particles(len(groups))
    for star, group in zip(stars, groups):
        star.mass = group.total_mass()
        star.position = group.center_of_mass()
        star.velocity = group.center_of_mass_velocity()
        star.radius = group.LagrangianRadii(mf=[0.9], cm=star.position)[0][0]
    return stars
def analyze_particle_distribution(self, gas_particles):
    """Run Hop on the gas; return (list of clump subsets, ungrouped subset).

    Uses the SPH densities when available, otherwise lets Hop estimate them.
    """
    if self.verbose:
        print("Analyzing particle distribution using Hop")
    if "density" in gas_particles.get_attribute_names_defined_in_store():
        if self.debug: print("Using the original particles' density")
        # Only consider regions denser than half the mean density.
        self.hop.parameters.outer_density_threshold = 0.5 * gas_particles.density.mean()
        self.hop.particles.add_particles(gas_particles)
        gas_particles.copy_values_of_attribute_to("density", self.hop.particles)
    else:
        if self.debug: print("Using Hop to calculate the density")
        self.hop.particles.add_particles(gas_particles)
        self.hop.calculate_densities()
        self.hop.parameters.outer_density_threshold = 0.5 * self.hop.particles.density.mean()
    self.hop.do_hop()
    result = []
    for group in self.hop.groups():
        # Map Hop's group members back onto the caller's particle set.
        result.append(group.get_intersecting_subset_in(gas_particles))
    lost = self.hop.no_group().get_intersecting_subset_in(gas_particles)
    # Clear Hop's particle store so the next call starts fresh.
    self.hop.particles.remove_particles(self.hop.particles)
    return result, lost
def select_bound_particles(self, groups, lost):
    """Attach each 'lost' particle to the group it is most bound to.

    Returns the augmented groups and the particles still unbound.
    NOTE(review): the specific energy uses v^2 (not v^2/2) plus u minus the
    potential -- confirm this intentional weighting against the original
    criterion before changing it.
    """
    specific_total_energy_relative_to_group = [] | (units.m / units.s)**2
    for group in groups:
        group_mass = group.total_mass()
        group_com = group.center_of_mass()
        group_com_velocity = group.center_of_mass_velocity()
        # Energy of every lost particle relative to this group's potential.
        specific_total_energy_relative_to_group.append(
            (lost.velocity - group_com_velocity).lengths_squared() + lost.u -
            constants.G * group_mass / (lost.position - group_com).lengths())
    # For each lost particle, the group it is most bound to.
    index_minimum = specific_total_energy_relative_to_group.argmin(axis=0)
    bound=lost[:0]
    for i, group in enumerate(groups):
        # Particles whose minimum is this group AND whose energy is negative.
        bound_to_group = lost[numpy.logical_and(
            index_minimum == i,
            specific_total_energy_relative_to_group[i] < 0 | (units.m / units.s)**2
        )]
        bound += bound_to_group
        groups[i] = group + bound_to_group
    return groups, lost - bound
    def binary_will_collide(self, a, b):
        """
        Use Kepler to decide whether stars a and b will (re)collide, i.e.
        whether their periastron distance is smaller than the sum of their radii.

        Side effects: sets self.continue_with_kepler and self.next_dt, and --
        when a wide excursion can be skipped -- also
        self.skip_to_relative_position_velocity and self.begin_time.
        """
        self.continue_with_kepler = False
        if self.verbose:
            print("Using Kepler to check whether the two stars will (re)collide.")
        kepler = self.initialize_binary_in_kepler(a, b)
        true_anomaly = kepler.get_angles()[1]
        eccentricity = kepler.get_elements()[1]
        # Past periastron (true anomaly > 0) on an unbound orbit: the stars
        # separate forever, so the interaction is over.
        if true_anomaly > 0.0 and eccentricity >= 1.0:
            if self.verbose:
                print("Stars are on hyperbolic/parabolic orbits and moving away from each other, interaction is over.")
            return False
        periastron = kepler.get_periastron()
        will_collide = periastron < a.radius + b.radius
        if self.verbose:
            print("Stars {0} collide. Distance at periastron: {1}, sum of radii: {2}".format(
                "will" if will_collide else "won't",
                periastron.as_string_in(units.RSun), (a.radius + b.radius).as_string_in(units.RSun)))
        if will_collide:
            # 1) check whether the stars are still relaxing: less than ~3 t_dyn passed since last moment of contact --> relax
            # 2) check whether the stars are already within 'initial_separation', else skip (dtmax?)
            kepler.advance_to_periastron()
            # kepler.get_time() now holds the time until the periastron passage.
            self.next_dt = kepler.get_time() + self.dynamical_timescales_per_step * self.dynamical_timescale
            if self.debug:
                print("Time to collision: {0}, next_dt: {1}".format(
                    kepler.get_time().as_string_in(units.day), self.next_dt.as_string_in(units.day)))
            if kepler.get_time() > 3 * self.dynamical_timescale and kepler.get_apastron() > 2.0 * self.initial_separation * (a.radius + b.radius):
                # evolve for 3 * self.dynamical_timescale and skip the rest until ~initial_separation
                kepler.return_to_apastron()
                kepler.return_to_radius(a.radius + b.radius)
                if -kepler.get_time() > 2.9 * self.dynamical_timescale: # If ~3 t_dyn have passed since the end of the collision
                    if self.verbose: print("~3 t_dyn have passed since the end of the collision -> skip to next collision")
                    self.continue_with_kepler = True
                    kepler.advance_to_apastron()
                    kepler.advance_to_radius(2.0 * self.initial_separation * (a.radius + b.radius))
                    # Remember the state to fast-forward the hydro particles to.
                    self.skip_to_relative_position_velocity = (kepler.get_separation_vector(), kepler.get_velocity_vector())
                    self.begin_time = kepler.get_time()
                    kepler.advance_to_periastron()
                    self.next_dt = self.dynamical_timescales_per_step * self.dynamical_timescale + kepler.get_time() - self.begin_time
                else:
                    # Still relaxing: just evolve for ~3 dynamical timescales.
                    self.next_dt = 3 * self.dynamical_timescale + kepler.get_time()
        return will_collide
    def evolve_with_kepler(self, hydro):
        """
        Fast-forward the two most massive groups along their two-body orbit:
        shift their SPH particles to the relative position/velocity stored in
        self.skip_to_relative_position_velocity (computed with Kepler).
        """
        if self.verbose: print("evolve_with_kepler")
        indices_two_most_massive = self.stars_after_encounter.mass.argsort()[-2:]
        groups = [self.groups_after_encounter[i] for i in indices_two_most_massive]
        old_particles = self.stars_after_encounter[indices_two_most_massive]
        new_particles = Particles(2)
        new_particles.mass = old_particles.mass
        # Put the full relative separation/velocity on the first particle...
        new_particles[0].position, new_particles[0].velocity = self.skip_to_relative_position_velocity
        # ...then shift to the centre of mass so both stars get displaced.
        new_particles.move_to_center()
        for group, old_particle, new_particle in zip(groups, old_particles, new_particles):
            in_hydro = group.get_intersecting_subset_in(hydro.gas_particles)
            if self.verbose: print(in_hydro.center_of_mass().as_quantity_in(units.RSun), old_particle.position.as_quantity_in(units.RSun), new_particle.position.as_quantity_in(units.RSun))
            # Translate the group's gas to the new orbital phase.
            in_hydro.position += new_particle.position - old_particle.position
            in_hydro.velocity += new_particle.velocity - old_particle.velocity
def group_plot(groups, no_group, figname="group_plot.png"):
    """Scatter-plot each group in its own colour; ungrouped particles in magenta squares."""
    palette = 100 * ["r", "g", "b", "y", "k", "w"]
    for clump, clump_color in zip(groups, palette):
        scatter(clump.x, clump.y, c=clump_color)
    if len(no_group):
        scatter(no_group.x, no_group.y, c="m", marker="s")
    axes = native_plot.gca()
    axes.set_aspect("equal", adjustable="datalim")
    native_plot.savefig(figname)
    native_plot.clf()
def energy_evolution_plot(time, kinetic, potential, thermal, figname="energy_evolution.png"):
    """Plot K, U, Q and total energy against time (upper panel), Q alone below."""
    native_plot.subplot(211)
    series = (
        (kinetic, 'K'),
        (potential, 'U'),
        (thermal, 'Q'),
        (kinetic + potential + thermal, 'E'),
    )
    for values, series_label in series:
        plot(time, values, label=series_label)
    xlabel('Time')
    ylabel('Energy')
    native_plot.legend(prop={'size': "x-small"}, loc=4)
    native_plot.subplot(212)
    plot(time, thermal, label='Q')
    native_plot.savefig(figname)
    native_plot.clf()
| 22,588
| 50.928736
| 190
|
py
|
amuse
|
amuse-main/src/amuse/ext/salpeter.py
|
# Backwards-compatibility shim: the Salpeter IMF generator moved to
# amuse.ic.salpeter; importing this module re-exports it with a warning.
import warnings
from amuse.ic.salpeter import *
warnings.warn("amuse.ext.salpeter has moved to amuse.ic.salpeter", DeprecationWarning)
| 137
| 22
| 86
|
py
|
amuse
|
amuse-main/src/amuse/ext/grid_to_sph.py
|
import numpy
from amuse.support.exceptions import AmuseException
from amuse.units import units
from amuse.datamodel import Particles
class Grid2SPH(object):
    """
    Converts a (cartesian) hydrodynamics Grid into an SPH model consisting of the
    specified number of particles. The Grid must have position, rho, momentum and
    energy defined on each cell.

    :argument grid: Grid to be converted to an SPH model
    :argument number_of_sph_particles: Number of gas particles in the resulting model
    :argument base_distribution_type: Type of the base particle distribution ("random" or "uniform")
        "random": particle positions are randomly sampled from the density weighted cells
        "uniform": particles are uniformly sampled from the density weighted cells (note
        that they are assigned a random (!) position within that cell subsequently)
    :argument seed: If provided, seed for the random number generator
    """
    def __init__(self, grid, number_of_sph_particles, base_distribution_type = "uniform", seed = None):
        if (grid.number_of_dimensions() != 3):
            raise AmuseException("Grid must be 3D")
        if not hasattr(grid,"momentum"):
            grid.add_global_vector_attribute("momentum", ["rhovx","rhovy","rhovz"])
        self.grid = grid
        self.shape = grid.shape
        self.number_of_sph_particles = number_of_sph_particles
        self.base_distribution_type = base_distribution_type # "random" or "uniform"
        # Compare with None explicitly: the previous 'if seed:' silently
        # ignored a seed of 0, making seed=0 non-reproducible.
        if seed is not None:
            numpy.random.seed(seed)
    def setup_lookup_tables(self):
        # Retrieve details of the grid and convert them to fast lookup tables
        shape_for_vector_multiply = list(self.grid.shape)
        shape_for_vector_multiply.append(1)
        density = self.grid.rho
        summed_density = density.sum()
        # Cumulative (normalized) density: used to draw cells density-weighted.
        self.cumulative_weight = numpy.cumsum((density / summed_density))
        self.position_lookup_table = self.grid.position.reshape((-1,3))
        self.velocity_lookup_table = (self.grid.momentum / density.reshape(shape_for_vector_multiply)).reshape((-1,3))
        # Specific internal energy: total energy per unit mass minus bulk kinetic part.
        self.specific_internal_energy_lookup_table = (self.grid.energy / density).flatten() - 0.5 * self.velocity_lookup_table.lengths_squared()
        self.density_lookup_table = density.flatten()
        cellsize = self.grid.cellsize()
        self.cellsize_unit = cellsize.unit
        self.cellsize_number = cellsize.value_in(cellsize.unit)
        self.mass = summed_density * (cellsize[0] * cellsize[1] * cellsize[2])
    def setup_variates(self):
        # Generate (quasi-)random realisation and map each variate to a cell index.
        variates = self.generate_variates(self.number_of_sph_particles)
        self.indices = numpy.searchsorted(self.cumulative_weight, variates)
    def generate_variates(self, number_of_variates):
        """Return 'number_of_variates' samples in [0, 1), per base_distribution_type."""
        if self.base_distribution_type == "uniform":
            return numpy.linspace(0.0, 1.0, num=number_of_variates, endpoint=False)
        elif self.base_distribution_type == "random":
            return numpy.random.uniform(0.0, 1.0, number_of_variates)
        else:
            raise AmuseException("Unknown base_distribution_type: {0}. Possible "
                "options are: 'random' or 'uniform'.".format(self.base_distribution_type))
    def new_particle_positions(self):
        # Cell centre plus a random offset within the cell.
        base_positions = self.position_lookup_table[self.indices]
        return base_positions + self.cellsize_unit.new_quantity(
            self.cellsize_number * numpy.random.uniform(-0.5, 0.5, (self.number_of_sph_particles, 3)))
    def new_particle_velocities(self):
        return self.velocity_lookup_table[self.indices]
    def new_particle_specific_internal_energies(self):
        return self.specific_internal_energy_lookup_table[self.indices]
    def new_particle_densities(self):
        return self.density_lookup_table[self.indices]
    @property
    def result(self):
        """Build and return the SPH particle set (equal-mass particles)."""
        self.setup_lookup_tables()
        self.setup_variates()
        sph_particles = Particles(self.number_of_sph_particles)
        sph_particles.position = self.new_particle_positions()
        sph_particles.velocity = self.new_particle_velocities()
        sph_particles.u = self.new_particle_specific_internal_energies()
        sph_particles.rho = self.new_particle_densities()
        sph_particles.mass = (self.mass.number * 1.0 / self.number_of_sph_particles) | self.mass.unit
        # Crude estimate of the smoothing length; the SPH code will calculate the true value itself.
        sph_particles.h_smooth = (self.grid.get_volume() * 50.0/self.number_of_sph_particles)**(1/3.0)
        return sph_particles
def convert_grid_to_SPH(grid, number_of_sph_particles, **keyword_arguments):
    """
    Convert a (cartesian) hydrodynamics Grid into an SPH model with the given
    number of particles. The Grid must define position, rho, momentum and
    energy on each cell.

    :argument grid: Grid to be converted to an SPH model
    :argument number_of_sph_particles: Number of gas particles in the resulting model
    :argument base_distribution_type: Type of the base particle distribution ("random" or "uniform")
        "random": particle positions are randomly sampled from the density weighted cells
        "uniform": particles are uniformly sampled from the density weighted cells (note
        that they are assigned a random (!) position within that cell subsequently)
    :argument seed: If provided, seed for the random number generator
    """
    return Grid2SPH(grid, number_of_sph_particles, **keyword_arguments).result
| 5,851
| 49.448276
| 144
|
py
|
amuse
|
amuse-main/src/amuse/ext/solarsystem.py
|
import numpy
from amuse.units import units, nbody_system, constants
from amuse.datamodel import Particles, Particle
pi_over_180 = numpy.pi/180.
# Heliocentric initial conditions for the planets and Pluto.
# Columns per entry (see _planets_only for the units actually applied):
# name, mass [MSun], celimit, density [g/cm^3],
# x, y, z [AU], vx, vy, vz [AU/day], Lx, Ly, Lz [MSun AU^2/day].
_solsysdat= \
[['MERCURY',1.66013679527193009E-07,20.,5.43, \
  -3.83966017419175965E-01, -1.76865300855700736E-01, 2.07959213998758705E-02, \
  5.96286238644834141E-03, -2.43281292146216750E-02,-2.53463209848734695E-03, \
  0., 0., 0.],
 ['VENUS',2.44783833966454430E-06,20.,5.24, \
  6.33469157915745540E-01, 3.49855234102151691E-01,-3.17853172088953667E-02, \
  -9.84258038001823571E-03, 1.76183746921837227E-02, 8.08822351013463794E-04, \
  0., 0., 0.],
 ['EARTHMOO',3.04043264264672381E-06,20.,5.52, \
  2.42093942183383037E-01, -9.87467766698604366E-01, -4.54276292555233496E-06, \
  1.64294055023289365E-02, 4.03200725816140870E-03, 1.13609607260006795E-08, \
  0., 0., 0.],
 ['MARS',3.22715144505386530E-07,20.,3.94, \
  2.51831018120174499E-01, 1.52598983115984788E+00, 2.57781137811807781E-02, \
  -1.32744166042475433E-02, 3.46582959610421387E-03, 3.98930013246952611E-04, \
  0., 0., 0.],
 ['JUPITER',9.54791938424326609E-04,3.,1.33, \
  4.84143144246472090E+00, -1.16032004402742839E+00, -1.03622044471123109E-01, \
  1.66007664274403694E-03, 7.69901118419740425E-03, -6.90460016972063023E-05, \
  0., 0., 0.],
 ['SATURN',2.85885980666130812E-04,3.,0.70, \
  8.34336671824457987E+00, 4.12479856412430479E+00, -4.03523417114321381E-01, \
  -2.76742510726862411E-03, 4.99852801234917238E-03, 2.30417297573763929E-05, \
  0., 0., 0.],
 ['URANUS',4.36624404335156298E-05,3.,1.30, \
  1.28943695621391310E+01, -1.51111514016986312E+01, -2.23307578892655734E-01, \
  2.96460137564761618E-03, 2.37847173959480950E-03, -2.96589568540237556E-05, \
  0., 0., 0.],
 ['NEPTUNE',5.15138902046611451E-05,3.,1.76, \
  1.53796971148509165E+01, -2.59193146099879641E+01, 1.79258772950371181E-01, \
  2.68067772490389322E-03, 1.62824170038242295E-03, -9.51592254519715870E-05, \
  0., 0., 0.],
 ['PLUTO',7.39644970414201173E-09,3.,1.1, \
  -1.15095623952731607E+01, -2.70779438829451422E+01, 6.22871533567077229E+00, \
  2.97220056963797431E-03, -1.69820233395912967E-03, -6.76798264809371094E-04, \
  0., 0., 0.]]
def _planets_only(define_mercury_attributes = False):
    """
    Return a Particles set with the planets and Pluto from _solsysdat, in
    heliocentric coordinates.

    :argument define_mercury_attributes: when True, also set the attributes
        required by the Mercury integrator (density, Lx/Ly/Lz, celimit).
    """
    data = numpy.array([tuple(entry) for entry in _solsysdat], dtype=[('name','S10'),
        ('mass','<f8'), ('celimit','<f8'), ('density','<f8'),
        ('x','<f8'), ('y','<f8'), ('z','<f8'),
        ('vx','<f8'), ('vy','<f8'), ('vz','<f8'),
        ('Lx','<f8'), ('Ly','<f8'), ('Lz','<f8')])
    planets = Particles(len(_solsysdat))
    planets.name = list(data['name'])
    # (removed leftover debug print of planets.name.dtype)
    planets.mass = units.MSun.new_quantity(data['mass'])
    density = (units.g/units.cm**3).new_quantity(data['density'])
    # Radius follows from mass and mean density.
    planets.radius = ((planets.mass/density) ** (1/3.0)).as_quantity_in(units.km)
    for attribute in ['x', 'y', 'z']:
        setattr(planets, attribute, units.AU.new_quantity(data[attribute]))
    for attribute in ['vx', 'vy', 'vz']:
        setattr(planets, attribute, units.AUd.new_quantity(data[attribute]).as_quantity_in(units.km / units.s))
    if define_mercury_attributes:
        planets.density = density
        angular_momentum_unit = units.MSun * units.AU**2/units.day
        for attribute in ['Lx', 'Ly', 'Lz']:
            setattr(planets, attribute, angular_momentum_unit.new_quantity(data[attribute]).as_quantity_in(units.J * units.s))
        planets.celimit = units.none.new_quantity(data['celimit'])
    return planets
def new_solar_system_for_mercury():
    """
    Create initial conditions for the symplectic integrator Mercury, describing
    the solar system. Returns a tuple of two particle sets: the central
    particle (sun) and the orbiters (planets and Pluto), with positions and
    velocities in heliocentric coordinates.

    Defined attributes sun:
        name, mass, radius, j2, j4, j6, Lx, Ly, Lz
    Defined attributes orbiters:
        name, mass, radius, density, x, y, z, vx, vy, vz, Lx, Ly, Lz, celimit
    """
    orbiters = _planets_only(define_mercury_attributes=True)
    sun = Particles(1)
    sun.name = 'SUN'
    sun.mass = 1.0 | units.MSun
    sun.radius = 0.0000001 | units.AU
    # Gravitational moments of the central body.
    sun.j2 = .0001 | units.AU**2
    sun.j4 = .0 | units.AU**4
    sun.j6 = .0 | units.AU**6
    sun.angular_momentum = [0.0, 0.0, 0.0] | units.MSun * units.AU**2 / units.day
    return sun, orbiters
def new_kepler():
    """Return an initialized Kepler two-body solver (SI scale: 1 MSun, 1 AU)."""
    from amuse.community.kepler.interface import Kepler
    unit_converter = nbody_system.nbody_to_si(1 | units.MSun, 1 | units.AU)
    solver = Kepler(unit_converter)
    solver.initialize_code()
    return solver
def get_position(mass_sun, mass_planet, ecc, semi, mean_anomaly, incl, argument, longitude, delta_t=0.|units.day):
    """
    cartesian position and velocity from orbital elements,
    where the orbit is evolved from given mean_anomaly
    by time delta_t

    argument -- argument of perihelion
    longitude -- longitude of ascending node

    Angles are in radians. Returns (r, v): the separation and velocity
    vectors of the planet relative to the sun.
    """
    kepler = new_kepler()
    kepler.initialize_from_elements(mass=(mass_sun+mass_planet),
                                    semi=semi,
                                    ecc=ecc,
                                    mean_anomaly=mean_anomaly)
    kepler.transform_to_time(time=delta_t)
    r = kepler.get_separation_vector()
    v = kepler.get_velocity_vector()
    kepler.stop()
    # Rotation from the orbital plane to the reference frame:
    # Rz(longitude of node) * Rx(inclination) * Rz(argument of perihelion).
    a1 = ([numpy.cos(longitude), -numpy.sin(longitude), 0.0], [numpy.sin(longitude), numpy.cos(longitude), 0.0], [0.0, 0.0, 1.0])
    a2 = ([1.0, 0.0, 0.0], [0.0, numpy.cos(incl), -numpy.sin(incl)], [0.0, numpy.sin(incl), numpy.cos(incl)])
    a3 = ([numpy.cos(argument), -numpy.sin(argument), 0.0], [numpy.sin(argument), numpy.cos(argument), 0.0], [0.0, 0.0, 1.0])
    A = numpy.dot(numpy.dot(a1,a2),a3)
    r_vec = numpy.dot(A,numpy.reshape(r,3,'F'))
    v_vec = numpy.dot(A,numpy.reshape(v,3,'F'))
    # for relative vectors
    r[0] = r_vec[0]
    r[1] = r_vec[1]
    r[2] = r_vec[2]
    v[0] = v_vec[0]
    v[1] = v_vec[1]
    v[2] = v_vec[2]
    return r,v
def get_sun_and_planets(delta_JD=0.|units.day):
    """
    eight planets of the Solar System
    as for JD = 2457099.500000000 = A.D. 2015-Mar-18 00:00:00.0000 (CT)
    http://ssd.jpl.nasa.gov/horizons.cgi

    :argument delta_JD: time offset from the reference epoch; each orbit is
        evolved over this interval with Kepler.
    Returns (sun, planets); planet positions/velocities are relative to the sun.
    """
    planets = Particles(8)
    # mass
    planets.mass = [3.302e23,
                    48.685e23,
                    5.97219e24,
                    6.4185e23,
                    1898.13e24,
                    5.68319e26,
                    86.8103e24,
                    102.41e24] | units.kg
    #radius
    planets.radius = [2439.7,
                      6051.8,
                      6378.1,
                      3396.2,
                      71492,
                      60268,
                      25559,
                      24764] | units.km
    # eccentricity
    planets_ecc = [2.056263501026885E-01,
                   6.756759719005901E-03,
                   1.715483324953308E-02,
                   9.347121362500883E-02,
                   4.877287772914470E-02,
                   5.429934603664216E-02,
                   4.911406962716518E-02,
                   8.494660388602767E-03]
    # semi-major axis
    planets_semi = [3.870989725156447E-01,
                    7.233252880006816E-01,
                    1.000816989613834E+00,
                    1.523624142457679E+00,
                    5.203543088590996E+00,
                    9.547316304899041E+00,
                    1.915982879739036E+01,
                    2.997013749028780E+01] | units.AU
    # mean anomaly [degrees]
    planets_mean_anomaly = [2.256667460183225E+02,
                            3.096834722926926E+02,
                            6.970055236286768E+01,
                            5.013506750245609E+01,
                            1.213203242081277E+02,
                            1.423311616732398E+02,
                            2.079860620353052E+02,
                            2.712246916734600E+02]
    planets_mean_anomaly = numpy.array(planets_mean_anomaly) * pi_over_180
    # inclination [IN degrees]
    planets_inclination = [7.004026765179669E+00,
                           3.394480103844425E+00,
                           3.563477431351056E-03,
                           1.848403408106458E+00,
                           1.303457729562742E+00,
                           2.488017444885577E+00,
                           7.728000142736371E-01,
                           1.767720502209091E+00]
    planets_inclination = numpy.array(planets_inclination) * pi_over_180
    # Longitude of Ascending Node [OM degrees]
    planets_longitude = [4.831163083479358E+01,
                         7.663982595051040E+01,
                         1.775515437672556E+02,
                         4.951282677064384E+01,
                         1.005036717671826E+02,
                         1.135683875842263E+02,
                         7.388411509910506E+01,
                         1.317497218434830E+02]
    planets_longitude = numpy.array(planets_longitude) * pi_over_180
    # Argument of Perihelion [W degrees]
    planets_argument = [2.916964171964058E+01,
                        5.469102797401222E+01,
                        2.877495001117996E+02,
                        2.865420083537150E+02,
                        2.740725976811202E+02,
                        3.398666856578898E+02,
                        9.666856264946740E+01,
                        2.951871807292030E+02]
    planets_argument = numpy.array(planets_argument) * pi_over_180
    planets.name = ['Mercury',
                    'Venus',
                    'Earth',
                    'Mars',
                    'Jupiter',
                    'Saturn',
                    'Uranus',
                    'Neptune']
    ### to compare with JPL, mass of the Sun needs to be rescaled
    #mg_nasa = 1.32712440018e20 | (units.m**3 / units.s**2)
    #g_nasa = 6.67259e-11 | (units.m**3 / units.kg / units.s**2)
    #ms = mg_nasa / g_nasa
    sun = Particle()
    sun.name = 'Sun'
    #sun.mass = ms
    sun.mass = 1.0 | units.MSun
    sun.position = [0.,0.,0.] | units.AU
    sun.velocity = [0.,0.,0.] | units.kms
    # get the position and velocity vectors relative to sun
    # by evolving in Kepler
    for i,ecc_i in enumerate(planets_ecc):
        r, v = get_position(sun.mass,
                            planets[i].mass,
                            planets_ecc[i],
                            planets_semi[i],
                            planets_mean_anomaly[i],
                            planets_inclination[i],
                            planets_longitude[i],
                            planets_argument[i],
                            delta_t=delta_JD)
        planets[i].position = r
        planets[i].velocity = v
    return sun, planets
def solar_system_in_time(time_JD=2457099.5|units.day):
    """
    Initial conditions of the Solar system: a particle set containing the Sun
    plus the eight planets at Julian date time_JD, in the center-of-mass
    reference frame.

    Defined attributes:
    name, mass, radius, x, y, z, vx, vy, vz
    """
    reference_epoch = 2457099.5 | units.day
    sun, planets = get_sun_and_planets(delta_JD=time_JD - reference_epoch)
    bodies = Particles()
    bodies.add_particle(sun)
    bodies.add_particles(planets)
    bodies.move_to_center()
    ### to compare with JPL, relative positions and velocities need to be corrected for the
    # Sun's vectors with respect to the barycenter
    #r_s = (3.123390770608490E-03, -4.370830943817017E-04, -1.443425433116342E-04) | units.AU
    #v_s = (3.421633816761503E-06, 5.767414405893875E-06, -8.878039607570240E-08) | (units.AU / units.day)
    #print sun
    #print planets.position.in_(units.AU) + r_s
    #print planets.velocity.in_(units.AU/units.day) + v_s
    return bodies
def old_new_solar_system():
    """
    Create initial conditions describing the solar system. Returns a single
    particle set containing the sun, planets and Pluto. The model is centered at
    the origin (center-of-mass(-velocity) coordinates).

    Defined attributes:
    name, mass, radius, x, y, z, vx, vy, vz
    """
    star = Particle()
    star.name = 'SUN'
    star.mass = 1.0 | units.MSun
    star.radius = 1.0 | units.RSun
    bodies = Particles()
    bodies.add_particle(star)
    bodies.add_particles(_planets_only())
    bodies.move_to_center()
    return bodies
def new_solar_system(Julian_date=-1|units.day):
    """Solar-system initial conditions; a negative date selects the legacy model."""
    use_legacy_model = Julian_date < (0 | units.day)
    return old_new_solar_system() if use_legacy_model else solar_system_in_time(Julian_date)
def new_option_parser():
    """Build the command-line parser: option -d sets the Julian date (days)."""
    from amuse.units.optparse import OptionParser
    parser = OptionParser()
    parser.add_option("-d", dest="Julian_date", unit=units.day,
                      type=float, default=2438871.5 | units.day,
                      help="julian date [%default]")
    return parser
if __name__ in ('__main__', '__plot__'):
    # Script entry point: build the solar system for the requested Julian
    # date and print the resulting particle set.
    o, arguments = new_option_parser().parse_args()
    solar_system = new_solar_system(o.Julian_date)
    print(solar_system)
| 12,891
| 36.806452
| 127
|
py
|
amuse
|
amuse-main/src/amuse/ext/derived_grav_systems.py
|
from amuse.units import constants
class center_of_mass(object):
    """
    com=center_of_mass(grav_instance)
    derived system, returns center of mass as skeleton grav system
    provides: get_gravity_at_point, get_potential_at_point
    """
    def __init__(self, baseclass):
        self.baseclass = baseclass

    def get_gravity_at_point(self, radius, x, y, z):
        """Acceleration at (x, y, z) from a softened point mass at the COM."""
        total_mass = self.baseclass.total_mass
        cx, cy, cz = self.baseclass.get_center_of_mass_position()
        softening2 = self.baseclass.parameters.epsilon_squared
        r2 = (cx - x)**2 + (cy - y)**2 + (cz - z)**2 + softening2
        prefactor = constants.G * total_mass / r2**1.5
        return prefactor * (cx - x), prefactor * (cy - y), prefactor * (cz - z)

    def get_potential_at_point(self, radius, x, y, z):
        """Potential at (x, y, z) from a softened point mass at the COM."""
        total_mass = self.baseclass.total_mass
        cx, cy, cz = self.baseclass.get_center_of_mass_position()
        softening2 = self.baseclass.parameters.epsilon_squared
        r2 = (cx - x)**2 + (cy - y)**2 + (cz - z)**2 + softening2
        return -constants.G * total_mass / r2**0.5
class copycat(object):
    """
    copy=copycat(base_class, grav_instance, converter)
    derived system, returns copy of grav instance with
    get_gravity_at_point, get_potential_at_point reimplemented in
    base_class
    """
    def __init__(self, baseclass, system, converter):
        self.baseclass = baseclass
        self.system = system
        self.converter = converter

    def _new_loaded_instance(self):
        # Fresh worker code holding a copy of the system's particles.
        instance = self.baseclass(self.converter)
        instance.initialize_code()
        instance.parameters.epsilon_squared = self.system.parameters.epsilon_squared
        instance.particles.add_particles(self.system.particles.copy())
        return instance

    def get_gravity_at_point(self, radius, x, y, z):
        """Evaluate gravity via a throwaway instance of the base class."""
        instance = self._new_loaded_instance()
        ax, ay, az = instance.get_gravity_at_point(radius, x, y, z)
        instance.stop()
        return ax, ay, az

    def get_potential_at_point(self, radius, x, y, z):
        """Evaluate the potential via a throwaway instance of the base class."""
        instance = self._new_loaded_instance()
        phi = instance.get_potential_at_point(radius, x, y, z)
        instance.stop()
        return phi
| 2,340
| 30.213333
| 84
|
py
|
amuse
|
amuse-main/src/amuse/ext/galactics_model.py
|
from amuse.community.galactics.interface import GalactICs, GalactICsInterface
from amuse.community.galactics.gas_interface import GaslactICs, GaslactICsInterface
from amuse.datamodel import ParticlesWithUnitsConverted, Particles
from amuse.datamodel.particles import ParticlesSuperset
def _new_galactics_model(halo_number_of_particles, unit_system_converter=None, do_scale=False, verbose=False, **keyword_arguments):
    """
    Generate a GalactICs galaxy model.

    :argument halo_number_of_particles: number of halo particles
    :argument unit_system_converter: nbody-to-SI converter; None keeps generic units
    :argument do_scale: rescale to standard nbody values (slow for large
        models; not supported when the model contains gas)
    :argument verbose: show code output and the adopted parameters
    The 'code' keyword argument selects the worker class; all remaining
    keyword arguments are set as parameters on that code.

    Returns the particle set, or a (gas_particles, other_particles) tuple
    when the generated model contains gas.
    """
    code=keyword_arguments.pop("code")
    instance = code(unit_converter=unit_system_converter, redirection="none" if verbose else "null")
    instance.parameters.halo_number_of_particles = halo_number_of_particles
    for (key, value) in keyword_arguments.items():
        setattr(instance.parameters, key, value)
    if verbose:
        print("adopted galaxy model parameters:")
        print(instance.parameters)
    instance.generate_particles()
    result = instance.particles.copy()
    if hasattr(instance,"gas_particles") and len(instance.gas_particles)>0:
        resultgas=instance.gas_particles.copy()
    else:
        resultgas=Particles()
    instance.stop()
    # Centre the whole model (gas and non-gas together when both exist).
    if len(resultgas)>0:
        allpart=ParticlesSuperset([result, resultgas])
    else:
        allpart=result
    allpart.move_to_center()
    # do_scale *is* possible for case with unit converter
    # note that in case of scaling the output galaxy parameters may be very different from the model
    # parameters input
    if do_scale:
        print("Warning: do_scale for a large galactics model may be very slow")
        if verbose:
            print("Warning: do_scale typically changes the galaxy scale parameters quite a lot from the input parameters")
        if len(resultgas)>0:
            # this is fixable
            raise Exception("scaling of galaxy models with gas currently not possible")
        allpart.scale_to_standard(convert_nbody=unit_system_converter)
    if not unit_system_converter is None:
        # Present the output in generic (nbody) units when a converter is given.
        result = ParticlesWithUnitsConverted(result, unit_system_converter.as_converter_from_si_to_generic())
        result = result.copy()
        if len(resultgas)>0:
            resultgas = ParticlesWithUnitsConverted(resultgas, unit_system_converter.as_converter_from_si_to_generic())
            resultgas = resultgas.copy()
    if len(resultgas)>0:
        # resultincludes the gas particles (but not under the same keys)
        return resultgas, result
    else:
        return result
def _create_docstring(code, codeInterface):
_tmp_instance = code()
docstring = "\nGalactICs documentation:\n" + codeInterface.__doc__ + \
"\n" + _tmp_instance.parameters.__doc__ + "\n"
_tmp_instance.stop()
return docstring
def _newModelWrapperWrapper(code, codeInterface):
    # Build a callable wrapper class whose __doc__ is generated on demand from
    # a live code instance -- both on the class (via the metaclass property)
    # and on instances (via the instance-level property).
    class _NewModelMetaclass(type):
        def _get_doc(self):
            return _create_docstring(code, codeInterface)
        __doc__ = property(_get_doc)
    class _NewModelWrapper(object, metaclass=_NewModelMetaclass):
        def _get_doc(self):
            return _create_docstring(code, codeInterface)
        @property
        def __doc__(self):
            return self._get_doc()
        def __call__(self, *args, **kwargs):
            # Delegate to the generic factory with the bound code class.
            return _new_galactics_model(*args, code=code,**kwargs)
    return _NewModelWrapper
# Public factory callables: each wraps _new_galactics_model with a fixed code class.
new_galactics_model = _newModelWrapperWrapper(GalactICs, GalactICsInterface)()
# these return two particle sets (gas, other_particles)..
new_galactics_gas_model = _newModelWrapperWrapper(GaslactICs, GaslactICsInterface)()
new_gaslactics_model = new_galactics_gas_model  # backwards-compatible alias
| 3,541
| 39.712644
| 131
|
py
|
amuse
|
amuse-main/src/amuse/ext/bridge.py
|
"""
bridge-like integrator for amuse
the bridge class provides a bridge like coupling between different
gravitational integrators. In this way a system composed of multiple
components can be evolved taking account of the self gravity of the whole
system self consistently, while choosing the most appropriate integrator
for the self-gravity of the component systems. This is mainly useful for
systems that consist of two or more components that are either well separated
spatially or have different scales (otherwise using a single integrator is
more efficient)
The main idea is that systems experience each others gravity through
periodic velocity kicks with ordinary evolution in between - the evolution
is thus described by an alternation of drift (D) and kick (K) operators,
here chosen as:
K(1/2 dt) D(dt) K(1/2 dt)
K(dt) denotes a kick of the velocities over a timestep dt, while D(dt)
denotes a drift, meaning secular evolution using self gravity of the
system, over dt.
implementation notes:
In order to use bridge the component systems should be initialized as usual,
then a bridge systems is initialized, after which one or more systems are
added:
from amuse.ext.bridge import bridge
bridgesys=bridge(verbose=False)
bridgesys.add_system(galaxy, (cluster,), False)
bridgesys.add_system(cluster, (galaxy,), True )
bridge builds on the full gravity interface, so unit handling etc is
guaranteed. Bridge itself is a (somewhat incomplete) gravity interface,
so the usual evolve, get_potential methods work (and bridge can be a
component in a bridge systems). Note that a single coordinate system should
be used at the moment for all the components systems (different units are
allowed though). The call to add systems, for example:
   bridgesys.add_system(galaxy, (cluster,), False)
has three arguments: the system, a set with *interaction* partners and
a flag to specify whether synchronization is needed. The
interaction partners indicate which systems will kick the system. In the
most simple case these would be the set of other systems that are added,
but usually this is not what you want to get good performance. In some
cases you want to ignore one direction of interaction (eg. in a combined
simulation of a galaxy and a comet orbits around a star you may want the
ignore the gravity of the comet), in other cases you want to use a
different force calculator (eg integrating a cluster in a galaxy where
the galaxy is evolved with a tree code and the cluster with a direct sum
code, one also would want to use a tree code to calculate the cluster
gravity for the galaxy. In such a case one can derive a skeleton gravity
interface from the cluster system. A module is provided with some
examples of such *derived* systems, derived_grav_systems.py
Hints for good use:
The bridgesys is flexible but care should be taken in order to obtain
valid results. For one thing, there is no restriction or check on the
validity of the assumption of well separated dynamics: for example any
system could be split up and put together in bridge, but if the timestep
is chosen to be larger than the timestep criterion of the code, the
integration will show errors.
For good performance one should use derived systems to reduce the
complexity where possible.
There is an issue with the synchronization: some codes do not end on the
exact time of an evolve, or need an explicit sync call. In these cases it
is up to the user to determine whether bridge can be used (an explicit
sync call may induce extra errors that degrade the order of the
integrator).
"""
# issues:
# - for now, units in si
# - a common coordinate system is used for all systems
# - sync of systems should be checked
# - timestepping: adaptive dt?
import threading
from amuse.units import quantities
from amuse.units.quantities import sign
from amuse.units import units
from amuse import datamodel
def potential_energy(system, get_potential):
    """Potential energy of system's particles in an external field: (1/2) sum(m * phi)."""
    bodies = system.particles.copy()
    phi = get_potential(bodies.radius, bodies.x, bodies.y, bodies.z)
    return (phi * bodies.mass).sum() / 2
def kick_system(system, get_gravity, dt):
    """Apply a velocity kick a*dt from an external gravity field to system's particles."""
    bodies = system.particles.copy()
    ax, ay, az = get_gravity(bodies.radius, bodies.x, bodies.y, bodies.z)
    bodies.vx = bodies.vx + dt * ax
    bodies.vy = bodies.vy + dt * ay
    bodies.vz = bodies.vz + dt * az
    # Push only the updated velocities back to the live particle set.
    channel = bodies.new_channel_to(system.particles)
    channel.copy_attributes(["vx", "vy", "vz"])
class bridge(object):
    def __init__(self,verbose=False,method=None, use_threading=True, time=None):
        """
        verbose indicates whether to output some run info

        :argument method: optional operator-splitting function taking
            (kick, drift, timestep); None selects the default joined leapfrog
        :argument use_threading: evolve component systems in parallel threads
        :argument time: initial bridge time (defaults to zero)
        """
        self.systems=set()            # component systems
        self.partners=dict()          # per system: the systems that kick it
        self.time_offsets=dict()      # per system: bridge time minus its model_time at add time
        if time is None:
            time=quantities.zero
        self.time=time
        self.do_sync=dict()           # per system: synchronize before kicks?
        self.verbose=verbose
        self.timestep=None            # default timestep used by evolve_model
        self.method=method
        self.use_threading=use_threading
def add_system(self, interface, partners=set(),do_sync=True):
"""
add a system to bridge integrator
"""
if hasattr(interface,"model_time"):
self.time_offsets[interface]=(self.time-interface.model_time)
else:
self.time_offsets[interface]=quantities.zero
self.systems.add(interface)
for p in partners:
if not hasattr(p,"get_gravity_at_point"):
return -1
self.partners[interface]=partners
self.do_sync[interface]=do_sync
return 0
    def evolve_model(self,tend,timestep=None):
        """
        evolve combined system to tend, timestep fixes timestep

        When no timestep is given, self.timestep is used; failing that, the
        whole remaining interval (tend - self.time) is taken in one step.
        """
        if timestep is None:
            if self.timestep is None:
                timestep=tend-self.time
            else:
                timestep=self.timestep
        # Make the step sign follow the integration direction, so evolving
        # backwards in time also works.
        timestep=sign(tend-self.time)*abs(timestep)
        if self.method==None:
            return self.evolve_joined_leapfrog(tend,timestep)
        else:
            return self.evolve_simple_steps(tend,timestep)
    def evolve_simple_steps(self,tend,timestep):
        # Repeatedly apply the user-supplied splitting method until tend is
        # reached to within half a step (in the direction of integration).
        while sign(timestep)*(tend - self.time) > sign(timestep)*timestep/2 : #self.time < (tend-timestep/2):
            self._drift_time=self.time
            self._kick_time=self.time
            self.method(self.kick_systems,self.drift_systems_dt, timestep)
            self.time=self.time+timestep
        return 0
    def evolve_joined_leapfrog(self,tend,timestep):
        """
        Default K(dt/2) D(dt) K(dt/2) leapfrog where the adjacent half-kicks
        of consecutive steps are joined into single full kicks; one closing
        half-kick finishes the sequence.
        """
        first=True
        self._drift_time=self.time
        self._kick_time=self.time
        while sign(timestep)*(tend - self.time) > sign(timestep)*timestep/2: #self.time < (tend-timestep/2):
            if first:
                self.kick_systems(timestep/2)   # opening half-kick
                first=False
            else:
                self.kick_systems(timestep)     # two joined half-kicks
            self.drift_systems(self.time+timestep)
            self.time=self.time+timestep
        if not first:
            self.kick_systems(timestep/2)       # closing half-kick
        return 0
def synchronize_model(self):
"""
explicitly synchronize all components
"""
for x in self.systems:
if hasattr(x,"synchronize_model"):
if(self.verbose): print(x.__class__.__name__,"is synchronizing", end=' ')
x.synchronize_model()
if(self.verbose): print(".. done")
def get_potential_at_point(self,radius,x,y,z):
pot=quantities.zero
for sys in self.systems:
_pot=sys.get_potential_at_point(radius,x,y,z)
pot=pot+_pot
return pot
def get_gravity_at_point(self,radius,x,y,z):
ax=quantities.zero
ay=quantities.zero
az=quantities.zero
for sys in self.systems:
_ax,_ay,_az=sys.get_gravity_at_point(radius,x,y,z)
ax=ax+_ax
ay=ay+_ay
az=az+_az
return ax,ay,az
    @property
    def model_time(self):
        # Bridge-level clock; component codes may lag by their time_offsets.
        return self.time
    @property
    def potential_energy(self):
        # Sum of each system's internal potential energy plus the cross
        # terms between a system and its partners.
        Ep=quantities.zero
        for x in self.systems:
            Ep+=x.potential_energy
            if hasattr(x,"particles") and len(x.particles)>0:
                for y in self.partners[x]:
                    _Ep = potential_energy(x,y.get_potential_at_point)
                    if hasattr(y,"particles"):
                        # NOTE(review): partners that have particles appear
                        # to be counted from both directions of the pairwise
                        # loop, hence a single _Ep here, while pure external
                        # fields (no particles) get the factor 2 -- confirm
                        # against the module-level potential_energy helper.
                        Ep+=_Ep
                    else:
                        Ep+=2*_Ep
        return Ep
@property
def kinetic_energy(self):
Ek=quantities.zero
for x in self.systems:
Ek+=x.kinetic_energy
return Ek
@property
def thermal_energy(self):
result=quantities.zero
for x in self.systems:
if hasattr(x,'thermal_energy'):
result+=x.thermal_energy
return result
@property
def particles(self):
arr=[]
for x in self.systems:
if hasattr(x,"particles"):
arr.append(x.particles)
return datamodel.ParticlesSuperset(arr)
@property
def gas_particles(self):
arr=[]
for x in self.systems:
if hasattr(x,"gas_particles"):
arr.append(x.gas_particles)
return datamodel.ParticlesSuperset(arr)
# 'private' functions
    def drift_systems_dt(self,dt):
        # Drift operator handed to splitting schemes: advance the internal
        # drift clock by dt, then drift all systems to that absolute time.
        self._drift_time+=dt
        self.drift_systems(self._drift_time)
    def drift_systems(self,tend):
        # Evolve every component system to time tend, corrected for the
        # system's own clock offset, optionally running the codes in
        # parallel threads.
        threads=[]
        for x in self.systems:
            if hasattr(x,"evolve_model"):
                offset=self.time_offsets[x]
                if(self.verbose):
                    print("evolving", x.__class__.__name__, end=' ')
                threads.append(threading.Thread(target=x.evolve_model, args=(tend-offset,)) )
        if self.use_threading:
            for x in threads:
                x.start()
            # Wait until every code has reached tend before returning.
            for x in threads:
                x.join()
        else:
            # Sequential fallback: run each thread's target in this thread.
            for x in threads:
                x.run()
        if(self.verbose):
            print(".. done")
        return 0
    def kick_systems(self,dt):
        # First synchronize the codes that request it, then apply velocity
        # kicks to each system's particles from every partner's gravity
        # field over the interval dt.
        for x in self.systems:
            if self.do_sync[x]:
                if hasattr(x,"synchronize_model"):
                    if(self.verbose): print(x.__class__.__name__,"is synchronizing", end=' ')
                    x.synchronize_model()
                    if(self.verbose): print(".. done")
        for x in self.systems:
            if hasattr(x,"particles") and len(x.particles)>0:
                for y in self.partners[x]:
                    if x is not y:
                        if(self.verbose): print(x.__class__.__name__,"receives kick from",y.__class__.__name__, end=' ')
                        kick_system(x,y.get_gravity_at_point,dt)
                        if(self.verbose): print(".. done")
        return 0
| 11,167
| 35.858086
| 121
|
py
|
amuse
|
amuse-main/src/amuse/ext/relax_sph.py
|
import numpy
from amuse.units import units, nbody_system
from amuse.couple.bridge import Bridge
from amuse.plot import native_plot, semilogy, loglog, xlabel, ylabel
def no_monitoring(system, i_step, time, n_steps):
    """Default progress callback for relax(): do nothing."""
    return None
def monitor_energy(system, i_step, time, n_steps):
    """Progress callback printing potential (U), thermal (Q) and kinetic (K) energy in Joules."""
    unit = units.J
    potential = system.potential_energy.value_in(unit)
    thermal = system.thermal_energy.value_in(unit)
    kinetic = system.kinetic_energy.value_in(unit)
    print("Step {0}, t={1}: U={2:.2e}, Q={3:.2e}, K={4:.2e} {5}".format(
        i_step, time.as_quantity_in(units.yr), potential, thermal, kinetic, unit))
class Memory:
    # Empty attribute holder; used as persistent state between calls of the
    # monitoring callbacks (see monitor_density_profile).
    pass
def monitor_density_profile(system, i_step, time, n_steps, memory=Memory()):
    # Progress callback plotting the gas density profile each step.
    # NOTE: the mutable default `memory=Memory()` is deliberate -- the same
    # object persists across calls and carries the axis limits of the
    # previous plot into the next one.
    if i_step == 0:
        memory.xlimits = (None, None)
        memory.ylimits = (None, None)
    # Density versus squared distance from the gas center of mass.
    position = system.gas_particles.position - system.gas_particles.center_of_mass()
    loglog(position.lengths_squared(), system.gas_particles.density, 'gs')
    native_plot.title("{0}: t={1}".format(i_step, time.as_quantity_in(units.yr)))
    native_plot.xlim(memory.xlimits)
    native_plot.ylim(memory.ylimits)
    native_plot.pause(0.0001)
    # Remember the (possibly user-adjusted) limits for the next call.
    memory.xlimits = native_plot.gca().get_xlim()
    memory.ylimits = native_plot.gca().get_ylim()
    if i_step == n_steps-1:
        native_plot.show(block=True)
    native_plot.cla()
def relax(gas_particles, hydro, gravity_field=None, monitor_func=no_monitoring,
        bridge_options=dict()):
    """
    Relax a set of SPH particles by evolving it with a hydrodynamics code, while
    imposing critical damping on the particle velocities.
    :argument gas_particles: The set of SPH particles
    :argument hydro: The hydrodynamics code
    :argument gravity_field Background gravitational field, must support get_gravity_at_point
    :argument monitor_func For monitoring progress each step. User-defined function or "energy"
    :argument bridge_options: Keyword options passed to Bridge
    """
    if monitor_func == "energy":
        monitor_func = monitor_energy
    t_end_in_t_dyn = 2.5 # Relax for this many dynamical timescales
    t_end = t_end_in_t_dyn * gas_particles.dynamical_timescale(mass_fraction=0.9)
    n_steps = 250
    velocity_damp_factor = 1.0 - (2.0*numpy.pi*t_end_in_t_dyn)/n_steps # Critical damping
    in_hydro = hydro.gas_particles.add_particles(gas_particles)
    if gravity_field is None:
        system = hydro
    else:
        # Couple the hydrodynamics to the background field via Bridge kicks.
        system = Bridge(timestep=(t_end/n_steps).as_quantity_in(units.yr), **bridge_options)
        system.add_system(hydro, [gravity_field])
    for i_step, time in enumerate(t_end * numpy.linspace(1.0/n_steps, 1.0, n_steps)):
        system.evolve_model(time)
        # Damp the velocities after each step to drain kinetic energy.
        hydro.gas_particles.velocity = velocity_damp_factor * hydro.gas_particles.velocity
        monitor_func(system, i_step, time, n_steps)
    # Return a snapshot of the relaxed particles as stored in the hydro code.
    return in_hydro.copy()
if __name__ == "__main__":
    from amuse.io import write_set_to_file
    from amuse.ext.spherical_model import new_gas_plummer_distribution, new_plummer_distribution
    from amuse.community.gadget2.interface import Gadget2
    from amuse.community.fastkick.interface import FastKick
    # Demo: relax a 1000-particle gas Plummer sphere in the background field
    # of 10 stars, then write the result to file.
    gas = new_gas_plummer_distribution(1000, virial_radius=1|units.parsec, total_mass=1000|units.MSun, type="fcc")
    stars = new_plummer_distribution(10, virial_radius=1|units.parsec, total_mass=100|units.MSun, type="sobol")
    dynamical_timescale = gas.dynamical_timescale()
    converter = nbody_system.nbody_to_si(dynamical_timescale, 1|units.parsec)
    hydro = Gadget2(converter, number_of_workers=2)
    hydro.parameters.time_max = 3 * dynamical_timescale
    hydro.parameters.max_size_timestep = dynamical_timescale / 100
    hydro.parameters.time_limit_cpu = 1.0 | units.Gyr
    gravity_field_code = FastKick(converter)
    gravity_field_code.particles.add_particles(stars)
    relaxed_gas = relax(gas, hydro, gravity_field=gravity_field_code, monitor_func="energy", bridge_options=dict(verbose=True))
    gravity_field_code.stop()
    hydro.stop()
    write_set_to_file(relaxed_gas, "gas_relaxed.amuse", "amuse")
| 4,067
| 43.217391
| 127
|
py
|
amuse
|
amuse-main/src/amuse/ext/sticky_spheres.py
|
import sys
import numpy
from amuse.units import units
from amuse.datamodel import Particles
from amuse.support.exceptions import AmuseException
class StickySpheres(object):
    """
    Collision resolver that treats colliding particles as "sticky spheres":
    a perfectly inelastic merger. Mass and momentum are conserved, while all
    kinetic energy in the center-of-mass frame is dissipated.

    An optional mass_loss fraction (in [0, 1)) removes that fraction of the
    total mass from the system during the collision, carrying away momentum
    and energy; the center-of-mass velocity is conserved regardless.
    """
    stellar_evolution_code_required = False
    gravity_code_required = False

    def __init__(self, mass_loss=0):
        if not (0 <= mass_loss < 1):
            raise AmuseException("Mass-loss fraction must be in the range [0, 1)")
        self.mass_loss = mass_loss

    def handle_collision(self, primary, secondary):
        """Merge primary and secondary into one new particle and return it."""
        colliders = primary + secondary
        merged = Particles(1)
        merged.mass = colliders.total_mass() * (1 - self.mass_loss)
        merged.position = colliders.center_of_mass()
        merged.velocity = colliders.center_of_mass_velocity()
        if hasattr(colliders, "radius"):
            merged.radius = colliders.radius.amax()
        return merged
| 1,386
| 33.675
| 82
|
py
|
amuse
|
amuse-main/src/amuse/ext/star_to_sph.py
|
import numpy
import os.path
import pickle
from collections import namedtuple
from amuse.community.gadget2.interface import Gadget2
from amuse.ext.spherical_model import EnclosedMassInterpolator
from amuse.ext.spherical_model import new_spherical_particle_distribution
from amuse.support.exceptions import AmuseException, AmuseWarning
from amuse.units.quantities import zero
from amuse.units import units, constants
from amuse.units.generic_unit_converter import ConvertBetweenGenericAndSiUnits
from amuse.support.console import set_printing_strategy
from amuse.datamodel import Particles, Particle
__all__ = ["StellarModel2SPH", "convert_stellar_model_to_SPH", "pickle_stellar_model"]
StellarModelInSPH = namedtuple('StellarModelInSPH', ['gas_particles', 'core_particle', 'core_radius'])
class StellarModel2SPH(object):
    """
    Requests the internal structure of the star from a Stellar Evolution
    legacy code and converts it into an SPH model consisting of the
    specified number of particles. Useful for merging stars.
    :argument particle: Star particle to be converted to an SPH model
    :argument number_of_sph_particles: Number of gas particles in the resulting model
    :argument with_core_particle: Model the core as a heavy, non-sph particle
    :argument target_core_mass: If (with_core_particle): target mass for the non-sph particle
    :argument do_relax: Relax the SPH model - doesn't seem to work satisfactorily yet!
    :argument pickle_file: If provided, read stellar structure from here instead of using 'particle'
    :argument do_store_composition: If set, store the local chemical composition on each particle
    :argument base_grid_options: dict() with options for the initial distribution,
        see new_uniform_spherical_particle_distribution
    """
    def __init__(self, particle, number_of_sph_particles, seed = None,
            do_relax = False, sph_code = Gadget2, compatible_converter = ConvertBetweenGenericAndSiUnits,
            with_core_particle = False, target_core_mass = None, pickle_file = None,
            do_store_composition = True, gamma=5.0/3.0, base_grid_options = dict(type = "bcc")):
        self.particle = particle
        self.number_of_sph_particles = number_of_sph_particles
        self.with_core_particle = with_core_particle
        self.target_core_mass = target_core_mass
        self.core_radius = None
        self.core_mass = None
        self.pickle_file = pickle_file
        if seed:
            numpy.random.seed(seed)
        self.do_store_composition = do_store_composition
        self.gamma = gamma
        self.base_grid_options = base_grid_options
        self.do_relax = do_relax
        self.sph_code = sph_code # used to relax the SPH model
        self.compatible_converter = compatible_converter
    def retrieve_stellar_structure(self):
        """Query the stellar evolution code for the star's radial profiles."""
        self.number_of_zones = self.particle.get_number_of_zones()
        if self.do_store_composition:
            self.number_of_species = self.particle.get_number_of_species()
            self.species_names = self.particle.get_names_of_species(number_of_species = self.number_of_species)
            self.composition_profile = self.particle.get_chemical_abundance_profiles(
                number_of_zones = self.number_of_zones, number_of_species = self.number_of_species)
        self.density_profile = self.particle.get_density_profile(number_of_zones = self.number_of_zones)
        self.radius_profile = self.particle.get_radius_profile(number_of_zones = self.number_of_zones)
        temperature_profile = self.particle.get_temperature_profile(number_of_zones = self.number_of_zones)
        self.mu_profile = self.particle.get_mu_profile(number_of_zones = self.number_of_zones)
        # Ideal monatomic gas: u = (3/2) kB T / mu.
        self.specific_internal_energy_profile = (1.5 * constants.kB * temperature_profile / self.mu_profile).as_quantity_in(units.m**2/units.s**2)
        # Note: self.radius is in increasing order; from center to surface
        radius_profile = [0] | units.m
        radius_profile.extend(self.radius_profile) # outer radius of each mesh zone
        self.midpoints_profile = -(radius_profile[1:2])/2 # dummy element to handle boundaries correctly
        self.midpoints_profile.extend((radius_profile[1:] + radius_profile[:-1])/2) # real midpoints of each mesh zone
        self.midpoints_profile.append(2*self.midpoints_profile[-1] - self.midpoints_profile[-2]) # dummy element to handle boundaries correctly
        self.mass = self.particle.mass
        self.radius = self.particle.radius
    def unpickle_stellar_structure(self):
        """Load previously pickled stellar structure (see pickle_stellar_model)."""
        if os.path.isfile(self.pickle_file):
            infile = open(self.pickle_file, 'rb')
        else:
            raise AmuseException("Input pickle file '{0}' does not exist".format(self.pickle_file))
        structure = pickle.load(infile)
        self.mass = structure['mass']
        self.radius = structure['radius']
        self.number_of_zones = structure['number_of_zones']
        self.number_of_species = structure['number_of_species']
        self.species_names = structure['species_names']
        self.density_profile = structure['density_profile']
        self.radius_profile = structure['radius_profile']
        self.mu_profile = structure['mu_profile']
        self.composition_profile = structure['composition_profile']
        self.specific_internal_energy_profile = structure['specific_internal_energy_profile']
        self.midpoints_profile = structure['midpoints_profile']
    def setup_core_parameters(self):
        """
        Determine the core mass and radius of the non-SPH core particle by
        bisecting on the zone index where the core is cut out, and replace
        the inner density/energy profiles with the adjusted model.
        """
        if self.target_core_mass is None:
            if hasattr(self.particle, "core_mass"):
                self.target_core_mass = self.particle.core_mass
            else:
                raise AmuseException("Requested model has with_core_particle=True, but no target_core_mass specified.")
        # Polytropic entropy-like quantity K = (gamma-1) u rho^(1-gamma).
        self.original_entropy = (self.gamma - 1.0) * (self.specific_internal_energy_profile *
            self.density_profile**(1.0-self.gamma))
        interpolator = EnclosedMassInterpolator()
        interpolator.initialize(self.radius_profile, self.density_profile)
        i_edge = numpy.searchsorted(interpolator.enclosed_mass.number, self.target_core_mass.value_in(interpolator.enclosed_mass.unit))-1
        min_i = i_edge
        max_i = len(self.radius_profile)-3
        enclosed_mass_edge = interpolator.enclosed_mass[min_i+1]
        min_enclosed_mass_residual = self.construct_model_with_core(min_i, enclosed_mass_edge, self.gamma)
        enclosed_mass_edge = interpolator.enclosed_mass[max_i+1]
        max_enclosed_mass_residual = self.construct_model_with_core(max_i, enclosed_mass_edge, self.gamma)
        # Shrink the upper bracket until the residual changes sign.
        while (max_enclosed_mass_residual < zero) and (max_i > min_i + 20):
            max_i -= 20
            enclosed_mass_edge = interpolator.enclosed_mass[max_i+1]
            max_enclosed_mass_residual = self.construct_model_with_core(max_i, enclosed_mass_edge, self.gamma)
        if (min_enclosed_mass_residual > zero) or (max_enclosed_mass_residual < zero):
            raise AmuseException("Requested target_core_mass of {0} is out of range.".format(self.target_core_mass))
        # Bisection on the cut index.
        while max_i - min_i > 1:
            next_i = (max_i + min_i)//2
            enclosed_mass_edge = interpolator.enclosed_mass[next_i+1]
            enclosed_mass_residual = self.construct_model_with_core(next_i, enclosed_mass_edge, self.gamma)
            if enclosed_mass_residual >= zero:
                max_i = next_i
            else:
                min_i = next_i
        if enclosed_mass_residual < zero:
            enclosed_mass_edge = interpolator.enclosed_mass[max_i+1]
            self.construct_model_with_core(max_i, enclosed_mass_edge, self.gamma)
        self.density_profile = self.rho
        self.specific_internal_energy_profile = self.u
        interpolator.initialize(self.radius_profile, self.density_profile)
        self.core_mass = self.mass - interpolator.enclosed_mass[-1]
        self.core_radius = self.radius_profile[max_i] / 2.8
        self.mass = self.mass - self.core_mass
    def construct_model_with_core(self, i_edge, m_enc_edge, gamma):
        """
        Integrate the density/energy profiles inward from zone i_edge with a
        softened point-mass core; returns the leftover enclosed-mass residual
        (positive means the core can absorb more mass).
        """
        r = self.radius_profile
        rho = self.density_profile * 1
        u = self.specific_internal_energy_profile * 1
        m_enc = m_enc_edge
        r_c = r[i_edge]
        entropy = self.original_entropy * 1
        #~ entropy[:i_edge+1] = (entropy[:i_edge+1] + entropy[i_edge+1]) / 2.0
        entropy[:i_edge+1] = entropy[i_edge+1]
        d_entropy = entropy[1:] - entropy[:-1]
        # Integrals over r**2 times the cubic spline kernel W of Monaghan & Lattanzio (1985)
        # W = 8 / (pi * r_c**3) * (1 - 6 (r/r_c)**2 + 6 (r/r_c)**3) for 0 < r/r_c < 0.5
        # W = 8 / (pi * r_c**3) * 2 * (1 - (r/r_c)**3) for 0.5 < r/r_c < 1
        def int_Wr2_A(x): # integral over [(pi*r_c**3)/8 * r**2 * W(r)] for 0 < r/r_c < 0.5
            return (x**3 / 3.0) - (1.2 * x**5 / r_c**2) + (x**6 / r_c**3)
        def int_Wr2_B(x): # integral over [(pi*r_c**3)/8 * r**2 * W(r)] for 0.5 < r/r_c < 1
            return (x**3 / 1.5) - (1.5 * x**4 / r_c) + (1.2 * x**5 / r_c**2) - (x**6 / (3.0 * r_c**3))
        for i in range(i_edge, 0, -1):
            r_out = r[i]
            r_in = r[i-1]
            # Piecewise kernel integral over the zone [r_in, r_out].
            if r_out < 0.5 * r_c:
                W_int_m_enc = int_Wr2_A(r_out) - int_Wr2_A(r_in)
            elif r_in > 0.5 * r_c:
                W_int_m_enc = int_Wr2_B(r_out) - int_Wr2_B(r_in)
            else:
                W_int_m_enc = int_Wr2_B(r_out) - int_Wr2_B(r_c/2.0) + int_Wr2_A(r_c/2.0) - int_Wr2_A(r_in)
            # Hydrostatic equilibrium step for the density change inward.
            delta_rho = (rho[i]**(2.0-gamma) * constants.G * m_enc * (r_out -r_in) / (gamma * entropy[i] * r[i]**2) +
                rho[i] * d_entropy[i] / (gamma * entropy[i]))
            m_enc -= 4.0 * constants.pi * rho[i] * (r_out**3 - r_in**3) / 3.0 + 32 * self.target_core_mass * W_int_m_enc / r_c**3
            if m_enc < zero:
                break
            rho[i-1] = rho[i] + delta_rho
            u[i-1] = entropy[i-1] * rho[i-1]**(gamma-1.0) / (gamma-1.0)
        self.rho = rho
        self.u = u
        return m_enc
    def get_index(self, value, sorted_vector):
        """Index of the bin in sorted_vector containing value (scalar variant)."""
        if not sorted_vector[0] <= value <= sorted_vector[-1]:
            raise AmuseException("Can't find a valid index. {0} is not in "
                "the range [{1}, {2}].".format(value, sorted_vector[0], sorted_vector[-1]))
        index = numpy.searchsorted(sorted_vector, value)
        return max(index - 1, 0)
    def get_indices(self, values, sorted_vector):
        """Vectorized variant of get_index for an array of values."""
        values = numpy.array(values.value_in(sorted_vector.unit))
        sorted_vector = sorted_vector.number
        if values.min() < sorted_vector[0] or values.max() > sorted_vector[-1]:
            raise AmuseException("Can't find a valid index. Value not in "
                "the range [{0}, {1}].".format(sorted_vector[0], sorted_vector[-1]))
        indices = numpy.maximum(numpy.searchsorted(sorted_vector, values) - 1, 0)
        return indices
    def calculate_interpolation_coefficients(self, radial_positions):
        """Return (bin indices, linear weights) for interpolating at the given radii."""
        # indices = numpy.array([self.get_index(r, self.midpoints_profile) for r in radial_positions])
        indices=self.get_indices(radial_positions,self.midpoints_profile)
        delta = (self.midpoints_profile[indices+1] - radial_positions) / (
            self.midpoints_profile[indices+1] - self.midpoints_profile[indices])
        return indices, delta
    def interpolate_internal_energy(self, radial_positions, do_composition_too = True):
        """
        Linearly interpolate the specific internal energy (and optionally
        composition and mu) of the stellar model at the given radii.
        Returns (energies, composition-or-None, mu-or-None).
        """
        indices, delta = self.calculate_interpolation_coefficients(radial_positions)
        one_minus_delta = 1 - delta
        # Profiles are padded at both ends so boundary bins interpolate to
        # the edge values.
        extended = self.specific_internal_energy_profile.copy()
        extended.prepend(self.specific_internal_energy_profile[0])
        extended.append(self.specific_internal_energy_profile[-1])
        interpolated_energies = extended[indices]*delta + extended[indices+1]*one_minus_delta
        if do_composition_too:
            comp = []
            for species in self.composition_profile:
                extended = list(species[:1])
                extended.extend(species)
                extended.append(species[-1])
                extended = numpy.asarray(extended)
                comp.append(extended[indices]*delta + extended[indices+1]*one_minus_delta)
            comp = numpy.asarray(comp)
            extended = self.mu_profile.copy()
            extended.prepend(self.mu_profile[0])
            extended.append(self.mu_profile[-1])
            mu = extended[indices]*delta + extended[indices+1]*one_minus_delta
            return interpolated_energies, comp.transpose(), mu.as_quantity_in(self.mu_profile.unit)
        else:
            return interpolated_energies, None, None
    def convert_to_SPH(self):
        """Draw equal-mass SPH particles following the model's density profile."""
        sph_particles = new_spherical_particle_distribution(
            self.number_of_sph_particles,
            radii = self.radius_profile, densities = self.density_profile,
            **self.base_grid_options
        )
        sph_particles.mass = (self.mass.number * 1.0 /
            self.number_of_sph_particles) | self.mass.unit
        sph_particles.velocity = [0,0,0] | units.m/units.s
        # Crude estimate of the smoothing length; the SPH code will calculate the true value itself.
        sph_particles.h_smooth = (self.radius * (self.number_of_sph_particles/50.0)**(-1/3.0)).as_quantity_in(self.radius.unit)
        return sph_particles
    def relax(self, particles):
        """
        Iteratively damp the SPH model toward hydrostatic equilibrium by
        nudging particle positions along the accelerations; stops when
        successive accelerations decorrelate. Returns log strings per iteration.
        """
        num_iterations = 20
        max_delta = 0.01 # maximum change to particle positions relative to its smoothing length
        result = []
        previous_acc = 0 | units.m / units.s**2
        unit_converter = self.compatible_converter(self.radius, self.mass, 1.0e-3 | units.s)
        hydro_code = self.sph_code(unit_converter)
        particles.u = 1.0 | (units.m / units.s)**2
        hydro_code.gas_particles.add_particles(particles)
        for i in range(1, num_iterations+1):
            hydro_code.gas_particles.u, tmp, tmp2 = self.interpolate_internal_energy(particles.position.lengths(), do_composition_too = False)
            hydro_code.evolve_model(i * (1.0e-5 | units.s))
            accelerations = hydro_code.gas_particles.acceleration
            acc_correlated = (previous_acc * accelerations).sum() / (accelerations * accelerations).sum()
            if (acc_correlated < 0.5 | units.none and i > 2):
                break
            previous_acc = accelerations
            internal_energies = hydro_code.gas_particles.u
            smoothing_lengths = hydro_code.gas_particles.h_smooth
            factor = numpy.minimum((max_delta * internal_energies / (accelerations.lengths() * smoothing_lengths)), 0.5)
            result.append(str(i) + ": Accelerations correlated: " + str(acc_correlated) + ", median factor: " + str(numpy.median(factor)))
            particles.position += accelerations * ((smoothing_lengths * smoothing_lengths * factor) /
                internal_energies).reshape((self.number_of_sph_particles, 1))
            hydro_code.gas_particles.position = particles.position
            hydro_code.gas_particles.velocity = particles.velocity
        particles.u = hydro_code.gas_particles.u
        hydro_code.stop()
        if i == num_iterations:
            print("\nUnable to converge to stable SPH model within {0} iterations.".format(num_iterations))
        else:
            print("\nSuccessfully converged to stable SPH model within {0} iterations.".format(i-1))
        return result
    @property
    def result(self):
        # Main entry point: build and return the StellarModelInSPH tuple.
        if self.pickle_file is None:
            self.retrieve_stellar_structure()
        else:
            self.unpickle_stellar_structure()
        if self.with_core_particle:
            self.setup_core_parameters()
        sph_particles = self.convert_to_SPH()
        if self.do_relax:
            for result_string in self.relax(sph_particles):
                print(result_string)
        specific_internal_energy, composition, mu = self.interpolate_internal_energy(
            sph_particles.position.lengths(),
            do_composition_too = self.do_store_composition
        )
        sph_particles.u = specific_internal_energy
        if self.do_store_composition:
            sph_particles.add_vector_attribute("composition", self.species_names)
            sph_particles.composition = composition
            sph_particles.mu = mu
        if self.with_core_particle and self.core_radius:
            core_particle = Particle()
            core_particle.mass = self.core_mass
            core_particle.position = [0.0, 0.0, 0.0] | units.m
            core_particle.velocity = [0.0, 0.0, 0.0] | units.m / units.s
            core_particle.radius = self.core_radius
            return StellarModelInSPH(gas_particles=sph_particles, core_particle=core_particle, core_radius=self.core_radius)
        return StellarModelInSPH(gas_particles=sph_particles, core_particle=None, core_radius=None)
def convert_stellar_model_to_sph(particle, number_of_sph_particles, **keyword_arguments):
    """
    Request the internal structure of the star from a stellar evolution
    code and convert it into an SPH model with the requested number of
    gas particles (useful for merging stars).
    :argument particle: Star particle to be converted to an SPH model
    :argument number_of_sph_particles: Number of gas particles in the resulting model
    :argument with_core_particle: Model the core as a heavy, non-sph particle
    :argument target_core_mass: If (with_core_particle): target mass for the non-sph particle
    :argument do_relax: Relax the SPH model - doesn't seem to work satisfactorily yet!
    :argument pickle_file: If provided, read stellar structure from here instead of using 'particle'
    :argument do_store_composition: If set, store the local chemical composition on each particle
    :argument base_grid_options: dict() with options for the initial distribution,
        see new_uniform_spherical_particle_distribution
    """
    return StellarModel2SPH(particle, number_of_sph_particles, **keyword_arguments).result
# for compatibility
convert_stellar_model_to_SPH = convert_stellar_model_to_sph
def pickle_stellar_model(particle, pickle_file_name):
    """
    Requests the internal structure of the star from a Stellar Evolution community
    code and pickles it (stores it as a *.pkl file), for later use:
    convert_stellar_model_to_sph(None, ..., pickle_file=pickle_file_name)
    Using a pickled stellar model is significantly faster for modelling giants
    and other extremely evolved stars.
    :argument particle: Star particle to be converted to an SPH model later
    :argument pickle_file_name: Name of the pickle file in which to store the stellar structure
    """
    # Refuse to clobber: the directory must exist and the file must not.
    if os.path.isdir(os.path.dirname(os.path.abspath(pickle_file_name))) and not os.path.exists(pickle_file_name):
        outfile = open(pickle_file_name, 'wb')
    else:
        raise AmuseWarning("Incorrect file name '{0}'; directory must exist and "
            "file may not exist".format(pickle_file_name))
    converter = StellarModel2SPH(particle, None)
    converter.retrieve_stellar_structure()
    # Close the file handle deterministically, even if pickling fails
    # (the original left it open until garbage collection).
    with outfile:
        pickle.dump(dict(
            mass = converter.mass,
            radius = converter.radius,
            number_of_zones = converter.number_of_zones,
            number_of_species = converter.number_of_species,
            species_names = converter.species_names,
            density_profile = converter.density_profile,
            radius_profile = converter.radius_profile,
            mu_profile = converter.mu_profile,
            composition_profile = converter.composition_profile,
            specific_internal_energy_profile = converter.specific_internal_energy_profile,
            midpoints_profile = converter.midpoints_profile
        ), outfile)
| 19,994
| 51.206266
| 146
|
py
|
amuse
|
amuse-main/src/amuse/ext/cosmo.py
|
# simple cosmology calc.
# to be extended as needed
import numpy
from amuse.units import units, generic_unit_system
from amuse.units.quantities import to_quantity
from amuse.datamodel import Particles
from amuse.support.exceptions import AmuseException
def findbin(ylist, y):
    """
    Locate the bin index for value ``y`` in the monotonic sequence ``ylist``.

    Works for ascending and descending sequences. Returns -1 when ``y`` lies
    at or before the first element, ``len(ylist)`` when it lies at or beyond
    the last, and otherwise the index ``b`` with ``y`` between ``ylist[b-1]``
    and ``ylist[b]``.
    """
    s = 1
    if ylist[0] >= ylist[-1]:
        s = -1  # descending sequence: compare with flipped sign
    if s*y <= s*ylist[0]:
        return -1
    if s*y >= s*ylist[-1]:
        return len(ylist)
    up = len(ylist) - 1
    low = 0
    while up - low > 1:
        # Integer (floor) division: under Python 3 the original `/ 2`
        # yielded a float, which raises TypeError when used as an index.
        b = (low + up) // 2
        if s*y < s*ylist[b]:
            up = b
        else:
            low = b
    return up
class Hermitelookup(object):
    """Cubic Hermite interpolator over tabulated (x, y, dy/dx) values."""

    def __init__(self, xlist, ylist, yderiv):
        self.xlist = xlist
        self.ylist = ylist
        self.yderiv = yderiv

    def interpolatecubic(self, x, b):
        """Hermite-interpolate y(x) on the table interval ending at index ``b``."""
        # Clamp to the table edges when the bin index is out of range.
        if b <= 0:
            return self.ylist[0]
        if b > len(self.ylist) - 1:
            return self.ylist[-1]
        x0, x1 = self.xlist[b-1], self.xlist[b]
        y0 = self.ylist[b-1]
        d0, d1 = self.yderiv[b-1], self.yderiv[b]
        dx = x1 - x0
        dy = self.ylist[b] - y0
        if dx == 0.:
            # Degenerate interval: fall back to the average of the endpoints.
            return (y0 + self.ylist[b]) / 2
        u = (x - x0) / dx
        return (u**3 * (-2*dy + dx*(d0 + d1))
                + u**2 * (3*dy - dx*(2*d0 + d1))
                + dx*d0*u + y0)

    def evaluate(self, x):
        """Interpolated value at ``x``; the bin comes from findbin()."""
        return self.interpolatecubic(x, findbin(self.xlist, x))
class Cosmology(object):
    """
    Simple FRW cosmology calculator with tabulated (Hermite-interpolated)
    lookups between scale factor and age/conformal time.
    """
    def __init__(self, # default=fifth year wmap+BAO+SN parameters, hinshaw 2008
                     omega=1.,
                     omegal = 0.726,
                     omegak = 0.,
                     omegar = 8.37e-5, # 4.165E-5/(h*h) includes 3 massless neutrino species, T0 = 2.72528
                     h = 0.705,
                     sigma8 = 0.812,
                     n=1000,amax=1.):
        self.omega=omega
        self.omegal=omegal
        self.omegar=omegar
        self.omegak=omegak
        self.hubble0=h*(100 | units.kms/units.Mpc)
        # Matter density from the closure relation omega = sum of components.
        self.omegam = omega - (omegak + omegar + omegal)
        self.n=n
        # Quadratically spaced grid in a, so the table is denser near a=0.
        a=amax*(numpy.array(list(range(self.n+1)))/float(self.n))**2
        t=[0.]
        dtda=[0.]
        dadt=[0.]
        # Simpson's rule for t(a) = integral of (dt/da) da over each interval.
        for i in range(1,self.n+1):
            _t=t[-1]+1./6.*( self.invfriedmanint(a[i])+
                                             self.invfriedmanint(a[i-1])+
                                             4*self.invfriedmanint((a[i]+a[i-1])/2) )*(a[i]-a[i-1])
            t.append( _t )
            dtda.append(self.invfriedmanint(a[i]))
            dadt.append(1./dtda[-1])
        self.a=a
        self.t=numpy.array(t)
        self.dtda=numpy.array(dtda)
        self.dadt=numpy.array(dadt)
        self.age_lookup=Hermitelookup(self.a,self.t,self.dtda)
        self.a_lookup=Hermitelookup(self.t,self.a,self.dadt)
    def invfriedmanint(self,a):
        # dt/da in Hubble-time units (the reciprocal of dadtau below).
        return a/(self.omegam*a+self.omegar+self.omegal*a**4+self.omegak*a**2)**0.5
    def hubble(self,a):
        # H(a) = H0 * (da/dtau) / a.
        return self.hubble0*self.dadtau(a)/a
    def dadtau(self,a):
        # Friedmann equation: da/dtau in units of H0.
        return (self.omegam/a+self.omegar/a**2+self.omegal*a**2+self.omegak)**0.5
    def d2adtau2(self,a):
        # Acceleration of the scale factor in the same dimensionless units.
        return -1./2.*self.omegam/a**2-self.omegar/a**3+self.omegal*a
    def agefromz(self,z):
        # Age of the universe at redshift z.
        return self.agefroma(1./(z+1.))
    def taufromz(self,z):
        # Dimensionless age (in Hubble times) at redshift z.
        return self.taufroma(1./(z+1.))
    def agefroma(self,a):
        # Age at scale factor a, converted to physical time via 1/H0.
        return (self.age_lookup.evaluate(a)/self.hubble0)
    def taufroma(self,a):
        # Dimensionless age at scale factor a.
        return self.age_lookup.evaluate(a)
    def afromage(self,age):
        # Inverse lookup: scale factor at a given physical age.
        return self.a_lookup.evaluate(age*self.hubble0)
    def afromtau(self,tau):
        # Inverse lookup: scale factor at a given dimensionless age.
        return self.a_lookup.evaluate(tau)
def convert_comoving_to_physical(original, redshift=0.0, hubble_parameter=1.0, attribute_names=None):
    """
    Convert quantities or particle sets from comoving to physical coordinates.

    In comoving coordinates changes in position due to the expansion of the
    universe are corrected for by dividing by the scale factor
    a = 1 / (1 + z); this function undoes that correction for every quantity
    whose unit is (derived from) a length unit (e.g. position, velocity,
    energy, but not time or mass).

    Optionally, a value for the Hubble parameter (value of Hubble constant in
    units of 100 km/s/Mpc) can be supplied. If so, the units of 'original' are
    assumed to be based on length/h, mass/h, and time/h, instead of length,
    mass, and time, respectively. These factors will be divided out in the result.
    """
    if isinstance(original, Particles):
        converted = original.copy()
        names = attribute_names
        if names is None:
            names = converted.get_attribute_names_defined_in_store()
        for name in names:
            value = getattr(converted, name)
            setattr(converted, name,
                    convert_quantity_from_comoving_to_physical(value, redshift, hubble_parameter))
        return converted
    if hasattr(original, "unit"):
        return convert_quantity_from_comoving_to_physical(original, redshift, hubble_parameter)
    raise AmuseException("Can't convert instance of {0} from comoving to physical "
        "coordinates (only Particles or Quantity supported)".format(original.__class__))
def convert_quantity_from_comoving_to_physical(original, redshift, hubble_parameter=1.0):
    # Multiply by a**exponent (a = 1/(1+z)) for the length dimension in the
    # quantity's unit; quantities without a length dimension pass through.
    # NOTE(review): hubble_parameter is accepted but never used here, while
    # convert_comoving_to_physical's docstring promises h-factors are divided
    # out -- confirm the intended behavior.
    for (exponent, unit) in to_quantity(original).unit.base:
        if unit is units.m or unit is generic_unit_system.length:
            return original * (1 / (1.0 + redshift))**exponent
    return original
if __name__=="__main__":
    # Smoke test: print present-day ages and a round-trip a -> age -> a.
    cosmo=Cosmology(amax=2)
    print(cosmo.agefromz(0.).in_(units.Myr))
    print(cosmo.agefroma(1.).in_(units.Gyr))
    print(cosmo.afromage(cosmo.agefroma(1.5)))
| 5,397
| 32.52795
| 102
|
py
|
amuse
|
amuse-main/src/amuse/ext/gasplummer.py
|
import numpy
from amuse.ext.evrard_test import uniform_unit_sphere
from amuse.units import nbody_system
from amuse.units import units
from amuse import datamodel
class MakePlummerGasModel(object):
    """
    Build a gas (SPH) realization of a Plummer sphere by deforming a uniform
    unit sphere of particle positions to the Plummer radial mass profile.
    Velocities are zero; pressure support comes from the internal energy.
    """
    def __init__(self, targetN, convert_nbody = None, base_grid=None, rscale=1/1.695,
                                     mass=1.,seed=345672,mass_frac=.999):
        numpy.random.seed(seed)
        self.targetN = targetN
        self.convert_nbody = convert_nbody
        self.rscale=rscale
        self.mass=mass
        # Truncate the model at the radius enclosing this mass fraction.
        self.mass_frac=mass_frac
        # Central specific internal energy scale: u0 = M / (4 rscale).
        self.internal_energy=0.25*self.mass/self.rscale
        self.base_sphere=uniform_unit_sphere(targetN,base_grid)
    def new_model(self):
        """Return raw arrays (mass, x, y, z, vx, vy, vz, u) for the model."""
        x,y,z=self.base_sphere.make_xyz()
        self.actualN=len(x)
        # Map the unit-sphere radius to the Plummer radius via the inverse
        # of the enclosed-mass fraction.
        r=numpy.sqrt(x**2+y**2+z**2)*self.mass_frac**(1/3.)
        rtarget=self.rscale*(r**2/(1-r**2))**.5
        mr=self.mass_frac**(1/3.)
        maxr=self.rscale*(mr**2/(1-mr**2))**.5
        mass=numpy.ones_like(x)*self.mass/self.actualN
        internal_energy=self.internal_energy/(1+(rtarget/self.rscale)**2)**(1./2)
        # Guard against division by zero at the center and clip to the cutoff.
        r=r.clip(1.e-8,maxr)
        x=rtarget*x/r
        y=rtarget*y/r
        z=rtarget*z/r
        vx=numpy.zeros_like(x)
        vy=numpy.zeros_like(x)
        vz=numpy.zeros_like(x)
        return (mass,x,y,z,vx,vy,vz,internal_energy)
    @property
    def result(self):
        # Particle-set form of new_model(), optionally converted to SI units.
        mass,x,y,z,vx,vy,vz,u = self.new_model()
        result = datamodel.Particles(self.actualN)
        result.mass = nbody_system.mass.new_quantity(mass)
        result.x = nbody_system.length.new_quantity(x)
        result.y = nbody_system.length.new_quantity(y)
        result.z = nbody_system.length.new_quantity(z)
        result.vx = nbody_system.speed.new_quantity(vx)
        result.vy = nbody_system.speed.new_quantity(vy)
        result.vz = nbody_system.speed.new_quantity(vz)
        result.u = (nbody_system.speed**2).new_quantity(u)
        if not self.convert_nbody is None:
            result = datamodel.ParticlesWithUnitsConverted(result, self.convert_nbody.as_converter_from_si_to_generic())
            result = result.copy()
        return result
if __name__=="__main__":
    # Quick self-test: build a 10k-particle gas Plummer sphere in SI units.
    convert_nbody = nbody_system.nbody_to_si(100. | units.MSun, 1.0 | units.parsec)
    sphere=MakePlummerGasModel(10000,convert_nbody)
    parts=sphere.result
    print(parts[0].internal_energy**0.5)
    print(len(parts)*parts[0].mass.in_(units.MSun))
| 2,446
| 37.234375
| 120
|
py
|
amuse
|
amuse-main/src/amuse/ext/sobol.py
|
import math
from numpy import *
__all__ = ["i4_sobol", "i4_sobol_generate"]
def i4_bit_hi1(n):
    """
    Return the position of the highest 1 bit (base 2) in an integer.

    Examples: 0 -> 0, 1 -> 1, 2..3 -> 2, 4..7 -> 3, 1023 -> 10, 1024 -> 11.
    Non-integer input is floored first; nonpositive input yields 0.

    Original MATLAB version by John Burkardt; Python version by Corrado
    Chisari. This code is distributed under the GNU LGPL license.
    """
    value = int(math.floor(n))
    # int.bit_length() is the number of bits needed for a positive value,
    # which is exactly the position of its highest set bit.
    return value.bit_length() if value > 0 else 0
def i4_bit_lo0 ( n ):
    """Return the (1-based) position of the lowest 0 bit (base 2) of an integer.

    Examples: 0 -> 1, 1 -> 2, 2 -> 1, 3 -> 3, 7 -> 4, 15 -> 5.
    N should be nonnegative. Non-integer input is floored first.

    Licensing: distributed under the GNU LGPL license.
    Original MATLAB version by John Burkardt; Python version by
    Corrado Chisari.
    """
    # Count the run of trailing 1 bits; the first 0 bit sits just above it.
    value = int(math.floor(n))
    position = 1
    while value % 2:
        position += 1
        value //= 2
    return position
def i4_sobol_generate ( m, n, skip ):
    """Generate a Sobol quasirandom dataset.

    Input, integer M, the spatial dimension.
    Input, integer N, the number of points to generate.
    Input, integer SKIP, the number of initial points to skip.
    Output, real R(M, N), the points (one point per column).

    Licensing: distributed under the GNU LGPL license.
    Original MATLAB version by John Burkardt; Python version by
    Corrado Chisari.
    """
    points = zeros((m, n))
    for column in range(n):
        # NOTE(review): the MATLAB-derived offset makes the first seed
        # skip - 1; preserved as-is from the original code.
        seed = skip + column - 1
        points[0:m, column], seed = i4_sobol(m, seed)
    return points
def i4_sobol ( dim_num, seed ):
    """
    I4_SOBOL generates a new quasirandom Sobol vector with each call.

    Discussion: the routine adapts the ideas of Antonov and Saleev.

    Licensing: distributed under the GNU LGPL license.
    Modified: 22 February 2011.
    Author: original FORTRAN77 version by Bennett Fox; MATLAB version by
    John Burkardt; PYTHON version by Corrado Chisari.

    Reference:
      Antonov, Saleev, USSR Computational Mathematics and Mathematical
      Physics, Volume 19, 1980, pages 252 - 256.
      Paul Bratley, Bennett Fox, Algorithm 659: Implementing Sobol's
      Quasirandom Sequence Generator, ACM Transactions on Mathematical
      Software, Volume 14, Number 1, pages 88-100, 1988.
      Bennett Fox, Algorithm 647: Implementation and Relative Efficiency of
      Quasirandom Sequence Generators, ACM Transactions on Mathematical
      Software, Volume 12, Number 4, pages 362-376, 1986.
      Ilya Sobol, USSR Computational Mathematics and Mathematical Physics,
      Volume 16, pages 236-242, 1977.
      Ilya Sobol, Levitan, The Production of Points Uniformly Distributed in
      a Multidimensional Cube (in Russian), Preprint IPM Akad. Nauk SSSR,
      Number 40, Moscow 1976.

    Parameters:
      Input, integer DIM_NUM, the number of spatial dimensions.
      DIM_NUM must satisfy 1 <= DIM_NUM <= 40.
      Input/output, integer SEED, the "seed" for the sequence.
      This is essentially the index in the sequence of the quasirandom
      value to be generated. On output, SEED has been set to the
      appropriate next value, usually simply SEED+1.
      If SEED is less than 0 on input, it is treated as though it were 0.
      An input value of 0 requests the first (0-th) element of the sequence.
      Output, real QUASI(DIM_NUM), the next quasirandom vector.
    """
    # MATLAB-style persistent state, kept in module globals between calls.
    global atmost
    global dim_max
    global dim_num_save
    global initialized
    global lastq
    global log_max
    global maxcol
    global poly
    global recipd
    global seed_save
    global v
    # First-ever call: create the sentinel state.
    if ( not 'initialized' in globals().keys() ):
        initialized = 0
        dim_num_save = -1
    # (Re)build the tables on the first call or when the dimension changes.
    if ( not initialized or dim_num != dim_num_save ):
        initialized = 1
        dim_max = 40
        dim_num_save = -1
        log_max = 30
        seed_save = -1
        #
        # Initialize (part of) V, the table of direction numbers.
        #
        v = zeros((dim_max,log_max))
        v[0:40,0] = transpose([ \
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ])
        v[2:40,1] = transpose([ \
            1, 3, 1, 3, 1, 3, 3, 1, \
            3, 1, 3, 1, 3, 1, 1, 3, 1, 3, \
            1, 3, 1, 3, 3, 1, 3, 1, 3, 1, \
            3, 1, 1, 3, 1, 3, 1, 3, 1, 3 ])
        v[3:40,2] = transpose([ \
            7, 5, 1, 3, 3, 7, 5, \
            5, 7, 7, 1, 3, 3, 7, 5, 1, 1, \
            5, 3, 3, 1, 7, 5, 1, 3, 3, 7, \
            5, 1, 1, 5, 7, 7, 5, 1, 3, 3 ])
        v[5:40,3] = transpose([ \
            1, 7, 9,13,11, \
            1, 3, 7, 9, 5,13,13,11, 3,15, \
            5, 3,15, 7, 9,13, 9, 1,11, 7, \
            5,15, 1,15,11, 5, 3, 1, 7, 9 ])
        v[7:40,4] = transpose([ \
            9, 3,27, \
            15,29,21,23,19,11,25, 7,13,17, \
            1,25,29, 3,31,11, 5,23,27,19, \
            21, 5, 1,17,13, 7,15, 9,31, 9 ])
        v[13:40,5] = transpose([ \
            37,33, 7, 5,11,39,63, \
            27,17,15,23,29, 3,21,13,31,25, \
            9,49,33,19,29,11,19,27,15,25 ])
        v[19:40,6] = transpose([ \
            13, \
            33,115, 41, 79, 17, 29,119, 75, 73,105, \
            7, 59, 65, 21, 3,113, 61, 89, 45,107 ])
        v[37:40,7] = transpose([ \
            7, 23, 39 ])
        #
        # Set POLY, the (binary-encoded) primitive polynomials.
        #
        poly= [ \
            1, 3, 7, 11, 13, 19, 25, 37, 59, 47, \
            61, 55, 41, 67, 97, 91, 109, 103, 115, 131, \
            193, 137, 145, 143, 241, 157, 185, 167, 229, 171, \
            213, 191, 253, 203, 211, 239, 247, 285, 369, 299 ]
        atmost = 2**log_max - 1
        #
        # Find the number of bits in ATMOST.
        #
        maxcol = i4_bit_hi1 ( atmost )
        #
        # Initialize row 1 of V.
        #
        v[0,0:maxcol] = 1
    #
    # Things to do only if the dimension changed.
    #
    if ( dim_num != dim_num_save ):
        #
        # Check parameters.
        #
        if ( dim_num < 1 or dim_max < dim_num ):
            print('I4_SOBOL - Fatal error!')
            print(' The spatial dimension DIM_NUM should satisfy:')
            print(' 1 <= DIM_NUM <= %d'%dim_max)
            print(' But this input value is DIM_NUM = %d'%dim_num)
            return
        dim_num_save = dim_num
        #
        # Initialize the remaining rows of V.
        #
        for i in range(2 , dim_num+1):
            #
            # The bits of the integer POLY(I) gives the form of polynomial I.
            #
            # Find the degree of polynomial I from binary encoding.
            #
            j = poly[i-1]
            m = 0
            while ( 1 ):
                j = math.floor ( j / 2. )
                if ( j <= 0 ):
                    break
                m = m + 1
            #
            # Expand this bit pattern to separate components of the logical array INCLUD.
            #
            j = poly[i-1]
            includ=zeros(m)
            for k in range(m, 0, -1):
                j2 = math.floor ( j / 2. )
                includ[k-1] = (j != 2 * j2 )
                j = j2
            #
            # Calculate the remaining elements of row I as explained
            # in Bratley and Fox, section 2.
            #
            for j in range( m+1, maxcol+1 ):
                newv = v[i-1,j-m-1]
                l = 1
                for k in range(1, m+1):
                    l = 2 * l
                    if ( includ[k-1] ):
                        newv = bitwise_xor ( int(newv), int(l * v[i-1,j-k-1]) )
                v[i-1,j-1] = newv
        #
        # Multiply columns of V by appropriate power of 2.
        #
        l = 1
        for j in range( maxcol-1, 0, -1):
            l = 2 * l
            v[0:dim_num,j-1] = v[0:dim_num,j-1] * l
        #
        # RECIPD is 1/(common denominator of the elements in V).
        #
        recipd = 1.0 / ( 2 * l )
        lastq=zeros(dim_num)
    seed = int(math.floor ( seed ))
    if ( seed < 0 ):
        seed = 0
    if ( seed == 0 ):
        # Start of the sequence: reset the recurrence state.
        l = 1
        lastq=zeros(dim_num)
    elif ( seed == seed_save + 1 ):
        #
        # Find the position of the right-hand zero in SEED.
        #
        l = i4_bit_lo0 ( seed )
    elif ( seed <= seed_save ):
        # Rewinding: replay the Antonov-Saleev recurrence from the start.
        seed_save = 0
        l = 1
        lastq=zeros(dim_num)
        for seed_temp in range( int(seed_save), int(seed)):
            l = i4_bit_lo0 ( seed_temp )
            for i in range(1 , dim_num+1):
                lastq[i-1] = bitwise_xor ( int(lastq[i-1]), int(v[i-1,l-1]) )
        l = i4_bit_lo0 ( seed )
    elif ( seed_save + 1 < seed ):
        # Skipping ahead: advance the recurrence up to SEED.
        for seed_temp in range( int(seed_save + 1), int(seed) ):
            l = i4_bit_lo0 ( seed_temp )
            for i in range(1, dim_num+1):
                lastq[i-1] = bitwise_xor ( int(lastq[i-1]), int(v[i-1,l-1]) )
        l = i4_bit_lo0 ( seed )
    #
    # Check that the user is not calling too many times!
    #
    if ( maxcol < l ):
        print('I4_SOBOL - Fatal error!')
        print(' Too many calls!')
        print(' MAXCOL = %d\n'%maxcol)
        print(' L = %d\n'%l)
        return
    #
    # Calculate the new components of QUASI.
    #
    quasi=zeros(dim_num)
    for i in range( 1, dim_num+1):
        quasi[i-1] = lastq[i-1] * recipd
        lastq[i-1] = bitwise_xor ( int(lastq[i-1]), int(v[i-1,l-1]) )
    seed_save = seed
    seed = seed + 1
    return [ quasi, seed ]
| 11,837
| 25.662162
| 87
|
py
|
amuse
|
amuse-main/src/amuse/ext/sink.py
|
"""
Sinks
This module contains functions to create new sink particles. These can be used
to model accretion, for example unto protostars or compact objects.
"""
import numpy
from amuse.units import units, quantities
from amuse.units.quantities import zero, AdaptingVectorQuantity
from amuse.datamodel import Particle, ParticlesOverlay, ParticlesSubset
from amuse.datamodel import Particles, ParticlesSuperset
from amuse.support.exceptions import AmuseException
__all__ = ["new_sink_particles"]
def angular_momentum(mass, position, velocity):
    """
    Returns the angular momentum (mass * position x velocity) of the particles.

    ``mass`` may be per-particle (a vector, reshaped to a column so it
    broadcasts over the cross products) or a scalar.
    """
    try:
        return mass.reshape((-1, 1)) * position.cross(velocity)
    except Exception:
        # Was a bare "except:", which also swallowed KeyboardInterrupt and
        # SystemExit; fall back to plain multiplication for scalar masses.
        return mass * position.cross(velocity)
class SinkParticles(ParticlesOverlay):
    """
    Overlay that turns a particle set into accreting sinks.

    The sinks share the attributes of the original particles and add a
    ``sink_radius`` plus the bookkeeping attributes (mass, position,
    velocity, angular momentum) needed to absorb particles that come
    within that radius.
    """

    def __init__(self, original_particles, sink_radius=None, mass=None, position=None,
            velocity=None, angular_momentum=None, looping_over="sinks"):
        ParticlesOverlay.__init__(self, original_particles)
        # "sinks" iterates over the sinks when accreting, "sources" over the
        # candidate particles; results should agree, speed differs.
        self._private.looping_over=looping_over
        self.sink_radius = sink_radius or original_particles.radius
        # Supply zero-valued defaults only for attributes the originals lack.
        if not hasattr(original_particles, "mass"):
            self.mass = mass or (([0.]*len(self)) | units.kg)
        if not hasattr(original_particles, "x"):
            self.position=position or (([[0.,0.,0.]]*len(self)) | units.m)
        if not hasattr(original_particles, "vx"):
            self.velocity=velocity or (([[0.,0.,0.]]*len(self)) | units.m/units.s)
        if not hasattr(original_particles, "lx"):
            self.angular_momentum=angular_momentum or (([[0.,0.,0.]]*len(self)) | units.g*units.m**2/units.s)

    def accrete(self,orgparticles):
        # Dispatch on the looping strategy chosen at construction time.
        if self._private.looping_over=="sinks":
            return self.accrete_looping_over_sinks(orgparticles)
        else:
            return self.accrete_looping_over_sources(orgparticles)

    def add_particles_to_store(self, keys, attributes = [], values = []):
        # Split incoming attributes between the base set and the overlay.
        (
            (attributes_inbase, values_inbase),
            (attributes_inoverlay, values_inoverlay)
        ) = self._split_attributes_and_values(attributes, values)
        self._private.overlay_set.add_particles_to_store(keys, attributes_inoverlay, values_inoverlay)
        #
        # The sink particles have a little different concept of "overlay particles"
        # apparently the sink particles are positioned on all particles (the complete superset gas + sink)
        # and adding sink_particles will not work, subsets must be summed
        #
        particles = self._private.base_set._original_set()._subset(keys)
        self._private.base_set = self._private.base_set + particles

    def add_sinks(self, original_particles, sink_radius=None, mass=None, position=None,
            velocity=None, angular_momentum=None):
        """Add more sinks, applying the same attribute defaults as __init__."""
        new_sinks = self.add_particles(original_particles)
        new_sinks.sink_radius = sink_radius or original_particles.radius
        if not hasattr(original_particles, "mass"):
            new_sinks.mass = mass or (([0.]*len(new_sinks)) | units.kg)
        if not hasattr(original_particles, "x"):
            new_sinks.position=position or (([[0.,0.,0.]]*len(new_sinks)) | units.m)
        if not hasattr(original_particles, "vx"):
            new_sinks.velocity=velocity or (([[0.,0.,0.]]*len(new_sinks)) | units.m/units.s)
        if not hasattr(original_particles, "lx"):
            new_sinks.angular_momentum=angular_momentum or (([[0.,0.,0.]]*len(new_sinks)) | units.g*units.m**2/units.s)

    def add_sink(self, particle):
        # Convenience wrapper for a single particle.
        self.add_sinks(particle.as_set())

    def select_too_close(self, others):
        # One subset per sink: the particles within that sink's radius.
        too_close = []
        for pos, r_squared in zip(self.position, self.sink_radius**2):
            subset = others[(others.position-pos).lengths_squared() < r_squared]
            too_close.append(subset)
        return too_close

    def accrete_looping_over_sinks(self, orgparticles):
        """Absorb and remove from 'orgparticles' everything within reach of a
        sink; returns the accreted particles."""
        particles=orgparticles.copy()
        # Candidates are all particles that are not sinks themselves.
        others = (particles - self.get_intersecting_subset_in(particles))
        too_close = self.select_too_close(others)
        try:
            all_too_close = sum(too_close, particles[0:0])
        except AmuseException as ex:
            # A particle lies inside several sinks: give each duplicate to
            # the most strongly attracting sink, then retry.
            too_close = self.resolve_duplicates(too_close, particles)
            all_too_close = sum(too_close, particles[0:0])
        if len(all_too_close):
            self.aggregate_mass(too_close)
            orgparticles.remove_particles(all_too_close)
        return all_too_close

    def resolve_duplicates(self, too_close, particles):
        """Assign each particle claimed by several sinks to exactly one of
        them (the one with the largest m/d**2 attraction)."""
        # Find the particles that are within more than one sink's radius
        duplicates = particles[0:0]
        keys = set()
        for subset in too_close:
            for particle in subset:
                if (particle.key in keys) and (particle.key not in duplicates.key):
                    duplicates += particle
                else:
                    keys.add(particle.key)
        # Determine which sink's attraction is strongest
        strongest_sinks = []
        for duplicate in duplicates:
            candidate_sinks = []
            for index, subset in enumerate(too_close):
                if duplicate in subset:
                    candidate_sinks.append(index)
            attraction = self[candidate_sinks].mass/(self[candidate_sinks].position-duplicate.position).lengths_squared()
            strongest_sinks.append(candidate_sinks[numpy.where(attraction==attraction.amax())[0][0]])
        # Define a new list with particles to be accreted, without the duplicates
        result = []
        for index, subset in enumerate(too_close):
            for duplicate, strongest_sink in zip(duplicates, strongest_sinks):
                if duplicate in subset and not index == strongest_sink:
                    subset -= duplicate
            result.append(subset)
        return result

    def accrete_looping_over_sources(self, orgparticles):
        """Same result as accrete_looping_over_sinks, but iterating over the
        candidate particles; duplicates are resolved on the fly by the
        smallest d**2/m (strongest attraction)."""
        if len(self) == 0:
            return
        particles=orgparticles.copy()
        others = (particles - self.get_intersecting_subset_in(particles))
        too_close = [particles[0:0] for p in self]
        all_too_close=particles[0:0]
        # Hoist the per-sink arrays out of the source loop.
        positions=self.position
        masses=self.mass
        sink_radii2=self.sink_radius**2
        for p in others:
            d2=(positions-p.position).lengths_squared()
            a=numpy.where(d2<sink_radii2)[0]
            if len(a) > 0:
                amin=(d2[a]/masses[a]).argmin()
                too_close[a[amin]]+=p
                all_too_close+=p
        if len(all_too_close):
            self.aggregate_mass(too_close)
            orgparticles.remove_particles(all_too_close)
        return all_too_close

    def aggregate_mass(self,too_close):
        """Merge each accreted subset into its sink, conserving total mass,
        momentum and angular momentum (spin taken about the new center of
        mass)."""
        corrected_masses = AdaptingVectorQuantity()
        corrected_positions = AdaptingVectorQuantity()
        corrected_velocities = AdaptingVectorQuantity()
        corrected_angular_momenta = AdaptingVectorQuantity()
        for subset, m, pos, vel, Lin in zip(too_close, self.mass, self.position, self.velocity, self.angular_momentum):
            if len(subset):
                total_mass = subset.total_mass() + m
                cmpos=(m*pos + subset.total_mass()*subset.center_of_mass())/total_mass
                cmvel=(m*vel + subset.total_mass()*subset.center_of_mass_velocity())/total_mass
                L=Lin+angular_momentum(m,pos-cmpos,vel-cmvel)+angular_momentum(subset.mass,subset.position-cmpos,subset.velocity-cmvel).sum(axis=0)
                corrected_masses.append(total_mass)
                corrected_positions.append(cmpos)
                corrected_velocities.append(cmvel)
                corrected_angular_momenta.append(L)
            else:
                # Nothing accreted by this sink: keep its state unchanged.
                corrected_masses.append(m)
                corrected_positions.append(pos)
                corrected_velocities.append(vel)
                corrected_angular_momenta.append(Lin)
        self.mass = corrected_masses
        self.position = corrected_positions
        self.velocity = corrected_velocities
        self.angular_momentum = corrected_angular_momenta
class AbstractShape(object):
    """
    Abstract superclass of all shapes.

    Provides the behaviour shared by every shape: combining shapes into a
    union with ``|`` and picking out the sources inside the shape.
    """

    def __or__(self, other):
        # Union of two shapes.
        return CompoundShape(self, other)

    def within_shape(self, position, sources):
        """Return the subset of 'sources' lying inside this shape centered at 'position'."""
        mask = self.select(position, sources)
        return sources[mask]
class CompoundShape(AbstractShape):
    """Union of several shapes: a source is selected if any sub-shape selects it."""

    def __init__(self, *sub_shapes):
        self.sub_shapes = sub_shapes

    def __or__(self, other_shape):
        # Keep sub_shapes flat: absorb another compound's children directly.
        if hasattr(other_shape, "sub_shapes"):
            self.sub_shapes += other_shape.sub_shapes
        else:
            self.sub_shapes += (other_shape,)
        return self

    def select(self, position, sources):
        selected = numpy.zeros(len(sources), dtype=bool)
        for shape in self.sub_shapes:
            selected = numpy.logical_or(selected, shape.select(position, sources))
        return selected
class Sphere(AbstractShape):
    """Spherical volume with a fixed radius."""

    def __init__(self, radius):
        self.radius = radius

    def select(self, position, sources):
        # Compare squared distances to avoid taking square roots.
        distance_squared = (sources.position - position).lengths_squared()
        return distance_squared < self.radius**2
class Spheroid(AbstractShape):
    """Axis-aligned spheroidal (ellipsoidal) volume.

    ``dimensions`` holds the semi-axes; ``orientation`` is reserved for a
    future rotation and is not implemented yet.
    """

    def __init__(self, dimensions, orientation=None):
        self.dimensions = dimensions
        self.orientation = orientation

    def select(self, position, sources):
        longest_axis = self.dimensions.max()
        # Scale coordinates so the spheroid becomes a sphere of radius
        # longest_axis, then test squared lengths.
        unit_shape = self.dimensions / longest_axis
        relative_position = sources.position - position
        if self.orientation is not None:
            # TODO rotate relative_position using orientation
            raise AmuseException("spheroid orientation not implemented yet")
        return (relative_position / unit_shape).lengths_squared() < longest_axis**2
class Disc(Spheroid):
    """Disc-shaped volume: a flattened spheroid with semi-axes (radius, radius, height)."""

    def __init__(self, radius, height, **kwargs):
        semi_axes = quantities.as_vector_quantity([radius, radius, height])
        Spheroid.__init__(self, semi_axes, **kwargs)
class NonSphericalSinkParticles(SinkParticles):
    """
    Sink particles whose accretion volume is an arbitrary shape object per
    sink instead of a sphere of 'sink_radius'.
    """
    def __init__(self, original_particles, shapes, *args, **kwargs):
        SinkParticles.__init__(self, original_particles, *args, **kwargs)
        # A single shape may be shared by all sinks.
        if isinstance(shapes, AbstractShape):
            shapes = [shapes] * len(original_particles)
        self.shapes = shapes
    def select_too_close(self, sources):
        # One subset per sink: the sources inside that sink's shape.
        too_close = []
        for pos, shape in zip(self.position, self.shapes):
            subset = shape.within_shape(pos, sources)
            too_close.append(subset)
        return too_close
    def accrete_looping_over_sources(self, orgparticles):
        # The shape test is vectorized per sink over all sources, so the
        # source-loop strategy is not available here.
        raise AmuseException("Looping over sources not supported for non spherical sink particles")
def new_sink_particles(original_particles, *list_arguments, **keyword_arguments):
    """
    Create sink particles bound to 'original_particles' (attributes are
    shared). Compared with the originals, the sinks additionally have:

    (1) a 'sink_radius' attribute
    (2) an 'accrete(particles)' method that absorbs the particles lying
        within that radius

    :argument original_particles: the particles to be modeled as sinks (required)
    :argument sink_radius: the radii of the sinks (default: original_particles.radius)
    :argument mass: masses of the sinks if not supplied by the original_particles (default: zero)
    :argument position: positions of the sinks if not supplied by the original_particles (default: the origin)
    :argument shapes: one shape object per sink (or a single shape shared by
        all) to make the accretion volume non-spherical; slower than plain
        spherical accretion.
    """
    shapes = keyword_arguments.pop('shapes', None)
    if shapes is None:
        return SinkParticles(original_particles, *list_arguments, **keyword_arguments)
    return NonSphericalSinkParticles(original_particles, shapes, *list_arguments, **keyword_arguments)
| 12,155
| 40.917241
| 147
|
py
|
amuse
|
amuse-main/src/amuse/ext/comsystem.py
|
"""
function center_of_mass_system generates a thin wrapper class
to a grav dynamics class to calculate the dynamics in the center of mass frame
"""
from amuse.datamodel import Particles
def center_of_mass_system(baseclass):
    """
    Class factory: wrap a gravitational dynamics class so that the
    integration is carried out in the center-of-mass frame of its
    particles, while ``.particles`` always presents inertial-frame data.
    """
    class comsystem(baseclass):
        def __init__(self,*args,**kwargs):
            # particles_accessed flags that self._particles holds the
            # authoritative (inertial-frame) copy.
            self.particles_accessed=True
            self._particles=Particles(0)
            baseclass.__init__(self,*args,**kwargs)
        def evolve_model(self,*args,**kwargs):
            if self.particles_accessed:
                # Push the possibly-modified particle set into the wrapped
                # code, shifted into the center-of-mass frame.
                self.com_position=self._particles.center_of_mass()
                self.com_velocity=self._particles.center_of_mass_velocity()
                com_time=self.model_time
                self._particles.synchronize_to(self.overridden().particles)
                self._particles.new_channel_to(self.overridden().particles).copy_attributes(["mass","radius"])
                self.overridden().particles.position=self._particles.position-self.com_position
                self.overridden().particles.velocity=self._particles.velocity-self.com_velocity
            self.overridden().evolve_model(*args,**kwargs)
            # Advance the center of mass ballistically over the evolved span.
            # NOTE(review): com_time is only bound when the particles were
            # accessed before this call; confirm evolve_model is never called
            # twice in a row without touching .particles in between.
            self.com_position+=self.com_velocity*(self.model_time-com_time)
            self.particles_accessed=False
        @property
        def particles(self):
            # Lazily pull the particles back from the code and restore the
            # inertial-frame position/velocity.
            if not self.particles_accessed:
                self._particles=self.overridden().particles.copy()
                self._particles.position+=self.com_position
                self._particles.velocity+=self.com_velocity
                self.particles_accessed=True
            return self._particles
    return comsystem
if __name__=="__main__":
    # Smoke test: wrap Huayno in the center-of-mass frame and evolve a
    # small Plummer model for one N-body time unit.
    from amuse.community.huayno.interface import Huayno
    from amuse.ic.plummer import new_plummer_model
    from amuse.units import nbody_system
    comHuayno=center_of_mass_system(Huayno)
    grav=comHuayno()
    parts=new_plummer_model(100)
    grav.particles.add_particles(parts)
    grav.evolve_model(1| nbody_system.time)
    print(grav.particles)
| 1,932
| 29.68254
| 103
|
py
|
amuse
|
amuse-main/src/amuse/ext/radial_profile.py
|
import numpy
def radial_profile(r,dat,N=100):
    """
    Bin-averaged radial profile of ``dat`` against radius ``r``.

    The particles are sorted by radius and averaged in consecutive groups
    of (at most) N, so every bin holds an equal number of particles rather
    than an equal radial width.

    :argument r: radii (plain array or AMUSE vector quantity)
    :argument dat: values to average, same length as r
    :argument N: number of particles per bin (default 100)
    :returns: (binned radii, binned averages) — numpy arrays when the input
        carries no unit, vector quantities otherwise
    """
    n = len(r)
    order = r.argsort()
    # Accumulate into unit-carrying vectors when the inputs have units,
    # plain lists otherwise.
    if hasattr(r, "unit"):
        r_a = [] | r.unit
    else:
        r_a = []
    if hasattr(dat, "unit"):
        dat_a = [] | dat.unit
    else:
        dat_a = []
    i = 0
    while i < n:
        # The last bin may contain fewer than N particles.
        count = min(n - i, N)
        r_a.append(r[order[i:i+N]].sum() / count)
        dat_a.append(dat[order[i:i+N]].sum() / count)
        i = i + N
    if not hasattr(r_a, "unit"):
        r_a = numpy.array(r_a)
    if not hasattr(dat_a, "unit"):
        # Bug fix: this previously assigned to an unused variable 'dens'
        # (copied from radial_density), so unitless input returned the
        # averages as a plain list instead of an array.
        dat_a = numpy.array(dat_a)
    return r_a, dat_a
def radial_density(r,mass,N=100,dim=3, start_at_zero=False, enforce_finite_bins=True):
    """
    Radial density profile from particle radii and masses.

    Sorts the particles by radius and bins them in groups of ~N; each bin's
    density is the enclosed mass divided by the shell volume (dim=3), shell
    area (dim=2) or shell length (otherwise).

    :argument start_at_zero: begin the first shell at r=0 instead of at the
        innermost particle
    :argument enforce_finite_bins: grow a bin until its outer radius differs
        from the previous bin edge, avoiding zero-width shells
    :returns: (bin radii, densities)
    """
    # Geometric factor turning mass/(r_out**dim - r_in**dim) into a density.
    if dim==3:
        volfac=numpy.pi*4./3.
    elif dim==2:
        volfac=numpy.pi
    else:
        volfac=1
    n=len(r)
    a=r.argsort()
    # Accumulators carry units when the inputs do.
    if hasattr(r,"unit"):
        r_a=[] | r.unit
    else:
        r_a=[]
    dummy_dens=mass[0]/r[0]**dim
    if hasattr(dummy_dens,"unit"):
        dens=[] | dummy_dens.unit
    else:
        dens=[]
    oldrshell=r[a[0]]
    if start_at_zero:
        oldrshell=0.*r[0]
    i=0
    i1=i
    while i < n:
        i1=min(n,i1+N)
        rshell=r[a[i1-1]]
        # Only close the bin when it has finite width (unless disabled);
        # otherwise extend it by another N particles on the next pass.
        # NOTE(review): if all remaining radii are equal and
        # enforce_finite_bins is True this loop cannot terminate — confirm
        # inputs always contain distinct radii.
        if rshell!=oldrshell or not enforce_finite_bins:
            ra=r[a[i:i1]].sum()/(i1-i)
            da=mass[a[i:i1]].sum()/(rshell**dim-oldrshell**dim)
            oldrshell=rshell
            r_a.append(ra)
            dens.append(da)
            i=i1
    if not hasattr(r_a, "unit"):
        r_a=numpy.array(r_a)
    if not hasattr(dens, "unit"):
        dens=numpy.array(dens)
    return r_a,dens/volfac
if __name__=="__main__":
    # Visual check: density profile of a Plummer sphere against the
    # analytic Plummer law (scale length a = 1/1.695 in these units).
    from matplotlib import pyplot
    from amuse.ic.plummer import MakePlummerModel
    plum=MakePlummerModel(100000).result
    r=(plum.x**2+plum.y**2+plum.z**2)**0.5
    ra,dens=radial_density(r,plum.mass,100,start_at_zero=True)
    ascl=1/1.695
    ra=ra.number
    dens=dens.number
    pyplot.subplot(211)
    pyplot.loglog(ra,dens)
    # Analytic Plummer density for comparison.
    pyplot.loglog(ra, 3./4./numpy.pi/ascl**3/(1+(ra**2/ascl**2))**(5./2))
#    pyplot.plot(ra,(dens-3./4./numpy.pi/ascl**3/(1+(ra**2/ascl**2))**(5./2))/dens,'r.')
    pyplot.subplot(212)
    pyplot.plot(plum.x.number,plum.y.number,'r.')
    pyplot.show()
| 1,981
| 21.781609
| 86
|
py
|
amuse
|
amuse-main/src/amuse/ext/rotating_bridge.py
|
"""
Wrapper of the bridge-like integrator for amuse. To see how bridge works go to the file bridge.py in this folder.
This bridge makes the integration of eqs of motion in a right hand counterclockwise rotating system.
Usage:
from amuse.ext.composition_methods import *
from amuse.ext.rotating_bridge import Rotating_Bridge
system= Rotating_Bridge(omega, timestep= dt_bridge, verbose= False, method= method)
system.add_system(cluster, (MW,), False)
system.add_system(MW, (), False)
omega: angular velocity of the rotating frame. If an axisymmetric model is used, omega must be 0 km/s/kpc
dt_bridge: bridge timestep. For now it is fixed
method: One of the composite methods. The default one is LEAPFROG but it can be used:
SPLIT_4TH_S_M6
SPLIT_4TH_S_M5
SPLIT_4TH_S_M4
SPLIT_6TH_SS_M11
SPLIT_6TH_SS_M13
SPLIT_8TH_SS_M21
SPLIT_10TH_SS_M35
"""
from amuse.support.exceptions import AmuseException
import threading
from amuse.units import quantities
from amuse import datamodel
from amuse.ext.bridge import bridge
import numpy
from numpy import cos,sin
from amuse.datamodel import TransformedParticles
# same as below, retained for legacy
def inertial_to_rotating(t,omega,parts):
    """Return a copy of ``parts`` transformed from the inertial frame into a
    frame rotating counterclockwise at angular velocity ``omega`` at time ``t``."""
    c = cos(omega*t)
    s = sin(omega*t)
    x = parts.x
    y = parts.y
    vx = parts.vx
    vy = parts.vy
    rotating = parts.copy()
    rotating.x = x*c + y*s
    rotating.y = -x*s + y*c
    # The velocity transform includes the omega x r term of the rotating frame.
    rotating.vx = (vx + y*omega)*c + (vy - x*omega)*s
    rotating.vy = -(vx + y*omega)*s + (vy - x*omega)*c
    return rotating
def rotating_to_inertial(t,omega,parts):
    # Inverse of inertial_to_rotating: rotating back is the same transform
    # with the sign of omega flipped.
    return inertial_to_rotating(t,-omega,parts)
class Rotating_Bridge(bridge):
    """
    Bridge integrator that evolves the equations of motion in a frame
    rotating counterclockwise at angular velocity ``omega`` (see the module
    docstring for usage and the available composition methods).
    """
    def __init__(self, omega, **kwargs):
        # Pop our own keywords before handing the rest to bridge.__init__.
        timestep=kwargs.pop('timestep', None)
        self.omega=omega
        self.initial_angle=kwargs.pop('initial_angle', 0.)
        bridge.__init__(self, **kwargs)
        self.timestep=timestep

    def kick_system_rotational(self, system, partners, dt):
        """Kick ``system`` over ``dt`` with the gravity of ``partners`` plus
        the centrifugal term, integrating the Coriolis rotation exactly."""
        parts=system.particles.copy()
        ax= quantities.zero
        ay= quantities.zero
        az= quantities.zero
        if(self.verbose):
            print(system.__class__.__name__,"receives kick from", end=' ')
        # Accumulate the acceleration from every partner system.
        for y in partners:
            if system is not y:
                if(self.verbose):
                    print(y.__class__.__name__, end=' ')
                _ax,_ay,_az= y.get_gravity_at_point(parts.radius,parts.x,parts.y,parts.z)
                ax+=_ax
                ay+=_ay
                az+=_az
        if self.omega != quantities.zero:
            # Exact in-plane kick: with constant acceleration (gravity +
            # centrifugal omega^2 r), the Coriolis term rotates (vx, vy) at
            # rate 2*omega during the kick.
            vx0=parts.vx.copy()
            vy0=parts.vy.copy()
            omega=2*self.omega
            a1_omega=(ax+self.omega**2*parts.x)/omega
            a2_omega=(ay+self.omega**2*parts.y)/omega
            parts.vx=(vx0-a2_omega)*numpy.cos(omega*dt)+(vy0+a1_omega)*numpy.sin(omega*dt)+a2_omega
            parts.vy=-(vx0-a2_omega)*numpy.sin(omega*dt)+(vy0+a1_omega)*numpy.cos(omega*dt)-a1_omega
            parts.vz=parts.vz+az*dt
        else:
            # Non-rotating frame: plain impulsive kick.
            parts.vx=parts.vx+ax*dt
            parts.vy=parts.vy+ay*dt
            parts.vz=parts.vz+az*dt
        channel=parts.new_channel_to(system.particles)
        channel.copy_attributes(["vx","vy","vz"])
        if(self.verbose):
            print(".. done")

    def kick_systems(self,dt):
        # Synchronize systems that support it, then kick each with the
        # rotating-frame kick above.
        for x in self.systems:
            if self.do_sync[x]:
                if hasattr(x,"synchronize_model"):
                    if(self.verbose): print(x.__class__.__name__,"is synchronizing", end=' ')
                    x.synchronize_model()
                    if(self.verbose): print(".. done")
        for x in self.systems:
            if hasattr(x,"particles"):
                self.kick_system_rotational(x, self.partners[x], dt)
        return 0

    @property
    def jacobi_potential_energy(self):
        # Centrifugal potential term of the Jacobi energy.
        parts=self.particles
        return -0.5*(parts.mass*self.omega**2*(parts.x**2+parts.y**2)).sum()

    def transform_inertial_to_rotating(self,x,y,vx,vy,inverse=False):
        """Rotate planar positions/velocities from the inertial frame to
        the rotating frame at the current model time (or back if inverse)."""
        angle = self.initial_angle + self.omega*self.model_time
        omega = self.omega
        if inverse:
            angle=-angle
            omega=-omega
        C1 = vx + omega*y
        C2 = vy - omega*x
        x_ = x * numpy.cos(angle) + y * numpy.sin(angle)
        y_ = -x * numpy.sin(angle) + y * numpy.cos(angle)
        vx_ = C1*numpy.cos(angle) + C2*numpy.sin(angle)
        vy_ = C2*numpy.cos(angle) - C1*numpy.sin(angle)
        return x_,y_,vx_,vy_

    def transform_rotating_to_inertial(self,x,y,vx,vy):
        return self.transform_inertial_to_rotating(x,y,vx,vy, inverse=True)

    # this return a view on self.particles
    # which automatically updates
    # (uses above transforms for this reason)
    @property
    def particles_inertial_frame(self):
        return TransformedParticles(self.particles,
            ["x","y","vx","vy"],
            self.transform_rotating_to_inertial,
            ["x","y","vx","vy"],
            self.transform_inertial_to_rotating,
            )

    @property
    def gas_particles_inertial_frame(self):
        return TransformedParticles(self.gas_particles,
            ["x","y","vx","vy"],
            self.transform_rotating_to_inertial,
            ["x","y","vx","vy"],
            self.transform_inertial_to_rotating,
            )
class RotatingBridgeInertialParticles(Rotating_Bridge):
    """
    same as above, except non-inertial frame is hidden. Note that:
    code.particles.get_subsets()[i]
    gets a view on the ith code particles
    """

    def _inertial_view(self, attrname):
        # Collect the named particle sets of all sub-systems and wrap the
        # superset in a view that converts to/from the rotating frame.
        subsets = [getattr(system, attrname)
                   for system in self.systems if hasattr(system, attrname)]
        superset = datamodel.ParticlesSuperset(subsets)
        return TransformedParticles(
            superset,
            ["x", "y", "vx", "vy"],
            self.transform_rotating_to_inertial,
            ["x", "y", "vx", "vy"],
            self.transform_inertial_to_rotating,
        )

    @property
    def particles(self):
        return self._inertial_view("particles")

    @property
    def gas_particles(self):
        return self._inertial_view("gas_particles")
| 6,928
| 35.088542
| 113
|
py
|
amuse
|
amuse-main/src/amuse/ext/molecular_cloud.py
|
import numpy
from math import sqrt
from amuse.ext.evrard_test import regular_grid_unit_cube
from amuse.ext.evrard_test import uniform_unit_sphere
from amuse.ext.evrard_test import uniform_unit_cube
from amuse.units import constants
from amuse.units import nbody_system
from amuse.units import generic_unit_converter
from amuse.units import units
from amuse import datamodel
def make_ifft_real(nf,vi):
    """
    Impose Hermitian (conjugate) symmetry on a (2*nf)^3 Fourier cube so
    that its inverse FFT is real.

    The frequency cube is assumed fftshift-ed (zero frequency at index nf).
    Modifies ``vi`` in place and returns it, or returns -1 when ``vi`` is
    not three-dimensional.
    """
    if vi.ndim==3:
        # body of cube: mirror the interior onto its point reflection
        vi[1:nf,1:2*nf,1:2*nf]=numpy.conj(vi[2*nf-1:nf:-1,2*nf-1:0:-1,2*nf-1:0:-1])
        # 3 lower + middle planes
        vi[0,1:nf,1:2*nf]=numpy.conj(vi[0,2*nf-1:nf:-1,2*nf-1:0:-1])
        vi[1:nf,0,1:2*nf]=numpy.conj(vi[2*nf-1:nf:-1,0,2*nf-1:0:-1])
        vi[1:nf,1:2*nf,0]=numpy.conj(vi[2*nf-1:nf:-1,2*nf-1:0:-1,0])
        vi[nf,1:nf,1:2*nf]=numpy.conj(vi[nf,2*nf-1:nf:-1,2*nf-1:0:-1])
        # 7 lines
        vi[0,0,1:nf]=numpy.conj(vi[0,0,2*nf-1:nf:-1])
        vi[0,1:nf,0]=numpy.conj(vi[0,2*nf-1:nf:-1,0])
        vi[1:nf,0,0]=numpy.conj(vi[2*nf-1:nf:-1,0,0])
        vi[0,nf,1:nf]=numpy.conj(vi[0,nf,2*nf-1:nf:-1])
        vi[nf,0,1:nf]=numpy.conj(vi[nf,0,2*nf-1:nf:-1])
        vi[nf,nf,1:nf]=numpy.conj(vi[nf,nf,2*nf-1:nf:-1])
        vi[nf,1:nf,0]=numpy.conj(vi[nf,2*nf-1:nf:-1,0])
        # 8 points: self-conjugate modes must be real.
        # NOTE(review): the factor 2 presumably keeps their weight
        # consistent with the paired (mirrored) modes — confirm.
        vi[0,0,0]=2*numpy.real(vi[0,0,0])
        vi[nf,0,0]=2*numpy.real(vi[nf,0,0])
        vi[0,nf,0]=2*numpy.real(vi[0,nf,0])
        vi[nf,nf,0]=2*numpy.real(vi[nf,nf,0])
        vi[0,0,nf]=2*numpy.real(vi[0,0,nf])
        vi[nf,0,nf]=2*numpy.real(vi[nf,0,nf])
        vi[0,nf,nf]=2*numpy.real(vi[0,nf,nf])
        vi[nf,nf,nf]=2*numpy.real(vi[nf,nf,nf])
        return vi
    return -1
def random_field(nf=32, power=-3., seed=None):
    """Generate a random field with a power-law Fourier spectrum.

    Builds (2*nf)^3 Fourier modes with power-law amplitudes and random
    phases, enforces the conjugate symmetry needed for a real field, and
    returns the inverse FFT (shape (2*nf, 2*nf, 2*nf)).
    """
    if seed is not None:
        numpy.random.seed(seed)
    fi, fj, fk = numpy.mgrid[-nf:nf, -nf:nf, -nf:nf]
    fi = fi.flatten()
    fj = fj.flatten()
    fk = fk.flatten()
    nmodes = len(fi)
    # Exponentially distributed amplitudes scaled by the power-law spectrum;
    # the tiny offset keeps the zero-frequency mode finite.
    amplitude = -numpy.log(numpy.random.uniform(0., 1., nmodes)) \
        * (fi**2 + fj**2 + fk**2 + 1.e-30)**(power/4.)
    phase = numpy.random.uniform(0., 1., nmodes)*2*numpy.pi
    modes = (amplitude*numpy.exp(phase*1j)).reshape(nf*2, nf*2, nf*2)
    # Zero out the mean (DC component) before symmetrizing.
    modes[nf, nf, nf] = 0.
    modes = make_ifft_real(nf, modes)
    field = numpy.fft.ifftn(numpy.fft.ifftshift(modes))
    if field.imag.max() > 1.e-16:
        print("check random field")
    return field
def make_div_free(nf,vx,vy,vz):
    """
    Project a 3D velocity field onto its divergence-free part.

    Transforms the components to Fourier space, removes from each mode the
    component parallel to its wave vector (v -> v - f (v.f)/|f|^2), zeroes
    the Nyquist planes and transforms back. Returns the real parts of the
    projected (vx, vy, vz).
    """
    vx=numpy.fft.fftn(vx)
    vx=vx.flatten()
    vy=numpy.fft.fftn(vy)
    vy=vy.flatten()
    vz=numpy.fft.fftn(vz)
    vz=vz.flatten()
    # Wave-vector components matching the fftn layout.
    freq=numpy.mgrid[-nf:1.*nf,-nf:1.*nf,-nf:1.*nf]
    fi,fj,fk=freq
    fi=numpy.fft.fftshift( fi)
    fj=numpy.fft.fftshift( fj)
    fk=numpy.fft.fftshift( fk)
    fi=fi.flatten()
    fj=fj.flatten()
    fk=fk.flatten()
    # Small offset avoids division by zero at the DC mode.
    ff=fi*fi+fj*fj+fk*fk+1.e-30
    # Subtract the longitudinal (curl-free) component of each mode.
    vdotf=(vx*fi+vy*fj+vz*fk)
    vx=vx-fi*vdotf/ff
    vy=vy-fj*vdotf/ff
    vz=vz-fk*vdotf/ff
    del fi,fj,fk,ff
    vx=vx.reshape(2*nf,2*nf,2*nf)
    vy=vy.reshape(2*nf,2*nf,2*nf)
    vz=vz.reshape(2*nf,2*nf,2*nf)
    # zero out nyquist freq planes: strictly speaking this is too drastic....
    # inside the nyquist planes only v// f x f_mirror needs to be enforced (methinks)
    vx[nf,0:2*nf,0:2*nf]=0.
    vx[0:2*nf,nf,0:2*nf]=0.
    vx[0:2*nf,0:2*nf,nf]=0.
    vy[nf,0:2*nf,0:2*nf]=0.
    vy[0:2*nf,nf,0:2*nf]=0.
    vy[0:2*nf,0:2*nf,nf]=0.
    vz[nf,0:2*nf,0:2*nf]=0.
    vz[0:2*nf,nf,0:2*nf]=0.
    vz[0:2*nf,0:2*nf,nf]=0.
    vx=numpy.fft.ifftn(vx)
    vy=numpy.fft.ifftn(vy)
    vz=numpy.fft.ifftn(vz)
    # Sanity check: the result should be (numerically) real.
    if vx.imag.max()>1.e-16:
        print("check div-free field")
    if vy.imag.max()>1.e-16:
        print("check div-free field")
    if vz.imag.max()>1.e-16:
        print("check div-free field")
    return vx.real,vy.real,vz.real
def interpolate_trilinear(x,y,z,farray):
    """Trilinear interpolation of ``farray`` at the points (x, y, z).

    The grid is assumed to span [-1, 1) in each dimension with periodic
    boundary conditions. Returns -1 if ``farray`` is not three-dimensional.
    """
    if farray.ndim != 3:
        return -1
    nx, ny, nz = farray.shape
    cell_x = 2./nx
    cell_y = 2./ny
    cell_z = 2./nz
    # Split each coordinate into an integer cell index and a fractional
    # offset inside the cell.
    fx, ix = numpy.modf((x + 1)/cell_x)
    fy, iy = numpy.modf((y + 1)/cell_y)
    fz, iz = numpy.modf((z + 1)/cell_z)
    ix = ix.astype('i')
    iy = iy.astype('i')
    iz = iz.astype('i')
    # Neighbouring cell indices, wrapped periodically.
    ix1 = numpy.mod(ix + 1, nx)
    iy1 = numpy.mod(iy + 1, ny)
    iz1 = numpy.mod(iz + 1, nz)
    # The 8 corner values of each cell.
    c000 = farray[ix, iy, iz]
    c100 = farray[ix1, iy, iz]
    c110 = farray[ix1, iy1, iz]
    c010 = farray[ix, iy1, iz]
    c001 = farray[ix, iy, iz1]
    c101 = farray[ix1, iy, iz1]
    c111 = farray[ix1, iy1, iz1]
    c011 = farray[ix, iy1, iz1]
    # Blend the corners with the (fx, fy, fz) weights.
    return (c111* fx*fy*fz +
            c011* (1-fx)*fy*fz +
            c101* fx*(1-fy)*fz +
            c001* (1-fx)*(1-fy)*fz +
            c110* fx*fy*(1-fz) +
            c010* (1-fx)*fy*(1-fz) +
            c100* fx*(1-fy)*(1-fz) +
            c000* (1-fx)*(1-fy)*(1-fz))
class molecular_cloud(object):
    """
    Initial-condition generator for a turbulent molecular cloud: a uniform
    unit sphere of gas particles carrying a divergence-free random velocity
    field with power-law spectral index ``power``.
    """
    def __init__(self,nf=32,power=-3.,targetN=10000, ethep_ratio=0.01,
            convert_nbody=None,ekep_ratio=1.,seed=None,base_grid=None):
        self.nf=nf                          # frequency grid half-size for the velocity field
        self.power=power                    # spectral index of the velocity field
        self.targetN=targetN                # requested number of particles
        self.convert_nbody=convert_nbody    # optional nbody <-> SI converter
        self.seed=seed
        self.base_grid=base_grid
        self.ethep_ratio=ethep_ratio        # thermal / potential energy ratio
        self.ekep_ratio=ekep_ratio          # kinetic / potential energy ratio
    def new_model(self):
        # Build a divergence-free random velocity field, sample positions on
        # a uniform unit sphere and rescale to the requested energy ratios.
        if self.seed is not None:
            numpy.random.seed(self.seed)
        vx_field=random_field(self.nf,self.power)
        vy_field=random_field(self.nf,self.power)
        vz_field=random_field(self.nf,self.power)
        vx_field,vy_field,vz_field=make_div_free(self.nf,vx_field,vy_field,vz_field)
        base_sphere=uniform_unit_sphere(self.targetN,base_grid=self.base_grid)
        x,y,z=base_sphere.make_xyz()
        self.actualN=len(x)
        vx=interpolate_trilinear(x,y,z,vx_field)
        vy=interpolate_trilinear(x,y,z,vy_field)
        vz=interpolate_trilinear(x,y,z,vz_field)
        mass=numpy.ones_like(x)/self.actualN
        # Remove any net drift of the cloud.
        vx=vx-vx.mean()
        vy=vy-vy.mean()
        vz=vz-vz.mean()
        # 3/5: presumably the potential energy of a uniform unit sphere in
        # these (N-body) units — TODO confirm.
        Ep=3./5
        self.internalE=Ep*self.ethep_ratio
        Ek=0.5*mass[0]*(vx**2+vy**2+vz**2).sum()
        # Scale velocities so Ek/Ep matches ekep_ratio.
        vfac=sqrt(self.ekep_ratio*Ep/Ek)
        vx=vx*vfac
        vy=vy*vfac
        vz=vz*vfac
        Ek=0.5*mass[0]*(vx**2+vy**2+vz**2).sum()
        internal_energy=numpy.ones_like(x)*self.internalE
        return (mass,x,y,z,vx,vy,vz,internal_energy)
    @property
    def result(self):
        # Particle set in N-body units, converted via convert_nbody if given.
        mass,x,y,z,vx,vy,vz,u = self.new_model()
        result = datamodel.Particles(self.actualN)
        result.mass = nbody_system.mass.new_quantity(mass)
        result.x = nbody_system.length.new_quantity(x)
        result.y = nbody_system.length.new_quantity(y)
        result.z = nbody_system.length.new_quantity(z)
        result.vx = nbody_system.speed.new_quantity(vx)
        result.vy = nbody_system.speed.new_quantity(vy)
        result.vz = nbody_system.speed.new_quantity(vz)
        result.u = (nbody_system.speed**2).new_quantity(u)
        if not self.convert_nbody is None:
            result = datamodel.ParticlesWithUnitsConverted(result, self.convert_nbody.as_converter_from_si_to_generic())
            result = result.copy()
        return result
class constant_density_div_free_power_law_v_ism_cube(object):
    """
    Initial-condition generator for a uniform-density cube of ISM gas with a
    divergence-free, power-law turbulent velocity field (n-body units).

    Each particle gets unit specific internal energy, so the total thermal
    energy of the (unit-mass) cube is 1; velocities are rescaled relative to it.
    """
    def __init__(self,nf=32,power=-3.,targetN=10000, eketh_ratio=1.,
                 convert=None,seed=None,base_grid=None):
        # nf: grid points per dimension of the random velocity field
        self.nf=nf
        # power: spectral index of the velocity field
        self.power=power
        self.targetN=targetN
        # convert: optional converter to express the result in SI units
        self.convert=convert
        self.seed=seed
        self.base_grid=base_grid
        self.eketh_ratio=eketh_ratio
    def new_model(self):
        """Build and return raw arrays (mass, x, y, z, vx, vy, vz, u) in n-body units."""
        if self.seed is not None:
            numpy.random.seed(self.seed)
        # Three independent Gaussian random fields, projected to zero divergence.
        vx_field=random_field(self.nf,self.power)
        vy_field=random_field(self.nf,self.power)
        vz_field=random_field(self.nf,self.power)
        vx_field,vy_field,vz_field=make_div_free(self.nf,vx_field,vy_field,vz_field)
        base_cube=uniform_unit_cube(self.targetN,base_grid=self.base_grid)
        x,y,z=base_cube.make_xyz()
        self.actualN=len(x)
        # Sample the gridded velocity field at the particle positions.
        vx=interpolate_trilinear(x,y,z,vx_field)
        vy=interpolate_trilinear(x,y,z,vy_field)
        vz=interpolate_trilinear(x,y,z,vz_field)
        # Equal-mass particles with unit total mass.
        mass=numpy.ones_like(x)/self.actualN
        Ek=0.5*mass[0]*(vx**2+vy**2+vz**2).sum()
        # NOTE(review): with u = 1 per unit mass the total thermal energy is 1,
        # so this rescaling yields Ek = 1/eketh_ratio; confirm the intended
        # sense of eketh_ratio (Ek/Eth versus Eth/Ek).
        vfac=sqrt(1/self.eketh_ratio/Ek)
        vx=vx*vfac
        vy=vy*vfac
        vz=vz*vfac
        Ek=0.5*mass[0]*(vx**2+vy**2+vz**2).sum()
        internal_energy=numpy.ones_like(x)
        return (mass,x,y,z,vx,vy,vz,internal_energy)
    @property
    def result(self):
        """Particle set in n-body units, or converted to SI if self.convert was given."""
        mass,x,y,z,vx,vy,vz,u = self.new_model()
        result = datamodel.Particles(self.actualN)
        result.mass = nbody_system.mass.new_quantity(mass)
        result.x = nbody_system.length.new_quantity(x)
        result.y = nbody_system.length.new_quantity(y)
        result.z = nbody_system.length.new_quantity(z)
        result.vx = nbody_system.speed.new_quantity(vx)
        result.vy = nbody_system.speed.new_quantity(vy)
        result.vz = nbody_system.speed.new_quantity(vz)
        result.u = (nbody_system.speed**2).new_quantity(u)
        if not self.convert is None:
            result = datamodel.ParticlesWithUnitsConverted(result, self.convert.as_converter_from_si_to_generic())
            # Copy to detach the converted view from the underlying n-body set.
            result = result.copy()
        return result
def ism_cube(targetN=10000, L=10 | units.parsec,
             density=(1.14 | units.amu / units.cm**3), u=50 | units.kms**2,
             nf=32, power=-3., seed=None, base_grid=None, eketh_ratio=1.):
    """
    Build a constant-density ISM cube generator with physical units.

    The cube spans [-L, L] in each direction; a unit converter is derived from
    the total mass, the half-size L, and the total thermal energy, so that the
    n-body model produced by the generator maps onto the requested SI scales.

    :argument targetN: requested particle number
    :argument L: half-size of the cube
    :argument density: gas mass density
    :argument u: specific internal energy
    :returns: a constant_density_div_free_power_law_v_ism_cube instance
        (use its .result property to obtain the particles)
    """
    cube_mass = density * (2 * L)**3
    thermal_energy = cube_mass * u
    unit_converter = generic_unit_converter.ConvertBetweenGenericAndSiUnits(
        cube_mass, L, thermal_energy)
    return constant_density_div_free_power_law_v_ism_cube(
        convert=unit_converter, targetN=targetN, nf=nf, power=power,
        seed=seed, base_grid=base_grid, eketh_ratio=eketh_ratio)
def new_ism_cube(
    number_of_particles=1000,
    cube_size=10 | units.parsec,
    density=(1.14 | units.amu / units.cm**3),
    u=50 | units.kms**2,
):
    """
    Return an SPH particle realization of a uniform-density ISM cube.

    Convenience wrapper around ism_cube that also stamps the requested density
    on the particles as a 'rho' attribute.

    :argument number_of_particles: requested particle number
    :argument cube_size: half-size of the cube
    :argument density: gas mass density (also stored on the particles)
    :argument u: specific internal energy
    """
    particles = ism_cube(number_of_particles, cube_size, density, u).result
    particles.rho = density
    return particles
if __name__=="__main__":
    # Smoke test: build a default ISM cube and print some diagnostics.
    cloud=ism_cube()
    parts=cloud.result
    # Thermal velocity scale of a particle (sqrt of specific internal energy).
    print(parts[0].u**0.5)
    # Total gas mass of the cube.
    print(len(parts)*parts[0].mass.in_(units.MSun))
    # Mean molecular weight assumed for the temperature estimate below.
    mu=1.4 | units.amu
    # gamma - 1 for a monatomic ideal gas (gamma = 5/3).
    gamma1=1.6667-1
    print('Temp:', (gamma1*min(parts.u)*mu/constants.kB).in_(units.K))
    # Reference density of a 10^4 MSun, 10 pc homogeneous sphere, for comparison.
    total_mass=10000. | units.MSun
    radius=10. | units.parsec
    print('dens:',(total_mass*3/4./3.1415/radius**3).in_(units.amu/units.cm**3))
| 10,428
| 30.224551
| 120
|
py
|
amuse
|
amuse-main/src/amuse/ext/sph_to_grid.py
|
import numpy
from amuse.units import units
from amuse.support.exceptions import AmuseException
from amuse.community.gadget2.interface import Gadget2
from amuse.community.fi.interface import Fi
from amuse.datamodel import Grid
class _SPH2Grid(object):
    """
    Helper that samples the SPH state of a running hydro code onto a regular
    cartesian grid.  Use convert_SPH_to_grid as the public entry point.
    """
    def __init__(self, sph_code, dimensions, do_scale = False):
        # Only periodic-box SPH runs can be sampled onto a full grid.
        if (sph_code.mode != sph_code.MODE_PERIODIC_BOUNDARIES):
            raise AmuseException("Only periodic boundary conditions supported")
        if len(dimensions) != 3:
            raise AmuseException("Argument dimensions must contain exactly three numbers")
        # The supported codes place their periodic box differently: Fi centers
        # it on the origin (-L/2 .. L/2), Gadget2 runs from 0 .. L, so the grid
        # coordinates must be shifted by half a box for Fi only.
        if isinstance(sph_code, Fi):
            self.box_offset = sph_code.parameters.periodic_box_size / 2.0
        elif isinstance(sph_code, Gadget2):
            self.box_offset = sph_code.parameters.periodic_box_size * 0.0
        else:
            raise AmuseException("Unknown hydrodynamics code: {0} - don't know whether the "
                "box runs from 0 to L or from -0.5 L to 0.5 L.".format(sph_code.__class__.__name__))
        self.sph_code = sph_code
        self.dimensions = dimensions
        self.do_scale = do_scale
        self.box_size = sph_code.parameters.periodic_box_size
    @property
    def result(self):
        """Grid with rho, rhovx/y/z (exposed as 'momentum') and energy sampled from the SPH code."""
        grid = Grid.create(self.dimensions,
            self.box_size.as_vector_with_length(3))
        grid.add_vector_attribute("momentum", ["rhovx","rhovy","rhovz"])
        # Sample at zero velocity: we want the state, not a Doppler-shifted flux.
        zero = numpy.zeros(self.dimensions).flatten() | units.m / units.s
        rho, rhovx, rhovy, rhovz, rhoe = self.sph_code.get_hydro_state_at_point(
            grid.x.flatten() - self.box_offset,
            grid.y.flatten() - self.box_offset,
            grid.z.flatten() - self.box_offset, zero, zero, zero)
        grid.rho = rho.reshape(self.dimensions)
        grid.rhovx = rhovx.reshape(self.dimensions)
        grid.rhovy = rhovy.reshape(self.dimensions)
        grid.rhovz = rhovz.reshape(self.dimensions)
        grid.energy = rhoe.reshape(self.dimensions)
        if self.do_scale:
            # Rescale the grid so that total mass, momentum, and thermal energy
            # match the SPH model exactly (sampling alone does not conserve them).
            grid.rho *= self.sph_code.gas_particles.total_mass() / (grid.rho.sum() * grid.cellsize().prod())
            total_sph_momentum = (self.sph_code.gas_particles.mass.reshape((-1,1)) * self.sph_code.gas_particles.velocity).sum(axis=0)
            total_grid_momentum = grid.momentum.reshape((-1,3)).sum(axis=0)
            # Momentum is corrected additively (a uniform offset per cell).
            grid.momentum += total_sph_momentum / grid.cellsize().prod() - total_grid_momentum
            grid.energy *= self.sph_code.thermal_energy / (grid.energy.sum() * grid.cellsize().prod())
        return grid
def convert_SPH_to_grid(sph_code, dimensions, **keyword_arguments):
    """
    Convert an SPH realization into a (cartesian) hydrodynamics Grid.

    Currently works for periodic boundary conditions only.  The SPH realization
    must reside in 'sph_code', which has to support the
    get_hydro_state_at_point function.  The number of grid cells in the x, y,
    and z directions is set by 'dimensions'.

    :argument sph_code: SPH code in which the gas particles reside
    :argument dimensions: tuple of three integers, defining the mesh size
    :argument do_scale: if True, scale grid density, momentum, and energy so
        that mass, momentum, and energy of the original model are conserved
    """
    return _SPH2Grid(sph_code, dimensions, **keyword_arguments).result
| 3,482
| 43.088608
| 134
|
py
|
amuse
|
amuse-main/src/amuse/ext/particles_with_color.py
|
"""
Particle colors
This module contains functions to compute colors for stars and SPH particles. These can be used
for visualisation.
"""
import os.path
import numpy
from amuse.units import units, constants
from amuse.support.exceptions import AmuseException
from amuse.datamodel import ParticlesOverlay
__all__ = ["new_particles_with_color", "new_particles_with_blackbody_color", "mu", "u_from_T", "T_from_u"]
def new_particles_with_color(original_particles, red_function, green_function, blue_function, attributes_names=None):
    """
    Returns color particles. These are bound to the 'original_particles' in
    the sense that they share their attributes, but have additional attributes
    'red', 'green', and 'blue'.

    Note: the color attributes are added (as calculated attributes) directly
    to 'original_particles', which is also the set that is returned.

    :argument original_particles: the particles for which the color needs to be computed
    :argument red_function: function that computes red color of a particle
    :argument green_function: function that computes green color of a particle
    :argument blue_function: function that computes blue color of a particle
    :argument attributes_names: names of the particle attributes passed to the color functions
    """
    original_particles.add_calculated_attribute("red", red_function, attributes_names=attributes_names)
    original_particles.add_calculated_attribute("green", green_function, attributes_names=attributes_names)
    original_particles.add_calculated_attribute("blue", blue_function, attributes_names=attributes_names)
    # 'color' is a convenience vector view over (red, green, blue).
    original_particles.add_vector_attribute("color", ["red", "green", "blue"])
    return original_particles
def mu(X = None, Y = 0.25, Z = 0.02, x_ion = 0.1):
    """
    Compute the mean molecular weight in kg (the average weight of particles in a gas).

    X, Y, and Z are the mass fractions of Hydrogen, of Helium, and of metals, respectively.
    x_ion is the ionisation fraction (0 < x_ion < 1), 1 means fully ionised.

    :argument X: hydrogen mass fraction; computed as 1 - Y - Z when None
    :raises AmuseException: if the supplied mass fractions do not sum to 1
    """
    if X is None:
        X = 1.0 - Y - Z
    elif abs(X + Y + Z - 1.0) > 1e-6:
        # Raise the package exception type for consistency with the rest of
        # this module (was a bare Exception).
        raise AmuseException("Error in calculating mu: mass fractions do not sum to 1.0")
    # Particles per proton mass: H gives 1 nucleus (+1 e- when ionised),
    # He gives 1/4 nuclei (+2/4 e-), metals contribute ~x_ion/2 electrons.
    return constants.proton_mass / (X*(1.0+x_ion) + Y*(1.0+2.0*x_ion)/4.0 + Z*x_ion/2.0)
def u_from_T(T, mu=mu(Y=0.25, Z=0.02, x_ion=0.1)):
    """
    Convert temperature to specific internal energy for a monatomic ideal gas
    (u = 3/2 kB T / mu).  The default mean molecular weight corresponds to
    solar composition with an ionisation fraction of 0.1.
    """
    # The default mu is evaluated once at import time; it is a constant quantity.
    specific_energy = 3.0/2.0 * constants.kB * T / mu
    return specific_energy
def T_from_u(u, mu=mu(Y=0.25, Z=0.02, x_ion=0.1)):
    """
    Convert specific internal energy to temperature for a monatomic ideal gas
    (T = 2/3 u mu / kB).  The default mean molecular weight corresponds to
    solar composition with an ionisation fraction of 0.1.
    """
    # The default mu is evaluated once at import time; it is a constant quantity.
    temperature = 2.0/3.0 * u * mu / constants.kB
    return temperature
class BlackBodyColorFromTemperature(object):
    """
    Maps temperatures to blackbody RGB values using Mitchell Charity's
    bbr_color.txt lookup table (shipped next to this module).

    The red/green/blue functions are intended to be passed to
    new_particles_with_color as calculated-attribute callbacks.
    """
    def __init__(self):
        self.create_temperature_to_RGB_table()

    def input_value_to_temperature_converter(self, temperature):
        # Identity; subclasses override this to derive a temperature from
        # another input quantity (e.g. internal energy).
        return temperature

    def _table_index(self, input_value):
        # Locate the table row for the temperature of input_value.
        temperature = self.input_value_to_temperature_converter(input_value)
        return numpy.searchsorted(self.temperature.number[1:], temperature.value_in(self.temperature.unit))

    def red_function(self, input_value):
        return self.red[self._table_index(input_value)]

    def green_function(self, input_value):
        return self.green[self._table_index(input_value)]

    def blue_function(self, input_value):
        return self.blue[self._table_index(input_value)]

    def create_temperature_to_RGB_table(self):
        """Parse bbr_color.txt into temperature and R/G/B arrays."""
        table_file = os.path.join(os.path.dirname(__file__), 'bbr_color.txt')
        temperature, red, green, blue = ([], [], [], [])
        with open(table_file, 'r') as infile:
            for line in infile:
                words = line.split()
                # Skip blank or malformed lines (the original crashed on them),
                # comments, and the rows whose third column is "10deg"
                # (presumably the 10-degree observer duplicates — confirm
                # against the table file).
                if not words or line[0] == "#" or len(words) < 9 or words[2] == "10deg":
                    continue
                temperature.append(float(words[0]))
                red.append(float(words[6]))
                green.append(float(words[7]))
                blue.append(float(words[8]))
        self.red = numpy.array(red)
        self.green = numpy.array(green)
        self.blue = numpy.array(blue)
        self.temperature = temperature | units.K
class BlackBodyColorFromInternalEnergy(BlackBodyColorFromTemperature):
    """
    Blackbody colors derived from gas specific internal energy: the input is
    first converted to a temperature via T_from_u, using a mean molecular
    weight built from the given composition.
    """
    def __init__(self, X=None, Y=0.25, Z=0.02, x_ion=0.1):
        self.mu = mu(X=X, Y=Y, Z=Z, x_ion=x_ion)
        super().__init__()

    def input_value_to_temperature_converter(self, u):
        # Monatomic ideal gas conversion with the stored composition.
        return T_from_u(u, mu=self.mu)
def new_particles_with_blackbody_color(original_particles, **kwargs):
    """
    Returns color particles. These are bound to the 'original_particles' in
    the sense that they share their attributes, but have additional attributes
    'red', 'green', and 'blue'.  Colors come from Mitchell Charity's blackbody
    color datafile (bbr_color.txt, see
    http://www.vendian.org/mncharity/dir3/blackbody/).

    If the particles have a 'temperature' attribute the colors are computed
    from it directly; otherwise the gas internal energy 'u' is converted via
    T_from_u, in which case the keyword arguments X, Y, Z, and x_ion apply.

    :argument original_particles: the particles for which the color needs to be computed
    :argument X: hydrogen abundance for T_from_u converter (default: None, i.e. compute from Y and Z)
    :argument Y: helium abundance for T_from_u converter (default: 0.25)
    :argument Z: metal (everything heavier than helium) abundance for T_from_u converter (default: 0.02)
    :argument x_ion: ionisation fraction for T_from_u converter (default: 0.1)
    """
    defined_attributes = original_particles.get_attribute_names_defined_in_store()
    if "temperature" in defined_attributes:
        color_source = BlackBodyColorFromTemperature(**kwargs)
        attributes_names = ["temperature"]
    elif "u" in defined_attributes:
        color_source = BlackBodyColorFromInternalEnergy(**kwargs)
        attributes_names = ["u"]
    else:
        raise AmuseException("The particles need to have 'temperature' or 'u' attributes for deriving black body colors")
    return new_particles_with_color(
        original_particles,
        color_source.red_function,
        color_source.green_function,
        color_source.blue_function,
        attributes_names=attributes_names)
| 6,567
| 44.93007
| 121
|
py
|
amuse
|
amuse-main/src/amuse/ext/roche_radius.py
|
"""
Various equations to calculate the size of the Roche lobe of a system.
There are three ways to use this module:
1. From the command line:
python roche_radius.py -a 10 -e 0.5 -m 10
2. Using the direct functions:
q = 10
print eggleton_formula(q)
e = 0.5
A = sepinsky_A_parameter(e)
print sepinsky_formula(q, A)
3. Using the Roche_Orbit class:
orbit = Roche_Orbit(mass_2=10|units.MSun, eccentricity=0.5)
print orbit.eggleton_roche_radius()
print orbit.sepinsky_roche_radius()
The functions also accept arrays instead of single numbers:
orbit.mass_1 = [1.0, 2.0, 3.0] | units.MSun
print orbit.sepinsky_roche_radius()
"""
import numpy
import math
from amuse.units.optparse import OptionParser
from amuse.units import units, constants, quantities
from amuse.support.console import set_printing_strategy
""" Equation 47-52 of Sepinsky """
def low_q_low_A(q, A):
    """Sepinsky et al. 2007 fitting formula for log q <= 0 and log A <= -0.1 (eq. 47)."""
    log_q = numpy.log10(q)
    gaussian_term = 0.05 * (1 - A) * numpy.exp(-(0.5 * (1 + A) + log_q)**2)
    return 1 + 0.11 * (1 - A) - gaussian_term
def high_q_low_A(q, A):
    """Sepinsky et al. 2007 fitting formula for log q > 0 and log A <= -0.1 (eq. 48)."""
    log_q = numpy.log10(q)
    exponential_term = numpy.exp((0.25 * A - 0.3) * log_q**1.55)
    return 1.226 - 0.21 * A - 0.15 * (1 - A) * exponential_term
def low_q_medium_A(q, A):
    """Sepinsky et al. 2007 fitting formula for log q <= 0 and -0.1 < log A <= 0.2 (eq. 49)."""
    log_q = numpy.log10(q)
    log_A = numpy.log10(A)
    # Quadratic in log q with coefficients that are polynomials in log A.
    g_0 = 0.9978 - 0.1229 * log_A - 0.1273 * (log_A)**2
    g_1 = 0.001 + 0.02556 * log_A
    g_2 = 0.0004 + 0.0021 * log_A
    return g_0 + log_q * g_1 + log_q**2 * g_2
def high_q_medium_A(q, A):
    """Sepinsky et al. 2007 fitting formula for log q > 0 and -0.1 < log A <= 0.2 (eq. 50)."""
    log_q = numpy.log10(q)
    log_A = numpy.log10(A)
    # Quadratic in log q with coefficients that are polynomials in log A.
    h_0 = 1.0071 - 0.0907 * log_A - 0.0495 * (log_A)**2
    h_1 = -0.004 - 0.163 * log_A - 0.214 * (log_A)**2
    h_2 = 0.00022 - 0.0108 * log_A - 0.02718 * (log_A)**2
    return h_0 + log_q * h_1 + log_q**2 * h_2
def low_q_high_A(q, A):
    """Sepinsky et al. 2007 fitting formula for log q <= 0 and log A > 0.2 (eq. 51)."""
    log_q = numpy.log10(q)
    log_A = numpy.log10(A)
    # Amplitude of the fit.
    i_0 = (6.3014 * (log_A)**1.3643 /
           (numpy.exp(2.3644 * (log_A)**0.70748) - 1.4413 * numpy.exp(-0.0000184 * (log_A)**-4.5693)))
    # Slope-like term proportional to log A.
    i_1 = log_A / (0.0015 * numpy.exp(8.84 * (log_A)**0.282) + 15.78)
    # Width of the Gaussian in log q.
    i_2 = ((1 + 0.036 * numpy.exp(8.01 * (log_A)**0.879)) /
           (0.105 * numpy.exp(7.91 * (log_A)**0.879)))
    # Center offset of the Gaussian.
    i_3 = 0.991 / (1.38 * numpy.exp(-0.035 * (log_A)**0.76) + 23.0 * numpy.exp(-2.89 * (log_A)**0.76))
    return i_0 + i_1 * numpy.exp(-i_2 * (log_q + i_3)**2)
def high_q_high_A(q, A):
    """Sepinsky et al. 2007 fitting formula for log q > 0 and log A > 0.2 (eq. 52)."""
    log_q = numpy.log10(q)
    log_A = numpy.log10(A)
    # Base level of the fit.
    j_0 = (1.895 * (log_A)**0.837 /
           (numpy.exp(1.636 * (log_A)**0.789) - 1))
    # Amplitude of the exponential term.
    j_1 = (4.3 * (log_A)**0.98 /
           (numpy.exp(2.5 * (log_A)**0.66) + 4.7))
    # Decay rate of the exponential term.
    j_2 = 1.0 / (8.8 * numpy.exp(-2.95 * (log_A)**0.76) + 1.64 * numpy.exp(-0.03 * (log_A)**0.76))
    # Exponent applied to log q.
    j_3 = 0.256 * numpy.exp(-1.33 * (log_A)**2.9) * (5.5 * numpy.exp(1.33 * (log_A)**2.9) + 1)
    return j_0 + j_1 * numpy.exp(-j_2 * (log_q)**j_3)
# Dispatch table: functions[q_regime][A_regime] selects the Sepinsky et al.
# fitting formula for the given (log q, log A) regime — see pick_formula.
functions = {'low': {'low':low_q_low_A, 'medium':low_q_medium_A, 'high':low_q_high_A },
             'high': {'low':high_q_low_A, 'medium':high_q_medium_A, 'high':high_q_high_A }}
def pick_formula(q, A):
    """Evaluate the Sepinsky fitting formula appropriate for scalar (q, A)."""
    log_q = numpy.log10(q)
    log_A = numpy.log10(A)
    if log_q <= 0:
        q_regime = 'low'
    else:
        q_regime = 'high'
    if log_A <= -0.1:
        A_regime = 'low'
    elif log_A <= 0.2:
        A_regime = 'medium'
    else:
        A_regime = 'high'
    chosen = functions[q_regime][A_regime]
    return chosen(q, A)
# Vectorized version: accepts array-valued q and/or A.
vec_pick_formula = numpy.vectorize(pick_formula)
def sepinsky_formula(q=1, A=1):
    """ The correction to the Eggleton Roche radius following Sepinsky, Willems and Kalogera 2007 """
    # Vectorized dispatch over the (q, A) fitting regimes; accepts scalars or arrays.
    return vec_pick_formula(q, A)
def sepinsky_A_parameter(eccentricity=0.0, angular_velocity_ratio=1.0, true_anomaly=numpy.pi):
    """
    Equation 21 of Sepinsky et al. 2007: the A parameter combining spin,
    eccentricity, and orbital phase.  A == 1 for a circular corotating orbit.
    """
    periastron_factor = (1.0 + eccentricity)**4
    phase_factor = (1.0 + eccentricity * numpy.cos(true_anomaly))**3
    return angular_velocity_ratio**2 * periastron_factor / phase_factor
def eggleton_formula(mass_ratio):
    """
    Eggleton (1983) approximation of the Roche-lobe radius over the separation.

    Valid for a circular, corotating system, i.e.:
        eccentricity = 0
        true_anomaly = pi
        angular_velocity_ratio = 1
    """
    q_cube_root = mass_ratio**(1.0/3.0)
    q_two_thirds = mass_ratio**(2.0/3.0)
    return 0.49 * q_two_thirds / (0.6 * q_two_thirds + numpy.log(1.0 + q_cube_root))
def separation(semimajor_axis, eccentricity, true_anomaly):
    """
    Orbital separation r(nu) = a (1 - e^2) / (1 + e cos nu),
    returned in the same units as the semimajor axis.
    """
    ellipse_factor = 1.0 - eccentricity**2
    phase_factor = 1.0 + eccentricity * numpy.cos(true_anomaly)
    return semimajor_axis * ellipse_factor / phase_factor
class Roche_Orbit(object):
    """
    A set of orbital parameters that allows the calculation of the Roche radius.

    mass_1 is the star that owns the Roche lobe; mass_2 is its companion.

    See:
    Eggleton 1983
    Sepinsky, Willems and Kalogera 2007
    """
    def __init__(self, mass_1=1.0|units.MSun, mass_2=1.0|units.MSun, eccentricity=0.0,
                 true_anomaly=0.0, angular_velocity_ratio=1.0, semimajor_axis=1.0|units.RSun,
                 period=None):
        self.mass_1 = mass_1
        self.mass_2 = mass_2
        self.eccentricity = eccentricity
        self.true_anomaly = true_anomaly
        # Spin of star 1 in units of the orbital angular velocity at periastron.
        self.angular_velocity_ratio = angular_velocity_ratio
        self.semimajor_axis = semimajor_axis
        if period is not None:
            # A given period overrides semimajor_axis (via Kepler's third law).
            self.period = period
    @property
    def mass_ratio(self):
        # The 1. factor forces float division if both masses are plain numbers.
        return self.mass_1 / (1. * self.mass_2)
    @mass_ratio.setter
    def mass_ratio(self, value):
        # Adjusts mass_1, keeping mass_2 fixed.
        self.mass_1 = value * self.mass_2
    @property
    def total_mass(self):
        return self.mass_1 + self.mass_2
    @property
    def A(self):
        # Sepinsky's A parameter (eq. 21) at the current orbital phase.
        return sepinsky_A_parameter(self.eccentricity, self.angular_velocity_ratio, self.true_anomaly)
    @property
    def period(self):
        # Kepler's third law.
        return (4.0 * numpy.pi**2 * self.semimajor_axis**3 / (constants.G * self.total_mass)).sqrt()
    @period.setter
    def period(self, period):
        # Inverse of Kepler's third law: set the semimajor axis from the period.
        self.semimajor_axis = (period/(2.0 * numpy.pi) * (self.total_mass * constants.G).sqrt())**(2.0/3.0)
    def sepinsky_over_eggleton(self):
        """Sepinsky et al. 2007 correction factor on the Eggleton radius."""
        return sepinsky_formula(self.mass_ratio, self.A)
    def eggleton_roche_over_separation(self):
        """Eggleton (1983) Roche radius in units of the orbital separation."""
        return eggleton_formula(self.mass_ratio)
    def eggleton_roche_radius(self):
        """ The Roche radius assumes a circular orbit with the current separation.
        Note that this is not really correct for non-circular orbits.
        """
        return self.eggleton_roche_over_separation() * self.separation()
    def sepinsky_roche_radius(self):
        """Eggleton radius corrected for eccentricity and non-corotation."""
        return self.sepinsky_over_eggleton() * self.eggleton_roche_radius()
    def separation(self):
        """Current orbital separation (depends on the true anomaly)."""
        return separation(self.semimajor_axis, self.eccentricity, self.true_anomaly)
def create_orbit_from_particles(particles, angular_velocity=0.|units.yr**-1):
    """
    Use mass, position and velocity to determine orbital parameters.
    Then setup Roche_Orbit

    :argument particles: two-particle set; particle 0 is the Roche-lobe owner
    :argument angular_velocity: spin angular velocity of particle 0
    """
    roche = Roche_Orbit()
    roche.mass_1, roche.mass_2 = particles.mass
    # Work in the relative orbit (particle 1 with respect to particle 0).
    position_vector = particles.position[1] - particles.position[0]
    velocity_vector = particles.velocity[1] - particles.velocity[0]
    # Standard gravitational parameter of the pair.
    mu = constants.G * particles.mass.sum()
    separation = position_vector.length()
    speed_squared = velocity_vector.length_squared()
    # Semimajor axis from the vis-viva equation.
    roche.semimajor_axis = mu * separation / (2 * mu - separation * speed_squared)
    # Eccentricity (Laplace-Runge-Lenz) vector; its length is the eccentricity.
    e_vector = speed_squared * position_vector / mu - position_vector.dot(velocity_vector) * velocity_vector / mu - position_vector / separation
    roche.eccentricity = numpy.sqrt((e_vector * e_vector).sum())
    # Angle between the position vector and the periastron direction.
    roche.true_anomaly = numpy.arccos(position_vector.dot(e_vector) / (roche.eccentricity * separation))
    if position_vector.dot(velocity_vector) < quantities.zero:
        # arccos is ambiguous: the stars are approaching, so the anomaly is in (pi, 2 pi).
        roche.true_anomaly = 2. * numpy.pi - roche.true_anomaly
    period = (4. * numpy.pi**2 * roche.semimajor_axis**3 / mu).sqrt()
    # Orbital angular velocity at periastron, used to normalize the spin.
    peri_orbital_angular_velocity = 2. * numpy.pi / period * (1. + roche.eccentricity)**2/ (1-roche.eccentricity**2)**(3./2.)
    roche.angular_velocity_ratio = angular_velocity / peri_orbital_angular_velocity
    return roche
def new_option_parser():
    """Build the command-line option parser; option names match Roche_Orbit's
    constructor arguments so the parsed values can be passed on directly."""
    parser = OptionParser(description="Calculate the Roche radius for a given orbit.")
    parser.add_option("-a", dest="semimajor_axis", type="float", default = 1.0, unit=units.AU, help="The orbit semimajor axis [%default %unit]")
    parser.add_option("-p", dest="period", type="float", default = 0.0, unit=units.day, help="The orbital period, which sets the semimajor axis [%unit]")
    parser.add_option("-e", dest="eccentricity", type="float", default = 0.0, help="The orbit eccentricity [%default]")
    parser.add_option("-M", dest="mass_1", type="float", default = 1.0, unit=units.MSun, help="The mass of the primary (the object that has the Roche lobe) [%default %unit]")
    parser.add_option("-m", dest="mass_2", type="float", default = 1.0, unit=units.MSun, help="The mass of the secondary (the object which causes the Roche lobe) [%default %unit]")
    parser.add_option("-n", dest="true_anomaly", type="float", default = 0.0, help="The true anomaly, the angle between the objects location and it's periastron location [%default]")
    parser.add_option("-f", dest="angular_velocity_ratio", type="float", default = 1.0, help="The rotational angular velocity of object 1, in units of the orbital angular velocity at periastron [%default]")
    return parser
def create_orbit_from_options():
    """Build a Roche_Orbit from command-line options; -p (period) overrides -a."""
    parsed, _ = new_option_parser().parse_args()
    option_values = parsed.__dict__
    # 'period' is not a constructor argument; pop it and apply it afterwards.
    period = option_values.pop("period")
    orbit = Roche_Orbit(**option_values)
    if period > quantities.zero:
        orbit.period = period
    return orbit
def print_results(orbit):
    """Print the Eggleton and Sepinsky Roche radii of 'orbit' in convenient units."""
    set_printing_strategy("custom", preferred_units = [units.MSun, units.RSun, units.Myr], precision = 7)
    # A == 1 corresponds to a circular, corotating configuration, the regime
    # the Eggleton formula was derived for.
    if orbit.A == 1.0:
        print("This is a circular, corotating orbit, so the eggleton formula is correct.")
    else:
        print("Warning: This is not a circular, corotating orbit, so the eggleton formula is not correct.")
    print("Roche radius for: M =", orbit.mass_1, "m =", orbit.mass_2, "a =", orbit.semimajor_axis, "e =", orbit.eccentricity)
    print()
    print("Eggleton Roche radius =", orbit.eggleton_roche_radius())
    print("Sepinsky Roche radius =", orbit.sepinsky_roche_radius())
if __name__ == '__main__':
    # Command-line entry point: parse orbit options and report the Roche radii.
    orbit = create_orbit_from_options()
    print_results(orbit)
| 10,547
| 35.752613
| 206
|
py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.