| repo (stringlengths 2-99) | file (stringlengths 13-225) | code (stringlengths 0-18.3M) | file_length (int64 0-18.3M) | avg_line_length (float64 0-1.36M) | max_line_length (int64 0-4.26M) | extension_type (stringclasses, 1 value) |
|---|---|---|---|---|---|---|
| amuse | amuse-main/src/amuse/ext/orbital_elements.py |
"""
orbital element conversion and utility functions
this module provides:
generate_binaries
orbital_elements
get_orbital_elements_from_binary
get_orbital_elements_from_binaries
get_orbital_elements_from_arrays
and the following deprecated functions (these assume plain-float angle
inputs and outputs to be in degrees):
new_binary_from_orbital_elements
orbital_elements_from_binary
orbital_elements_for_rel_posvel_arrays
"""
import numpy
import warnings
from amuse.units import units, constants, nbody_system
from amuse.units.trigo import cos, sin, arccos, arctan2
from amuse.datamodel import Particles, Particle
from amuse.units.quantities import to_quantity, VectorQuantity
def derive_G(unit_or_quantity):
unit=unit_or_quantity.unit
if(unit.base_system==constants.G.unit.base_system):
G=constants.G
elif(unit.base_system==nbody_system.G.unit.base_system):
G=nbody_system.G
else:
raise Exception("units not known, provide a G constant")
return G
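# Example (sketch): the gravitational constant is picked from the unit system
# of the argument; SI-style quantities give constants.G and N-body quantities
# give nbody_system.G.
#
#     >>> derive_G(1.0 | units.MSun) is constants.G
#     True
#     >>> derive_G(1.0 | nbody_system.mass) is nbody_system.G
#     True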
def newton(f, x0, fprime=None, args=(), tol=1.48e-8, maxiter=50):
    """
    Simple Newton-Raphson root finder. Returns (root, status): status is 0
    on convergence, -2 on a zero derivative and -1 otherwise (no fprime
    given, or maxiter reached without convergence).
    """
    if fprime is None:
        warnings.warn("provide fprime")
        return x0, -1
i = 0
x = x0
while (i < maxiter):
fv = f(x, *args)
dfv = fprime(x, *args)
if(dfv == 0):
return x0, -2
delta = -fv/dfv
if(abs(delta) < tol):
return x+delta, 0
x = x+delta
i = i+1
return x, -1
def true_anomaly_from_eccentric_anomaly(E, e):
return 2*arctan2((1+e)**0.5*sin(E/2), (1-e)**0.5*cos(E/2))
def equal_length_array_or_scalar(
array, length=1, mode="continue"
):
"""
Returns 'array' if its length is equal to 'length'. If this is not the
case, returns an array of length 'length' with values equal to the first
value of the array (or if 'array' is a scalar, that value. If mode is
"warn", issues a warning if this happens; if mode is "exception" raises an
exception in this case.
"""
try:
array_length = len(array)
if array_length == length:
return array
else:
if mode == "warn":
warnings.warn("Length of array is not equal to %i. Using only\
the first value." % length)
try:
unit = array.unit
value = array[0].value_in(unit)
except:
unit = units.none
value = array[0]
array = VectorQuantity(
array=numpy.ones(length) * value,
unit=unit,
)
return array
elif mode == "exception":
raise Exception("Length of array is not equal to %i. This is\
not supported." % length)
except:
try:
unit = array.unit
value = array.value_in(unit)
except:
unit = units.none
value = array
array = VectorQuantity(
array=numpy.ones(length) * value,
unit=unit,
)
if mode == "warn":
warnings.warn("Using single value for all cases.")
return array
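# Example (sketch): a scalar quantity is broadcast to the requested length,
# while an array that already has that length passes through unchanged.
#
#     >>> broadcast = equal_length_array_or_scalar(1.0 | units.MSun, length=3)
#     >>> len(broadcast)
#     3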
def center_of_mass_array(
vectors,
primary_mass,
secondary_mass,
):
"""
Returns array of center_of_mass vectors, where primaries are considered to
be at (0,0,0) and secondaries at 'vectors'.
"""
total_mass = (primary_mass + secondary_mass).reshape(
(len(primary_mass), 1)
)
center_of_mass_array = (
(
vectors
* secondary_mass.reshape(
(len(secondary_mass), 1)
)
)
/ total_mass
)
return center_of_mass_array
def orbital_period_to_semimajor_axis( T, M1, M2=None, G=None ):
if G is None:
G=derive_G(M1)
if M2 is None:
M2=0.*M1
mu = G * (M1 + M2)
semi_major_axis = ((T / (2*numpy.pi))**2 * mu)**(1./3.)
return semi_major_axis
def semimajor_axis_to_orbital_period( a, M1, M2=None, G=None ):
if G is None:
G=derive_G(M1)
if M2 is None:
M2=0.*M1
mu = G * (M1 + M2)
orbital_period = 2*numpy.pi*(a**3/mu)**0.5
return orbital_period
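# Example (sketch, Kepler's third law round trip; the values below are
# assumptions for illustration):
#
#     >>> a = orbital_period_to_semimajor_axis(1.0 | units.yr, 1.0 | units.MSun)
#     >>> print(a.in_(units.AU))  # close to 1 AU for the Earth-Sun case
#     >>> T = semimajor_axis_to_orbital_period(a, 1.0 | units.MSun)
#     >>> print(T.in_(units.yr))  # recovers ~1 yr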
def rel_posvel_arrays_from_orbital_elements(
primary_mass,
secondary_mass,
semi_major_axis,
eccentricity=0,
true_anomaly=0 | units.rad,
inclination=0 | units.rad,
longitude_of_the_ascending_node=0 | units.rad,
argument_of_periapsis=0 | units.rad,
G=None
):
"""
Returns relative positions/velocities for secondaries orbiting primaries.
If primary_mass is a scalar, assumes the same primary for all secondaries.
"""
if G is None:
G=derive_G(primary_mass)
try:
number_of_secondaries = len(secondary_mass)
except:
number_of_secondaries = 1
# arrays need to be equal to number of secondaries, or have just one value
primary_mass = equal_length_array_or_scalar(
primary_mass, length=number_of_secondaries)
semi_major_axis = equal_length_array_or_scalar(
semi_major_axis, length=number_of_secondaries)
eccentricity = equal_length_array_or_scalar(
eccentricity, length=number_of_secondaries)
true_anomaly = equal_length_array_or_scalar(
true_anomaly, length=number_of_secondaries)
inclination = equal_length_array_or_scalar(
inclination, length=number_of_secondaries)
longitude_of_the_ascending_node = equal_length_array_or_scalar(
longitude_of_the_ascending_node, length=number_of_secondaries)
argument_of_periapsis = equal_length_array_or_scalar(
argument_of_periapsis, length=number_of_secondaries)
cos_true_anomaly = cos(true_anomaly)
sin_true_anomaly = sin(true_anomaly)
cos_inclination = cos(inclination)
sin_inclination = sin(inclination)
cos_arg_per = cos(argument_of_periapsis)
sin_arg_per = sin(argument_of_periapsis)
cos_long_asc_nodes = cos(longitude_of_the_ascending_node)
sin_long_asc_nodes = sin(longitude_of_the_ascending_node)
# alpha is a unit vector directed along the line of node
alphax = (
cos_long_asc_nodes*cos_arg_per
- sin_long_asc_nodes*sin_arg_per*cos_inclination
)
alphay = (
sin_long_asc_nodes*cos_arg_per
+ cos_long_asc_nodes*sin_arg_per*cos_inclination
)
alphaz = sin_arg_per*sin_inclination
alpha = numpy.array([alphax, alphay, alphaz])
# beta is a unit vector perpendicular to alpha and the orbital angular
# momentum vector
betax = (
- cos_long_asc_nodes*sin_arg_per
- sin_long_asc_nodes*cos_arg_per*cos_inclination
)
betay = (
- sin_long_asc_nodes*sin_arg_per
+ cos_long_asc_nodes*cos_arg_per*cos_inclination
)
betaz = cos_arg_per*sin_inclination
beta = numpy.array([betax, betay, betaz])
# Relative position and velocity
separation = ( # Compute the relative separation
semi_major_axis*(1.0 - eccentricity**2)
/ (1.0 + eccentricity*cos_true_anomaly)
)
position_vector = (
separation*cos_true_anomaly*alpha
+ separation*sin_true_anomaly*beta
).T
velocity_tilde = (
(
G*(primary_mass + secondary_mass)
/ (semi_major_axis*(1.0 - eccentricity**2))
)**0.5
) # Common factor
velocity_vector = (
-1.0 * velocity_tilde * sin_true_anomaly * alpha
+ velocity_tilde*(eccentricity + cos_true_anomaly)*beta
).T
return position_vector, velocity_vector
def generate_binaries(
primary_mass,
secondary_mass,
semi_major_axis,
eccentricity=0,
true_anomaly=0 | units.rad,
inclination=0 | units.rad,
longitude_of_the_ascending_node=0 | units.rad,
argument_of_periapsis=0 | units.rad,
G=None
):
"""
returns two particlesets, which contain the primaries and the secondaries
in binary pairs.
"""
if G is None:
G=derive_G(primary_mass)
mass_unit = primary_mass.unit
try:
number_of_primaries = len(primary_mass)
except:
number_of_primaries = 1
primary_mass = numpy.array(
[primary_mass.value_in(mass_unit)]
) | mass_unit
try:
number_of_secondaries = len(secondary_mass)
except:
number_of_secondaries = 1
secondary_mass = numpy.array(
[secondary_mass.value_in(mass_unit)]
) | mass_unit
    if number_of_primaries == 1 and number_of_secondaries > 1:
number_of_primaries = number_of_secondaries
primary_mass = primary_mass[0] * numpy.ones(number_of_secondaries)
# mass arrays need to be the same length
if number_of_secondaries != number_of_primaries:
raise Exception("The number of primaries is not the same as the number\
of secondaries, this is not supported.")
position_vector, velocity_vector = rel_posvel_arrays_from_orbital_elements(
primary_mass,
secondary_mass,
semi_major_axis,
eccentricity=eccentricity,
true_anomaly=true_anomaly,
inclination=inclination,
longitude_of_the_ascending_node=longitude_of_the_ascending_node,
argument_of_periapsis=argument_of_periapsis,
G=G
)
primaries = Particles(number_of_primaries)
secondaries = Particles(number_of_secondaries)
primaries.mass = primary_mass
secondaries.mass = secondary_mass
centers_of_mass = center_of_mass_array(
position_vector, primary_mass, secondary_mass)
centers_of_mass_velocity = center_of_mass_array(
velocity_vector, primary_mass, secondary_mass)
primaries.position = - centers_of_mass
secondaries.position = position_vector - centers_of_mass
primaries.velocity = - centers_of_mass_velocity
secondaries.velocity = velocity_vector - centers_of_mass_velocity
return primaries, secondaries
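# Example (sketch): generate three binaries at once; scalar orbital elements
# are broadcast over all pairs. The masses and separations are assumptions
# for illustration.
#
#     >>> primaries, secondaries = generate_binaries(
#     ...     [1.0, 2.0, 3.0] | units.MSun,
#     ...     [0.5, 0.5, 0.5] | units.MSun,
#     ...     [1.0, 2.0, 4.0] | units.AU,
#     ...     eccentricity=0.1)
#     >>> len(primaries), len(secondaries)
#     (3, 3)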
def new_binary_from_orbital_elements(
mass1,
mass2,
semimajor_axis,
eccentricity=0,
true_anomaly=0 | units.deg,
inclination=0 | units.deg,
longitude_of_the_ascending_node=0 | units.deg,
argument_of_periapsis=0 | units.deg,
G=None
):
"""
returns a two-particle Particle set, with the second particle's position
and velocities computed from the input orbital elements.
inclination is given between 0 and 180 deg.
angles are assumed to be in deg if no unit is given.
"""
def angle_with_unit(angle, default_unit=units.deg):
try:
default_unit = angle.unit
except:
angle = angle | default_unit
return angle
# If no unit is given for angles, assume they are in degrees
true_anomaly = angle_with_unit(true_anomaly, default_unit=units.deg)
inclination = angle_with_unit(inclination, default_unit=units.deg)
argument_of_periapsis = angle_with_unit(
argument_of_periapsis,
default_unit=units.deg
)
longitude_of_the_ascending_node = angle_with_unit(
longitude_of_the_ascending_node,
default_unit=units.deg
)
primary, secondary = generate_binaries(
mass1, mass2, semimajor_axis,
eccentricity=eccentricity,
true_anomaly=true_anomaly,
inclination=inclination,
longitude_of_the_ascending_node=longitude_of_the_ascending_node,
argument_of_periapsis=argument_of_periapsis,
G=G
)
result = Particles()
result.add_particle(primary[0])
result.add_particle(secondary[0])
return result
def get_orbital_elements_from_binary(binary, G=None):
"""
Function that computes orbital elements from given two-particle set.
Elements are computed for the second particle in this set and the
return values are: mass1, mass2, semimajor axis, eccentricity,
cosine of true anomaly, cosine of inclination, cosine of the
longitude of the ascending node and the cosine of the argument of
pericenter. In case of a perfectly circular orbit the true anomaly
and argument of pericenter cannot be determined; in this case the
return values are 1.0 for both cosines.
"""
primaries = Particles()
secondaries = Particles()
if len(binary) > 2:
raise Exception("expects binary or single part")
elif len(binary) == 2:
primaries.add_particle(binary[0])
secondaries.add_particle(binary[1])
else:
# FIXME: in case of one particle, what do we calculate the orbit of?
# The method below is what was default before.
primaries.add_particle(binary[0])
primaries[0].position *= 0
primaries[0].velocity *= 0
secondaries.add_particle(Particle())
secondaries[0].mass = 0 * primaries[0].mass
secondaries[0].position = binary.position
secondaries[0].velocity = binary.velocity
(
mass1, mass2, semimajor_axis, eccentricity, true_anomaly,
inclination, long_asc_node, arg_per
) = get_orbital_elements_from_binaries(primaries, secondaries, G=G)
return (
mass1[0], mass2[0], semimajor_axis[0], eccentricity[0],
true_anomaly[0], inclination[0], long_asc_node[0], arg_per[0])
def orbital_elements_from_binary(binary, G=None):
(
mass1, mass2, semimajor_axis, eccentricity, true_anomaly,
inclination, long_asc_node, arg_per
) = get_orbital_elements_from_binary(binary, G=G)
return (
mass1, mass2, semimajor_axis, eccentricity,
true_anomaly.value_in(units.deg),
inclination.value_in(units.deg),
long_asc_node.value_in(units.deg),
arg_per.value_in(units.deg))
def get_orbital_elements_from_binaries(
primaries, secondaries, G=None):
"""
Function that computes orbital elements from given primaries and
secondaries.
Elements are computed for the second particle in this set and the
return values are: mass1, mass2, semimajor axis, eccentricity,
cosine of true anomaly, cosine of inclination, cosine of the
longitude of the ascending node and the cosine of the argument of
pericenter. In case of a perfectly circular orbit the true anomaly
and argument of pericenter cannot be determined; in this case the
return values are 1.0 for both cosines.
"""
position = secondaries.position - primaries.position
velocity = secondaries.velocity - primaries.velocity
mass1 = primaries.mass
mass2 = secondaries.mass
total_mass = mass1 + mass2
semimajor_axis, eccentricity, true_anomaly, inclination, long_asc_node, \
arg_per = get_orbital_elements_from_arrays(
position, velocity, total_mass, G=G)
return (
mass1, mass2, semimajor_axis, eccentricity, true_anomaly,
inclination, long_asc_node, arg_per)
def get_orbital_elements_from_arrays(
rel_position_raw, rel_velocity_raw,
total_masses, G=None):
"""
Orbital elements from array of relative positions and velocities vectors,
based on orbital_elements_from_binary and adapted to work for arrays (each
line characterises a two body problem).
For circular orbits (eccentricity=0): returns argument of pericenter = 0.,
true anomaly = 0.
For equatorial orbits (inclination=0): longitude of ascending node = 0,
argument of pericenter = arctan2(e_y,e_x).
:argument rel_position: array of vectors of relative positions of the
two-body systems
:argument rel_velocity: array of vectors of relative velocities of the
two-body systems
:argument total_masses: array of total masses for two-body systems
:argument G: gravitational constant
:output semimajor_axis: array of semi-major axes
:output eccentricity: array of eccentricities
:output period: array of orbital periods
:output inc: array of inclinations [radians]
:output long_asc_node: array of longitude of ascending nodes [radians]
:output arg_per_mat: array of argument of pericenters [radians]
:output true_anomaly: array of true anomalies [radians]
"""
if len(numpy.shape(rel_position_raw)) == 1:
rel_position = numpy.zeros([1, 3]) * rel_position_raw[0]
rel_position[0, 0] = rel_position_raw[0]
rel_position[0, 1] = rel_position_raw[1]
rel_position[0, 2] = rel_position_raw[2]
rel_velocity = numpy.zeros([1, 3]) * rel_velocity_raw[0]
rel_velocity[0, 0] = rel_velocity_raw[0]
rel_velocity[0, 1] = rel_velocity_raw[1]
rel_velocity[0, 2] = rel_velocity_raw[2]
else:
rel_position = rel_position_raw
rel_velocity = rel_velocity_raw
if G is None:
G=derive_G(total_masses[0])
separation = (rel_position**2).sum(axis=1)**0.5
n_vec = len(rel_position)
speed_squared = (rel_velocity**2).sum(axis=1)
semimajor_axis = (
G * total_masses * separation
/ (2. * G * total_masses - separation * speed_squared)
)
neg_ecc_arg = (
(
to_quantity(rel_position).cross(rel_velocity)**2
).sum(axis=-1)
/ (G * total_masses * semimajor_axis)
)
filter_ecc0 = (1. <= neg_ecc_arg)
eccentricity = numpy.zeros(separation.shape)
eccentricity[~filter_ecc0] = numpy.sqrt(1.0 - neg_ecc_arg[~filter_ecc0])
eccentricity[filter_ecc0] = 0.
# angular momentum
mom = to_quantity(rel_position).cross(rel_velocity)
# inclination
inc = arccos(mom[:, 2]/to_quantity(mom).lengths())
# Longitude of ascending nodes, with reference direction along x-axis
asc_node_matrix_unit = numpy.zeros(rel_position.shape)
z_vectors = numpy.zeros([n_vec, 3])
z_vectors[:, 2] = 1.
z_vectors = z_vectors | units.none
ascending_node_vectors = z_vectors.cross(mom)
filter_non0_incl = (
to_quantity(ascending_node_vectors).lengths().number > 0.)
asc_node_matrix_unit[~filter_non0_incl] = numpy.array([1., 0., 0.])
an_vectors_len = to_quantity(
ascending_node_vectors[filter_non0_incl]).lengths()
asc_node_matrix_unit[filter_non0_incl] = normalize_vector(
ascending_node_vectors[filter_non0_incl],
an_vectors_len)
long_asc_node = arctan2(
asc_node_matrix_unit[:, 1],
asc_node_matrix_unit[:, 0])
# Argument of periapsis using eccentricity a.k.a. Laplace-Runge-Lenz vector
mu = G*total_masses
pos_unit_vecs = normalize_vector(rel_position, separation)
mom_len = to_quantity(mom).lengths()
mom_unit_vecs = normalize_vector(mom, mom_len)
e_vecs = (
normalize_vector(
to_quantity(rel_velocity).cross(mom), mu)
- pos_unit_vecs
)
# Argument of pericenter cannot be determined for e = 0,
# in this case return 0.0 and 1.0 for the cosines
e_vecs_norm = (e_vecs**2).sum(axis=1)**0.5
filter_non0_ecc = (e_vecs_norm > 1.e-15)
arg_per_mat = VectorQuantity(
array=numpy.zeros(long_asc_node.shape),
unit=units.rad)
cos_arg_per = numpy.zeros(long_asc_node.shape)
arg_per_mat[~filter_non0_ecc] = 0. | units.rad
cos_arg_per[~filter_non0_ecc] = 1.
e_vecs_unit = numpy.zeros(rel_position.shape)
e_vecs_unit[filter_non0_ecc] = normalize_vector(
e_vecs[filter_non0_ecc],
e_vecs_norm[filter_non0_ecc]
)
cos_arg_per[filter_non0_ecc] = (
e_vecs_unit[filter_non0_ecc]
* asc_node_matrix_unit[filter_non0_ecc]
).sum(axis=-1)
e_cross_an = numpy.zeros(e_vecs_unit.shape)
e_cross_an[filter_non0_ecc] = numpy.cross(
e_vecs_unit[filter_non0_ecc],
asc_node_matrix_unit[filter_non0_ecc]
)
e_cross_an_norm = (e_cross_an**2).sum(axis=1)**0.5
filter_non0_e_cross_an = (e_cross_an_norm != 0.)
ss = -numpy.sign(
(
mom_unit_vecs[filter_non0_e_cross_an]
* e_cross_an[filter_non0_e_cross_an]
).sum(axis=-1)
)
    # note: sin_arg_per and cos_arg_per are reduced here to the filtered
    # subset; they are not used further below
sin_arg_per = ss*e_cross_an_norm[filter_non0_e_cross_an]
cos_arg_per = cos_arg_per[filter_non0_e_cross_an]
arg_per_mat[filter_non0_e_cross_an] = arctan2(sin_arg_per, cos_arg_per)
# in case longitude of ascending node is 0, omega=arctan2(e_y,e_x)
arg_per_mat[~filter_non0_e_cross_an & filter_non0_ecc] = (
arctan2(
e_vecs[~filter_non0_e_cross_an & filter_non0_ecc, 1],
e_vecs[~filter_non0_e_cross_an & filter_non0_ecc, 0]
)
)
filter_negative_zmom = (
~filter_non0_e_cross_an
& filter_non0_ecc
& (mom[:, 2] < 0.*mom[0, 0])
)
arg_per_mat[filter_negative_zmom] = (
2. * numpy.pi
- arg_per_mat[filter_negative_zmom]
)
# true anomaly
cos_true_anomaly = (e_vecs_unit*pos_unit_vecs).sum(axis=-1)
e_cross_pos = numpy.cross(e_vecs_unit, pos_unit_vecs)
ss2 = numpy.sign((mom_unit_vecs*e_cross_pos).sum(axis=-1))
sin_true_anomaly = ss2*(e_cross_pos**2).sum(axis=1)**0.5
true_anomaly = arctan2(sin_true_anomaly, cos_true_anomaly)
return (
semimajor_axis, eccentricity, true_anomaly,
inc, long_asc_node, arg_per_mat
)
def orbital_elements(*args, **kwargs):
try:
if len(args) == 1:
return get_orbital_elements_from_binary(*args, **kwargs)
elif len(args) == 2:
return get_orbital_elements_from_binaries(*args, **kwargs)
elif len(args) == 3:
return get_orbital_elements_from_arrays(*args, **kwargs)
else:
raise Exception
except Exception as ex:
if not ex.args:
ex.args=()
ex.args = ex.args + ("""
note: orbital elements takes as input either:
- single two particle set,
- two sets of primaries and secondaries
- arrays of rel. position, rel. velocity and masses
""",)
raise
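# Example (sketch): round-tripping through the dispatcher; a two-particle set
# goes in, orbital elements (angles in rad) come out. Values are assumptions.
#
#     >>> binary = new_binary_from_orbital_elements(
#     ...     1.0 | units.MSun, 0.5 | units.MSun, 1.0 | units.AU,
#     ...     eccentricity=0.3, inclination=10 | units.deg)
#     >>> m1, m2, a, e, nu, i, Omega, omega = orbital_elements(binary)
#     >>> print(a.in_(units.AU), e)  # recovers ~1 AU and e ~ 0.3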
def orbital_elements_for_rel_posvel_arrays(
rel_position_raw, rel_velocity_raw,
total_masses, G=None):
(semimajor_axis, eccentricity, true_anomaly, inc, long_asc_node,
arg_per_mat) = get_orbital_elements_from_arrays(
rel_position_raw, rel_velocity_raw, total_masses, G)
true_anomaly = true_anomaly.value_in(units.deg)
inc = inc.value_in(units.deg)
long_asc_node = long_asc_node.value_in(units.deg)
arg_per_mat = arg_per_mat.value_in(units.deg)
return (
semimajor_axis, eccentricity, true_anomaly,
inc, long_asc_node, arg_per_mat
)
def normalize_vector(vecs, norm, one_dim=False):
"""
normalize array of vector quantities
"""
if one_dim:
vecs_norm = numpy.zeros(vecs.shape)
vecs_norm[0] = vecs[0]/norm
vecs_norm[1] = vecs[1]/norm
vecs_norm[2] = vecs[2]/norm
else:
vecs_norm = numpy.zeros(vecs.shape)
vecs_norm[:, 0] = vecs[:, 0]/norm
vecs_norm[:, 1] = vecs[:, 1]/norm
vecs_norm[:, 2] = vecs[:, 2]/norm
return vecs_norm
| 23,761 | 33.995582 | 91 | py |
| amuse | amuse-main/src/amuse/ext/protodisk.py |
import numpy
from amuse.ext.evrard_test import body_centered_grid_unit_cube
from amuse.ext.evrard_test import regular_grid_unit_cube
from amuse.ext.evrard_test import uniform_random_unit_cube
from amuse.units import nbody_system
from amuse.units import units
from amuse.datamodel import Particles
from amuse.datamodel import ParticlesWithUnitsConverted
def approximate_inverse_error_function(x):
    # Winitzki's approximation to the inverse error function; note the
    # parentheses in the constant: a = 8*(pi-3) / (3*pi*(4-pi)) ~ 0.140
    a=8*(numpy.pi-3)/(3*numpy.pi*(4-numpy.pi))
return numpy.sign(x)*numpy.sqrt(
numpy.sqrt((2/numpy.pi/a+numpy.log(1-x**2)/2)**2-numpy.log(1-x**2)/a)-(2/numpy.pi/a+numpy.log(1-x**2)/2)
)
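# Quick sanity check (sketch): compare against scipy's erfinv if scipy is
# available; scipy is an assumption here, not a dependency of this module.
#
#     >>> from scipy.special import erfinv
#     >>> abs(approximate_inverse_error_function(0.5) - erfinv(0.5)) < 5e-3
#     True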
class uniform_unit_cylinder(object):
def __init__(self,targetN, base_grid=None):
cube_cylinder_ratio=numpy.pi*0.5**2
self.targetN=targetN
self.estimatedN=targetN/cube_cylinder_ratio
if base_grid is None:
self.base_grid=uniform_random_unit_cube
else:
self.base_grid=base_grid
def cutout_cylinder(self,x,y,z):
r=x**2+y**2
selection=r < numpy.ones_like(r)
x=x.compress(selection)
y=y.compress(selection)
z=z.compress(selection)
return x,y,z
def make_xyz(self):
if(self.base_grid==uniform_random_unit_cube):
estimatedN=self.estimatedN
x=[]
while len(x) < self.targetN:
                estimatedN=estimatedN*1.1+1  # grow the estimate until enough points survive
x,y,z=self.cutout_cylinder(*(self.base_grid(estimatedN)).make_xyz())
return x[0:self.targetN],y[0:self.targetN],z[0:self.targetN]
else:
return self.cutout_cylinder(*(self.base_grid(self.estimatedN)).make_xyz())
class ProtoPlanetaryDisk(object):
def __init__(self, targetN, convert_nbody = None, discfraction=0.1,
densitypower=1., thermalpower=0.5, Rmin=1,Rmax=100,
gamma=1.,q_out=2.,base_grid=None):
self.targetN=targetN
self.convert_nbody=convert_nbody
self.densitypower=densitypower
self.thermalpower=thermalpower
self.Rmin=Rmin
self.Rmax=Rmax
self.gamma=gamma
self.q_out=q_out
self.discfraction=discfraction
self.a=self.thermalpower
self.a2=self.thermalpower/2
self.g=densitypower
self.g2=2-densitypower
self.k_out=((1+discfraction)/Rmax**3)**0.5
self.sigma_out=self.g2*discfraction/(2*numpy.pi*Rmax**self.g*(Rmax**self.g2-Rmin**self.g2))
self.cs_out=self.q_out*numpy.pi*self.sigma_out/self.k_out
self.base_cylinder=uniform_unit_cylinder(targetN,base_grid)
def sigma(self,r):
return self.sigma_out*(self.Rmax/r)**self.g
def csound(self,r):
return self.cs_out*(self.Rmax/r)**self.a2
def cmass(self,r):
return self.discfraction*(r**self.g2-self.Rmin**self.g2)/(self.Rmax**self.g2-self.Rmin**self.g2)
def mass_encl(self,r):
return 1+self.cmass(r)
def kappa(self,r):
return (self.mass_encl(r)/r**3)**0.5
def toomreQ(self,r):
return self.csound(r)*self.kappa(r)/numpy.pi/self.sigma(r)
def getradius(self,f):
return ((self.Rmax**self.g2-self.Rmin**self.g2)*f+self.Rmin**self.g2)**(1./self.g2)
def zscale(self,r):
return self.csound(r)/self.kappa(r)
def u(self,r):
if self.gamma ==1.:
return self.csound(r)**2
else:
return self.csound(r)**2/(self.gamma-1)
def vcirc(self,r):
return (self.mass_encl(r)/r)**0.5
def new_model(self):
x,y,z=self.base_cylinder.make_xyz()
self.actualN=len(x)
f=x**2+y**2
r=f**0.5
rtarget=self.getradius(f)
mass=self.discfraction*numpy.ones_like(x)/self.actualN
internal_energy=self.u(rtarget)
zscale=self.zscale(rtarget)
r=r.clip(1.e-8,2.)
x=x/r
y=y/r
vx=-y*self.vcirc(rtarget)
vy=x*self.vcirc(rtarget)
vz=numpy.zeros_like(x)
x=rtarget*x
y=rtarget*y
z=approximate_inverse_error_function(z)*zscale*2.**0.5
return (mass, x, y, z, vx, vy, vz, internal_energy)
@property
def result(self):
masses, x,y,z, vx,vy,vz, internal_energies = self.new_model()
result = Particles(self.actualN)
result.mass = nbody_system.mass.new_quantity(masses)
result.x = nbody_system.length.new_quantity(x)
result.y = nbody_system.length.new_quantity(y)
result.z = nbody_system.length.new_quantity(z)
result.vx = nbody_system.speed.new_quantity(vx)
result.vy = nbody_system.speed.new_quantity(vy)
result.vz = nbody_system.speed.new_quantity(vz)
result.u = nbody_system.specific_energy.new_quantity(internal_energies)
if not self.convert_nbody is None:
result = ParticlesWithUnitsConverted(result, self.convert_nbody.as_converter_from_si_to_generic())
result = result.copy()
return result
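# Example (sketch): a 10000-particle disk around a 1 MSun star; Rmin and Rmax
# are in the converter's length unit (AU here). All parameter values are
# assumptions for illustration.
#
#     >>> convert = nbody_system.nbody_to_si(1.0 | units.MSun, 1.0 | units.AU)
#     >>> disk = ProtoPlanetaryDisk(
#     ...     10000, convert_nbody=convert, densitypower=1.5,
#     ...     Rmin=4, Rmax=20, q_out=1.0).result
#     >>> len(disk)  # close to, but not exactly, the target N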
| 5,030 | 32.765101 | 110 | py |
| amuse | amuse-main/src/amuse/ext/basicgraph.py |
"""
basic graph class and algorithms
UnionFind and MinimumSpanningTree taken from PADS:
a library of Python Algorithms and Data Structures
implemented by David Eppstein of the University of California, Irvine.
The current version of PADS may be found at
<http://www.ics.uci.edu/~eppstein/PADS/>, as individual files or as a
git repository that may be copied by the command line
git clone http://www.ics.uci.edu/~eppstein/PADS/.git
PADS is hereby placed in the public domain; you may use the code in PADS
for any purpose whatsoever. We make no guarantee of quality,
completeness, correctness, persistence or consistency of APIs, or support.
"""
class UnionFind(object):
"""Union-find data structure.
Each unionFind instance X maintains a family of disjoint sets of
hashable objects, supporting the following two methods:
- X[item] returns a name for the set containing the given item.
Each set is named by an arbitrarily-chosen one of its members; as
long as the set remains unchanged it will keep the same name. If
the item is not yet part of a set in X, a new singleton set is
created for it.
- X.union(item1, item2, ...) merges the sets containing each item
into a single larger set. If any item is not yet part of a set
in X, it is added to X as one of the members of the merged set.
"""
def __init__(self):
"""Create a new empty union-find structure."""
self.weights = {}
self.parents = {}
def __getitem__(self, object):
"""Find and return the name of the set containing the object."""
# check for previously unknown object
if object not in self.parents:
self.parents[object] = object
self.weights[object] = 1
return object
# find path of objects leading to the root
path = [object]
root = self.parents[object]
while root != path[-1]:
path.append(root)
root = self.parents[root]
# compress the path and return
for ancestor in path:
self.parents[ancestor] = root
return root
def __iter__(self):
"""Iterate through all items ever found or unioned by this structure."""
return iter(self.parents)
def union(self, *objects):
"""Find the sets containing the objects and merge them all."""
roots = [self[x] for x in objects]
heaviest = max([(self.weights[r],r) for r in roots], key = lambda x: x[0])[1]
for r in roots:
if r != heaviest:
self.weights[heaviest] += self.weights[r]
self.parents[r] = heaviest
def sets(self):
sets={}
for v in self.parents:
sets.setdefault(self[v],set()).add(v)
return list(sets.values())
class Graph(dict):
def add_edge(self, n1, n2, w):
if callable(w): w=w(n1,n2)
self.setdefault(n1, {}).update({n2: w})
self.setdefault(n2, {}).update({n1: w})
def remove_edge(self, n1, n2):
self[n1].pop(n2)
self[n2].pop(n1)
def add_node(self,n):
self.setdefault(n, {})
def all_edges(self):
return [(self[u][v],u,v) for u in self for v in self[u]]
def MinimumSpanningTree(G):
"""
Return the minimum spanning tree of an undirected graph G.
G should be represented in such a way that G[u][v] gives the
length of edge u,v, and G[u][v] should always equal G[v][u].
The tree is returned as a list of edges.
"""
# Kruskal's algorithm: sort edges by weight, and add them one at a time.
# We use Kruskal's algorithm, first because it is very simple to
# implement once UnionFind exists, and second, because the only slow
# part (the sort) is sped up by being built in to Python.
subtrees = UnionFind()
tree = []
edges = [(G[u][v],u,v) for u in G for v in G[u]]
edges.sort(key=lambda x:x[0])
for W,u,v in edges:
if subtrees[u] != subtrees[v]:
tree.append((W,u,v))
subtrees.union(u,v)
return tree
def MinimumSpanningTreeFromEdges(edges):
"""
Return the minimum spanning tree of an undirected graph G.
This version runs directly from an edgelist. An edge is a triple
(w,u,v), such that u,v are nodes, w is the length of the edge.
The tree is returned as a list of edges.
"""
# Kruskal's algorithm: sort edges by weight, and add them one at a time.
# We use Kruskal's algorithm, first because it is very simple to
# implement once UnionFind exists, and second, because the only slow
# part (the sort) is sped up by being built in to Python.
subtrees = UnionFind()
tree = []
edges.sort(key=lambda x:x[0])
for W,u,v in edges:
if subtrees[u] != subtrees[v]:
tree.append((W,u,v))
subtrees.union(u,v)
return tree
def ConnectedComponents(G):
"""
Return the connected components of a graph. G should be
represented in such a way that G[u] gives the edges from u in a way
that and if v in G[u] than u in G[v]. the connected components are
returned as sets of nodes.
"""
u=UnionFind()
for v in G:
nset=set(G[v])
nset.add(v)
u.union(*nset)
return u.sets()
def ConnectedComponentsFromEdges(edges):
"""
Return the connected components of a graph from a list of egdes.
the connected components are returned as sets of nodes. note this does
not find singletons.
"""
u=UnionFind()
for e in edges:
u.union(e[1],e[2])
return u.sets()
if __name__=="__main__":
graph = Graph()
graph.add_edge(0, 1, 1.0)
graph.add_edge(1, 2, 1.0)
graph.add_edge(2, 0, 1.0)
graph.add_edge(3, 4, 1.0)
graph.add_edge(4, 5, 1.0)
graph.add_edge(5, 3, 1.0)
print(graph[0])
first, second = ConnectedComponents(graph)
print(first)
print(second)
print(MinimumSpanningTree(graph))
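    # A small extra sketch (not in the original demo): the *FromEdges
    # variants take (weight, u, v) triples directly.
    edges = [(1.0, 0, 1), (1.0, 1, 2), (1.0, 3, 4)]
    print(ConnectedComponentsFromEdges(edges))
    print(MinimumSpanningTreeFromEdges(edges))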
| 6,021 | 30.528796 | 85 | py |
| amuse | amuse-main/src/amuse/ext/__init__.py | | 0 | 0 | 0 | py |
| amuse | amuse-main/src/amuse/ext/evrard_test.py |
"""
initial conditions for the SPH evrard collapse test
"""
import numpy
from math import *
from amuse.units import nbody_system
from amuse.units import units
from amuse.datamodel import Particles
from amuse.datamodel import ParticlesWithUnitsConverted
from amuse.ext.sobol import i4_sobol_generate
class uniform_random_unit_cube(object):
def __init__(self,targetN):
self.targetN = int(targetN)
self.par=int(targetN)
def make_xyz(self):
x=numpy.random.uniform(-1.,1.,self.par)
y=numpy.random.uniform(-1.,1.,self.par)
z=numpy.random.uniform(-1.,1.,self.par)
return x,y,z
class sobol_unit_cube(object):
def __init__(self,targetN):
self.targetN=int(targetN)
def make_xyz(self):
x, y, z = i4_sobol_generate(3, self.targetN, 3) * 2.0 - 1.0
return x,y,z
class regular_grid_unit_cube(object):
def __init__(self,targetN):
self.targetN=int(targetN)
self.par=int(float(targetN)**(1./3.)+1.5)
def make_xyz(self):
nf=self.par
dnf=1./(nf)
x,y,z=numpy.mgrid[-1.+dnf:1.-dnf:nf*1j,-1.+dnf:1.-dnf:nf*1j,-1.+dnf:1.-dnf:nf*1j]
x=x.flatten()
y=y.flatten()
z=z.flatten()
return x,y,z
class body_centered_grid_unit_cube(object):
def __init__(self,targetN):
self.targetN=int(targetN)
self.par=int(float(targetN/2.)**(1./3.)+1.5)
def make_xyz(self):
nf=self.par
x1,y1,z1=numpy.mgrid[-1.:1.:nf*1j,
-1.:1.:nf*1j,
-1.:1.:nf*1j]
x2,y2,z2=numpy.mgrid[-1.+1./nf:1.-1./nf:(nf-1)*1j,
-1.+1./nf:1.-1./nf:(nf-1)*1j,
-1.+1./nf:1.-1./nf:(nf-1)*1j]
x=numpy.concatenate( (x1.flatten(),x2.flatten()) )
y=numpy.concatenate( (y1.flatten(),y2.flatten()) )
z=numpy.concatenate( (z1.flatten(),z2.flatten()) )
a=numpy.where((x>=-1) & (y>=-1) & (z>=-1) & (x<1) & (y<1) & (z<1) )[0]
return x[a],y[a],z[a]
class glass_unit_cube(object):
def __init__(self,targetN,target_rms=0.01):
self.targetN=int(targetN)
self.target_rms=target_rms
if target_rms < 0.0001:
print("warning: target_rms may not succeed")
if targetN < 1000:
print("warning: not enough particles")
def make_xyz(self):
from amuse.community.fi.interface import Fi
N=self.targetN
target_rms=self.target_rms
L=1| nbody_system.length
dt=0.01 | nbody_system.time
x,y,z=uniform_random_unit_cube(N).make_xyz()
vx,vy,vz=uniform_unit_sphere(N).make_xyz()
p=Particles(N)
p.x=L*x
p.y=L*y
p.z=L*z
p.h_smooth=0. * L
p.vx= 0.1*vx | (nbody_system.speed)
p.vy= 0.1*vy | (nbody_system.speed)
p.vz= 0.1*vz | (nbody_system.speed)
p.u= (0.1*0.1) | nbody_system.speed**2
p.mass=(8./N) | nbody_system.mass
sph=Fi(use_gl=False,mode='periodic',redirection='none')
sph.initialize_code()
sph.parameters.use_hydro_flag=True
sph.parameters.radiation_flag=False
sph.parameters.self_gravity_flag=False
sph.parameters.gamma=1.
sph.parameters.isothermal_flag=True
sph.parameters.integrate_entropy_flag=False
sph.parameters.timestep=dt
sph.parameters.verbosity=0
sph.parameters.periodic_box_size=2*L
sph.parameters.artificial_viscosity_alpha = 1.
sph.parameters.beta = 2.
sph.commit_parameters()
sph.gas_particles.add_particles(p)
sph.commit_particles()
# sph.start_viewer()
t=0. | nbody_system.time
rms=1.
minrms=1.
i=0
while rms > target_rms:
i+=1
t=t+(0.25 | nbody_system.time)
sph.evolve_model(t)
rho=sph.particles.rho.value_in(nbody_system.density)
rms=rho.std()/rho.mean()
minrms=min(minrms,rms)
if rms>2.*minrms or i>300:
print(" RMS(rho) convergence warning:", i, rms,minrms)
if i>100000:
print("i> 100k steps - not sure about this...")
print(" rms:", rms)
break
x=sph.particles.x.value_in(nbody_system.length)
y=sph.particles.y.value_in(nbody_system.length)
z=sph.particles.z.value_in(nbody_system.length)
del sph
return x,y,z
def uniform_unit_cube(targetN, base_grid=None):
if base_grid is None:
return body_centered_grid_unit_cube(targetN)
else:
return base_grid(targetN)
class uniform_unit_sphere(object):
def __init__(self,targetN, base_grid=None):
cube_sphere_ratio=4/3.*numpy.pi*0.5**3
self.targetN=int(targetN)
self.estimatedN=targetN/cube_sphere_ratio
if base_grid is None:
self.base_grid=uniform_random_unit_cube
else:
self.base_grid=base_grid
def cutout_sphere(self,x,y,z):
r=x**2+y**2+z**2
selection=r < numpy.ones_like(r)
x=x.compress(selection)
y=y.compress(selection)
z=z.compress(selection)
return x,y,z
def make_xyz(self):
if(self.base_grid==uniform_random_unit_cube):
estimatedN=self.estimatedN
x=[]
while len(x) < self.targetN:
                estimatedN=estimatedN*1.1+1  # grow the estimate until enough points survive
x,y,z=self.cutout_sphere(*(self.base_grid(int(estimatedN))).make_xyz())
return x[0:self.targetN],y[0:self.targetN],z[0:self.targetN]
else:
return self.cutout_sphere(*(self.base_grid(int(self.estimatedN))).make_xyz())
class MakeEvrardTest(object):
def __init__(self, targetN, base_grid=None, size=1.,
mass=1.,internal_energy=0.05,seed=345672):
numpy.random.seed(seed)
self.targetN = int(targetN)
self.size=size
self.mass=mass
self.internal_energy=internal_energy
self.base_sphere=uniform_unit_sphere(targetN,base_grid)
def new_model(self):
x,y,z=self.base_sphere.make_xyz()
self.actualN=len(x)
r=numpy.sqrt(x**2+y**2+z**2)
rtarget=self.size*r**1.5
mass=numpy.ones_like(x)/self.actualN
internal_energy=numpy.ones_like(x)*self.internal_energy
r=r.clip(1.e-8,2*self.size)
x=rtarget*x/r
y=rtarget*y/r
z=rtarget*z/r
vx=numpy.zeros_like(x)
vy=numpy.zeros_like(x)
vz=numpy.zeros_like(x)
return (mass,x,y,z,vx,vy,vz,internal_energy)
class MakeEvrardModel(object):
def __init__(self, target_number_of_particles, convert_nbody = None, base_grid = None,
internal_energy = 0.05, do_scale = False, seed = None,size=1.):
self.target_number_of_particles = target_number_of_particles
self.convert_nbody = convert_nbody
self.internal_energy = internal_energy
self.size=size
self.do_scale = do_scale
self.base_sphere = uniform_unit_sphere(target_number_of_particles, base_grid)
numpy.random.seed(seed)
def new_model(self):
x, y, z = self.base_sphere.make_xyz()
self.actual_number_of_particles = len(x)
r = numpy.sqrt(x**2+y**2+z**2)
rtarget = self.size*r**1.5
mass = numpy.ones_like(x)/self.actual_number_of_particles
internal_energy = numpy.ones_like(x)*self.internal_energy
r = r.clip(1.e-8, 2.0*self.size)
x = rtarget*x/r
y = rtarget*y/r
z = rtarget*z/r
vx = numpy.zeros_like(x)
vy = numpy.zeros_like(x)
vz = numpy.zeros_like(x)
return (mass, numpy.hstack((x, y, z)), numpy.hstack((vx, vy, vz)), internal_energy)
@property
def result(self):
masses, positions, velocities, internal_energies = self.new_model()
result = Particles(self.actual_number_of_particles)
result.mass = nbody_system.mass.new_quantity(masses)
result.position = nbody_system.length.new_quantity(positions)
result.velocity = nbody_system.speed.new_quantity(velocities)
result.u = nbody_system.specific_energy.new_quantity(internal_energies)
result.position -= result.center_of_mass()
if self.do_scale:
scale_factor = (result.potential_energy(G=nbody_system.G)) / (-0.5 | nbody_system.energy)
result.position *= scale_factor
if not self.convert_nbody is None:
result = ParticlesWithUnitsConverted(result, self.convert_nbody.as_converter_from_si_to_generic())
result = result.copy()
return result
"""
Create an evrard gas sphere with approximately the given number of particles.
Returns a set of particles with equal mass and specific internal energy.
Positions are randomly distributed to fit an evrard gas distribution model
(density proportional to r^-1). Velocities are set to zero initially. The
model is centered around the origin. Positions are optionally scaled such
that the potential energy is -0.5 in nbody-units.
:argument target_number_of_particles: Target number of particles to include in the model
:argument convert_nbody: When given will convert the resulting set to SI units
:argument internal_energy: The specific internal energy of each particle (defaults to 0.05)
:argument do_scale: scale the positions to exact nbody units (U=-0.5)
:argument seed: Seed for the random number generator
"""
def new_evrard_gas_sphere(target_number_of_particles, *list_arguments, **keyword_arguments):
uc = MakeEvrardModel(target_number_of_particles, *list_arguments, **keyword_arguments)
return uc.result
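# Example (sketch): a 1000-particle Evrard sphere in N-body units, scaled so
# that the potential energy comes out at -0.5; the seed value is just an
# assumption for reproducibility.
#
#     >>> gas = new_evrard_gas_sphere(1000, do_scale=True, seed=1234)
#     >>> print(gas.potential_energy(G=nbody_system.G))  # close to -0.5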
if __name__=="__main__":
x,y,z=uniform_unit_sphere(10000).make_xyz()
print(len(x))
| 9,939 | 35.145455 | 110 | py |
| amuse | amuse-main/src/amuse/ext/polarsupport.py |
import numpy as np
class PolarSupport(object):
def __init__(self):
pass
def homogeneous_sphere_N(self, N):
#6/pi is the volumetric ratio of a cube
#and a fitting sphere
size_is_not_N = True
while (size_is_not_N):
            n_draw = int(np.ceil(N*6/np.pi))  # array shape must be an integer
            M = 2.0*(np.random.random([n_draw, 3]) - 0.5 * np.ones([n_draw, 3]))
norms = (M[::,0]*M[::,0]+\
M[::,1]*M[::,1]+\
M[::,2]*M[::,2])**0.5
selection = np.where(norms<1.0)
if len(selection[0])>=N:
size_is_not_N = False
Msphere = M[selection,0:3][0]
inv_norms_fit = 1.0/norms[selection]
        # scale each row to unit length (row-wise broadcasting instead of a
        # dense diagonal matrix product)
        Shell = Msphere * inv_norms_fit[:, np.newaxis]
x = Shell[0:N,0]
y = Shell[0:N,1]
z = Shell[0:N,2]
return x, y, z
def phase_to_polar(self, x, y, z, vx, vy, vz):
r = (x**2 + y**2 + z**2)**0.5
vr = (vx*x + vy*y + vz*z) / (x**2 + y**2 + z**2)**0.5
vt = ((vx**2 + vy**2 + vz**2) - vr**2)**0.5
return r, vr, vt
def position_to_polar(self, x, y, z):
return (x**2 + y**2 + z**2)**0.5
def phase_to_cartesian(self, ra, vr, vt):
ex, ey, ez = self.homogeneous_sphere_N(len(ra))
x = ex * ra
y = ey * ra
z = ez * ra
n1 = 1.0/np.sqrt(x**2+y**2)
n2 = 1.0/np.sqrt(x**2+z**2)
t1x = -y*n1
t1y = x*n1
t1z = np.zeros(len(x))
t2x = -z*n2
t2y = np.zeros(len(x))
t2z = x*n2
        # random angle in [0, 2*pi) sets the tangential direction
        theta = 2.0 * np.pi * np.random.random(len(ra))
s = np.cos(theta)
t = np.sin(theta)
vx = vt * (s*t1x + t*t2x)
vy = vt * (s*t1y + t*t2y)
vz = vt * (s*t1z + t*t2z)
return x, y, z, vx, vy, vz, ex, ey, ez
def position_to_cartesian(self, ra):
return ra * self.homogeneous_sphere_N(len(ra))
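# Example (sketch): despite its name, homogeneous_sphere_N rescales every
# accepted point to unit length, so it returns N random unit direction
# vectors; phase_to_cartesian uses these to pick random directions.
#
#     >>> ps = PolarSupport()
#     >>> x, y, z = ps.homogeneous_sphere_N(100)
#     >>> np.allclose(x**2 + y**2 + z**2, 1.0)
#     True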
| 1,917 | 26.797101 | 86 | py |
| amuse | amuse-main/src/amuse/ext/stellar_gyration_radius.py |
"""
Calculate the radius of gyration for a star
"""
import sys
import numpy
from amuse.lab import *
HeWhiteDwarf = 10 | units.stellar_type
Hertzsprung_gap = 2 | units.stellar_type
First_Asymptotic_Giant_Branch = 5 | units.stellar_type
Second_Asymptotic_Giant_Branch = 6 | units.stellar_type
def calculate_gyration_radius(star):
I = moment_of_inertia(star)
k2 = I/(star.mass*star.radius**2)
return k2
def get_mass_profile(star):
# if hasattr(star, "get_mass_profile"):
# mass_profile = star.get_mass_profile()* star.mass
# else:
radius_profile = star.get_radius_profile()
density_profile = star.get_density_profile()
radii_cubed = radius_profile**3
radii_cubed.prepend(0|units.m**3)
mass_profile = (4.0/3.0 * numpy.pi) * density_profile * (radii_cubed[1:] - radii_cubed[:-1])
print("Derived mass profile from density and radius.")
return mass_profile
def moment_of_inertia(star):
#Moment of inertia of the Sun: (I/MR^2) = 0.059
radius_profile = star.get_radius_profile()
density_profile = star.get_density_profile()
dr = radius_profile[1:]-radius_profile[:-1]
I = density_profile[:-1] * radius_profile[:-1]**4 * dr
I = I.sum() * (8*numpy.pi/3.)
return I
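# Where the 8*pi/3 factor comes from: a thin spherical shell of mass
# dm = 4*pi*rho*r**2*dr has moment of inertia dI = (2/3)*r**2*dm, so
# I = integral of (8*pi/3)*rho*r**4 dr, which the sum above approximates.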
def main(Mstar, z):
# stellar = MESA()
stellar = EVtwin()
stellar.parameters.metallicity = z
star = stellar.particles.add_particle(Particle(mass=Mstar))
while star.stellar_type<Second_Asymptotic_Giant_Branch:
stellar.evolve_model()
k2 = calculate_gyration_radius(star)
gamma = 0
vcrit = (constants.G*star.mass*(1-gamma)/star.radius).sqrt()
Omega = (vcrit/star.radius)
J = star.mass *k2 *star.radius**2 * Omega
print("Star: t=", star.age, "r=", star.radius, "k2=", k2, "J=", J.in_(units.MSun*units.cm**2/units.s), "type=", star.stellar_type)
stellar.stop()
def new_option_parser():
from amuse.units.optparse import OptionParser
result = OptionParser()
result.add_option("-M", unit=units.MSun,
dest="Mstar", type="float",default = 10.|units.MSun,
help="stellar mass [1] %unit")
result.add_option("-z", dest="z", type="float", default = 0.02,
help="metalicity [0.02]")
return result
if __name__ in ('__main__', '__plot__'):
set_printing_strategy("custom",
preferred_units =
[units.MSun, units.RSun, units.Myr],
precision = 5, prefix = "",
separator = " [", suffix = "]")
o, arguments = new_option_parser().parse_args()
main(**o.__dict__)
| 2,688 | 33.922078 | 138 | py |
| amuse | amuse-main/src/amuse/ext/cloud.py |
import inspect
import numpy
from amuse.units import generic_unit_system
from amuse import datamodel
def fill_grid_with_cloud_and_medium(
grid,
center = None,
radius = None,
rho_medium = 1.0 | generic_unit_system.mass / generic_unit_system.length**3,
rho_cloud = 0.1 | generic_unit_system.mass / generic_unit_system.length**3,
gamma = 5.0 / 3.0,
):
    # placeholder: intentionally not implemented yet
    pass
def fill_grid_with_spherical_cloud(
grid,
center = None,
radius = None,
rho = 1.0 | generic_unit_system.mass / generic_unit_system.length**3,
rhovx = 0.0 | generic_unit_system.mass / (generic_unit_system.time * generic_unit_system.length**2),
rhovy = 0.0 | generic_unit_system.mass / (generic_unit_system.time * generic_unit_system.length**2),
rhovz = 0.0 | generic_unit_system.mass / (generic_unit_system.time * generic_unit_system.length**2),
energy = 1.0 | generic_unit_system.mass / (generic_unit_system.time**2 * generic_unit_system.length),
subgridsize = 4,
):
radii = (grid.position - center).lengths()
if subgridsize <= 1:
selection = radii <= radius
else:
dr = grid.cellsize().length()
selection = radii < (radius - dr)
    # evaluate rho on the selected cells only, if it is given as a function
    grid.rho[selection] = rho(radii[selection]) if inspect.isroutine(rho) else rho
grid.rhovx[selection] = rhovx
grid.rhovy[selection] = rhovy
grid.rhovz[selection] = rhovz
grid.energy[selection] = energy
if subgridsize <= 1:
return
selection = numpy.logical_and( radii >= (radius-dr), radii <= (radius+dr))
subgrid = datamodel.Grid.create((subgridsize, subgridsize, subgridsize), grid.cellsize())
subgrid.x -= grid.cellsize()[0] / 2.0
subgrid.y -= grid.cellsize()[1] / 2.0
subgrid.z -= grid.cellsize()[2] / 2.0
x_indices, y_indices, z_indices = grid.indices()
x_indices = x_indices[selection]
y_indices = y_indices[selection]
z_indices = z_indices[selection]
position = subgrid.position
centers = center - grid.position[selection]
subgrid_rho = rho * numpy.ones_like(subgrid.x.number)
subgrid_rhovx = rhovx * numpy.ones_like(subgrid.x.number)
subgrid_rhovy = rhovy * numpy.ones_like(subgrid.x.number)
subgrid_rhovz = rhovz * numpy.ones_like(subgrid.x.number)
subgrid_energy = energy * numpy.ones_like(subgrid.x.number)
update_grid_rho = grid.rho[selection]
update_grid_rhovx = grid.rhovx[selection]
update_grid_rhovy = grid.rhovy[selection]
update_grid_rhovz = grid.rhovz[selection]
update_grid_energy = grid.energy[selection]
for i in range(len(x_indices)):
x_index = x_indices[i]
y_index = y_indices[i]
z_index = z_indices[i]
center_of_cloud_for_subgrid = centers[i]
radii = (position - center_of_cloud_for_subgrid).lengths()
subgrid_rho[...] = update_grid_rho[i]
subgrid_rhovx[...] = update_grid_rhovx[i]
subgrid_rhovy[...] = update_grid_rhovy[i]
subgrid_rhovz[...] = update_grid_rhovz[i]
subgrid_energy[...] = update_grid_energy[i]
subgrid_selection = radii <= radius
subgrid_rho[subgrid_selection] = rho
subgrid_rhovx[subgrid_selection] = rhovx
subgrid_rhovy[subgrid_selection] = rhovy
subgrid_rhovz[subgrid_selection] = rhovz
subgrid_energy[subgrid_selection] = energy
update_grid_rho[i] = subgrid_rho.mean()
update_grid_rhovx[i] = subgrid_rhovx.mean()
update_grid_rhovy[i] = subgrid_rhovy.mean()
update_grid_rhovz[i] = subgrid_rhovz.mean()
update_grid_energy[i] = subgrid_energy.mean()
grid.rho[selection] = update_grid_rho
grid.rhovx[selection] = update_grid_rhovx
grid.rhovy[selection] = update_grid_rhovy
grid.rhovz[selection] = update_grid_rhovz
grid.energy[selection] = update_grid_energy
def fill_grid_with_cloud_shock(
grid,
center = None,
radius = None,
ratio_densities = 10.0,
mach_number = 2.7,
gamma = 5.0/3.0,
subgridsize = 4,
):
velocity_unit = generic_unit_system.length / generic_unit_system.time
momentum_unit = generic_unit_system.mass / (generic_unit_system.time * generic_unit_system.length**2)
density_unit = generic_unit_system.mass / generic_unit_system.length**3
energy_unit = generic_unit_system.mass / (generic_unit_system.time**2 * generic_unit_system.length)
velocity_of_medium = (numpy.sqrt(gamma*(gamma-1.0)*ratio_densities) * mach_number) | velocity_unit
rho_in_cloud = 1.0 | density_unit
rhovx_in_cloud = 0.0 | momentum_unit
rhovy_in_cloud = 0.0 | momentum_unit
rhovz_in_cloud = 0.0 | momentum_unit
energy_in_cloud = 1.0 | energy_unit
rho_in_medium = 1.0 / ratio_densities | density_unit
rhovx_in_medium = 0.0 | momentum_unit
rhovy_in_medium = rho_in_medium * velocity_of_medium
rhovz_in_medium = 0.0 | momentum_unit
energy_in_medium = (1.0 | energy_unit) + (0.5* rho_in_medium * velocity_of_medium**2)
grid.rho = rho_in_medium
grid.rhovx = rhovx_in_medium
grid.rhovy = rhovy_in_medium
grid.rhovz = rhovz_in_medium
grid.energy = energy_in_medium
fill_grid_with_spherical_cloud(grid, center, radius, rho_in_cloud, rhovx_in_cloud, rhovy_in_cloud, rhovz_in_cloud, energy_in_cloud, subgridsize)
| 5,473 | 36.238095 | 148 | py |
| amuse | amuse-main/src/amuse/ext/stellar_wind.py |
import numpy
from amuse.support.exceptions import AmuseException
from amuse.datamodel import Particles
from amuse.units import units, quantities, constants
def kudritzki_wind_velocity(mass, radius, luminosity, temperature,
Y=0.25, I_He=2):
"""
This routine calculates the escape and terminal wind velocity. The
Equations are taken from Kudritzki & Puls, Annual Reviews of Astronomy
and Astrophysics, 2000, Vol. 38, p.613-666 Equation (8) and (9) and
Kudritzki et al., 1989, A&A 219, 205 Equation (64) and (65).
I_He: Number of electrons per He nucleus (= 2 in O-Stars)
sigma_e: Thomson absorption coefficient
Gamma: Ratio of radiative Thomson to gravitational acceleration
"""
sigma_e = 0.398 * (1 + I_He*Y)/(1 + 4*Y)
Gamma = 7.66E-5 * sigma_e * (luminosity.value_in(units.LSun)
/ mass.value_in(units.MSun))
v_esc = (2*constants.G * mass / radius*(1 - Gamma))**0.5
condlist = [temperature >= 21000. | units.K,
(10000. | units.K < temperature) &
(temperature < 21000. | units.K),
temperature <= 10000. | units.K]
choicelist = [2.65, 1.4, 1.0]
return v_esc * numpy.select(condlist, choicelist)
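# Example (sketch): terminal wind velocity for a hot massive star; the
# stellar parameters below are illustrative assumptions, not taken from this
# module.
#
#     >>> v = kudritzki_wind_velocity(
#     ...     30 | units.MSun, 10 | units.RSun,
#     ...     2e5 | units.LSun, 35000 | units.K)
#     >>> print(v.in_(units.kms))  # of order a few thousand km/s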
class PositionGenerator(object):
def __init__(self, grid_type="regular", rotate=True):
self.cube_generator = {
"random": self.random_cube,
"regular": self.regular_grid_unit_cube,
}[grid_type]
self.rotate = rotate
def as_three_vector(self, array):
number = array
if quantities.is_quantity(array):
number = array.number
three_vector = numpy.transpose([number]*3)
if quantities.is_quantity(array):
three_vector = three_vector | array.unit
return three_vector
def regular_grid_unit_cube(self, N):
n = int(numpy.ceil(N**(1./3.)))
start = -1. + 1./n
stop = 1. - 1./n
# complex step number tells mgrid to work like linspace
step = n*1j
grid = numpy.mgrid[start: stop: step,
start: stop: step,
start: stop: step]
grid = grid.reshape(3, n**3)
grid = grid.transpose()
grid = grid[numpy.random.choice(n**3, size=N, replace=False)]
return grid
def random_cube(self, N):
numbers = numpy.random.uniform(-1., 1., 3 * N)
return numpy.reshape(numbers, (N, 3))
def cutout_sphere(self, positions, rmin):
r = numpy.sqrt((positions**2).sum(1))
return positions[(r >= rmin) & (r < 1)]
def random_rotation(self):
u, v, r = numpy.random.rand(3)
theta = 2. * numpy.pi * u
phi = numpy.arccos(2. * v - 1.)
axis = (numpy.sin(theta) * numpy.cos(phi),
numpy.sin(theta) * numpy.sin(phi),
numpy.cos(theta))
axis = numpy.asarray(axis)
angle = 2. * numpy.pi * r
return axis, angle
def rotation_matrix(self, axis, angle):
""" Using the Euler-Rodrigues formula """
axis = numpy.asarray(axis)
theta = numpy.asarray(angle)
axis = axis/numpy.sqrt(numpy.dot(axis, axis))
a = numpy.cos(theta/2.)
b, c, d = -axis * numpy.sin(theta/2.)
aa, bb, cc, dd = a*a, b*b, c*c, d*d
bc, ad, ac, ab, bd, cd = b*c, a*d, a*c, a*b, b*d, c*d
m = [[aa+bb-cc-dd, 2*(bc+ad), 2*(bd-ac)],
[2*(bc-ad), aa+cc-bb-dd, 2*(cd+ab)],
[2*(bd+ac), 2*(cd-ab), aa+dd-bb-cc]]
return numpy.asarray(m)
def rotate_positions(self, positions, axis, angle):
matrix = self.rotation_matrix(axis, angle)
rotated = [matrix.dot(p) for p in positions]
return numpy.asarray(rotated)
def uniform_hollow_sphere(self, N, rmin):
cube_sphere_ratio = 4/3. * numpy.pi * 0.5**3 * (1 - rmin**3)
estimatedN = N / cube_sphere_ratio
while True:
estimatedN = estimatedN * 1.1 + 1
cube = self.cube_generator(int(estimatedN))
hollow_sphere = self.cutout_sphere(cube, rmin)
if len(hollow_sphere) >= N:
break
if self.rotate:
axis, angle = self.random_rotation()
hollow_sphere = self.rotate_positions(hollow_sphere, axis, angle)
return hollow_sphere[:N]
def generate_positions(self, N, rmin, rmax, radius_function=None,
star=None):
"""
The particles start out in a (random) position between
the surface of the star and the distance that the
previously released particles have reached.
This assumes that the current wind velocity is
comparable to the previous wind velocity.
Note that the stellar position is not added yet here.
"""
positions = self.uniform_hollow_sphere(N, 1. * rmin / rmax)
vector_lengths = numpy.sqrt((positions**2).sum(1))
unit_vectors = positions/self.as_three_vector(vector_lengths)
int_v_over_total = (((vector_lengths * rmax)**3 - rmin**3)
/ (rmax**3 - rmin**3))
if radius_function is not None:
distance = radius_function(int_v_over_total, rmax, star)
else:
distance = int_v_over_total * (rmax - rmin) + rmin
position = unit_vectors * self.as_three_vector(distance)
return position, unit_vectors
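# Example (sketch): draw 100 wind-particle positions in a hollow shell
# between 1 and 2 RSun; the radii are assumptions for illustration.
#
#     >>> gen = PositionGenerator()
#     >>> pos, unit_vecs = gen.generate_positions(
#     ...     100, 1. | units.RSun, 2. | units.RSun)
#     >>> r = pos.lengths()
#     >>> ((1. | units.RSun) <= r.min()) and (r.max() <= (2. | units.RSun))
#     True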
class StarsWithMassLoss(Particles):
def __init__(self, *args, **kwargs):
super(StarsWithMassLoss, self).__init__(*args, **kwargs)
self._private.timestamp = 0. | units.yr
self._private.previous_time = 0. | units.yr
self._private.track_mechanical_energy = False
self._private.new_unset_lmech_particles = False
self._private.attribute_names = set(
[
"lost_mass",
"wind_release_time",
"mu",
"mass",
"radius",
"age",
"temperature",
"luminosity",
"stellar_type",
"x",
"y",
"z",
"vx",
"vy",
"vz",
"wind_mass_loss_rate",
"initial_wind_velocity",
"terminal_wind_velocity",
"mass_loss_type",
]
)
self._private.defaults = dict(
lost_mass=0 | units.MSun,
mass=0 | units.MSun,
radius=0 | units.RSun,
age=0 | units.Myr,
temperature=0 | units.K,
luminosity=0 | units.LSun,
stellar_type=1 | units.stellar_type,
x=0 | units.m,
y=0 | units.m,
z=0 | units.m,
vx=0 | units.ms,
vy=0 | units.ms,
vz=0 | units.ms,
wind_mass_loss_rate=0 | units.MSun/units.yr,
initial_wind_velocity=0 | units.ms,
terminal_wind_velocity=0 | units.ms,
mechanical_energy=0 | units.J,
mass_loss_type="wind",
)
self.set_global_mu()
def add_particles(self, particles, *args, **kwargs):
new_particles = super(StarsWithMassLoss, self).add_particles(
particles, *args, **kwargs)
return new_particles
def can_extend_attributes(self):
return False
def get_attribute_names_defined_in_store(self):
return list(self._private.attribute_names) if len(self) > 0 else []
def add_particles_to_store(self, keys, attributes=[], values=[]):
good_attributes = self._private.attribute_names
# add default values if missing
for attr in good_attributes:
if attr not in attributes:
attributes.append(attr)
if attr == "wind_release_time":
value = (
self.collection_attributes.timestamp
or 0 | units.yr
)
elif attr == "previous_age":
if "age" in attributes:
value = values[attributes.index("age")]
else:
value = 0 | units.yr
elif attr == "previous_mass":
if "mass" in attributes:
value = values[attributes.index("mass")]
else:
value = 0 | units.MSun
elif attr == 'previous_mechanical_luminosity':
value = -1 | units.W
self._private.new_unset_lmech_particles = True
else:
value = self._private.defaults[attr]
values.append(value)
# remove unsupported attributes
if len(attributes) > len(good_attributes):
values = [v for i, v in enumerate(values)
if attributes[i] in good_attributes]
attributes = [a for a in attributes if a in good_attributes]
super(StarsWithMassLoss, self).add_particles_to_store(
keys, attributes, values)
def set_values_in_store(self, indices, attributes, list_of_values_to_set):
for attr in attributes:
if attr not in self._private.attribute_names:
raise AttributeError(
"You tried to set attribute '{0}'"
" but this attribute is not accepted for this set."
.format(attr)
)
# TODO
super(StarsWithMassLoss, self).set_values_in_store(
indices, attributes, list_of_values_to_set)
def add_calculated_attribute(self, name_of_the_attribute, *args, **kwargs):
if name_of_the_attribute in self._private.attribute_names:
self._private.attribute_names.remove(name_of_the_attribute)
del self._private.defaults[name_of_the_attribute]
super(StarsWithMassLoss, self).add_calculated_attribute(
name_of_the_attribute, *args, **kwargs)
def evolve_mass_loss(self, time):
if self._private.previous_time >= time:
return
elapsed_time = time - self._private.previous_time
self.lost_mass += elapsed_time * self.wind_mass_loss_rate
if self._private.track_mechanical_energy:
new_mechanical_luminosity = (0.5 * self.wind_mass_loss_rate
* self.terminal_wind_velocity**2)
if self._private.new_unset_lmech_particles:
i_new = self.previous_mechanical_luminosity < quantities.zero
self[i_new].previous_mechanical_luminosity =\
new_mechanical_luminosity[i_new]
self._private.new_unset_lmech_particles = False
average_mechanical_luminosity = 0.5 * (
self.previous_mechanical_luminosity
+ new_mechanical_luminosity)
self.mechanical_energy += (elapsed_time
* average_mechanical_luminosity)
self.previous_mechanical_luminosity = new_mechanical_luminosity
self.collection_attributes.timestamp = time
self._private.previous_time = time
def track_mechanical_energy(self, track=True):
self._private.track_mechanical_energy = track
mech_attrs = set(
["mechanical_energy", "previous_mechanical_luminosity"]
)
if track:
self._private.attribute_names |= mech_attrs
else:
self._private.attribute_names -= mech_attrs
def set_global_mu(self, mu=None, Y=0.25, Z=0.02, x_ion=0.1):
"""
Set the global value of mu used to create stellar wind.
if mu is added directly, Y (Helium fraction), Z (metal fraction)
and x_ion (percentage of ionized atoms) are ignored.
An alternative way is to set mu for each star separately.
"""
if mu is None:
X = 1.0 - Y - Z
ion_num = X*(1.0+x_ion) + Y*(1.0+2.0*x_ion)/4.0 + Z*x_ion/2.0
mu = constants.proton_mass / ion_num
self.mu = mu
self._private.defaults['mu'] = mu
def reset(self):
self.lost_mass = 0.0 | units.MSun
self.set_begin_time(0. | units.yr)
def set_begin_time(self, time):
self.wind_release_time = time
self.collection_attributes.timestamp = time
self._private.previous_time = time
class EvolvingStarsWithMassLoss(StarsWithMassLoss):
"""
Derive the stellar wind from stellar evolution.
You have to copy the relevant attributes from the stellar evolution.
This can be done using a channel like:
chan = stellar_evolution.particles.new_channel_to(
stellar_wind.particles,
attributes=["age", "radius", "mass", "luminosity", "temperature"])
while <every timestep>:
chan.copy()
"""
def __init__(self, *args, **kwargs):
super(EvolvingStarsWithMassLoss, self).__init__(*args, **kwargs)
attrs = set(["previous_age",
"previous_mass",
])
self._private.attribute_names |= attrs
def add_particles(self, particles, *args, **kwargs):
new_particles = super(EvolvingStarsWithMassLoss, self).add_particles(
particles, *args, **kwargs)
return new_particles
def evolve_mass_loss(self, time):
if self._private.previous_time <= time:
self.update_from_evolution()
StarsWithMassLoss.evolve_mass_loss(self, time)
def update_from_evolution(self):
if (self.age != self.previous_age).any():
mass_loss = self.previous_mass - self.mass
timestep = self.age - self.previous_age
self.wind_mass_loss_rate = mass_loss / timestep
self.previous_age = self.age
self.previous_mass = self.mass
class SimpleWind(PositionGenerator):
"""
The simple wind model creates SPH particles moving away
from the star at the terminal velocity.
This is a safe assumption if the distance to other objects
    is (far) larger than the stellar radius.
"""
def __init__(self, sph_particle_mass, derive_from_evolution=False,
tag_gas_source=False, compensate_gravity=False, **kwargs):
self.r_max = kwargs.pop("r_max", None)
super(SimpleWind, self).__init__(**kwargs)
self.sph_particle_mass = sph_particle_mass
self.model_time = 0.0 | units.yr
if derive_from_evolution:
self.particles = EvolvingStarsWithMassLoss()
self.particles.add_calculated_attribute(
"terminal_wind_velocity", kudritzki_wind_velocity,
attributes_names=['mass', 'radius',
'luminosity', 'temperature'])
else:
self.particles = StarsWithMassLoss()
self.target_gas = self.timestep = None
self.tag_gas_source = tag_gas_source
self.compensate_gravity = compensate_gravity
self.internal_energy_formula = self.internal_energy_from_temperature
self.set_initial_wind_velocity()
def set_initial_wind_velocity(self):
self.particles.add_calculated_attribute(
"initial_wind_velocity", lambda v: v,
attributes_names=['terminal_wind_velocity'])
def evolve_particles(self):
self.particles.evolve_mass_loss(self.model_time)
def evolve_model(self, time):
if self.has_target():
while self.model_time < time:
self.evolve_particles()
if self.has_new_wind_particles():
wind_gas = self.create_wind_particles()
self.target_gas.add_particles(wind_gas)
self.model_time += self.timestep
else:
self.model_time = time
self.evolve_particles()
def set_target_gas(self, target_gas, timestep):
self.target_gas = target_gas
self.timestep = timestep
def has_target(self):
return self.target_gas is not None
def internal_energy_from_temperature(self, star, wind=None):
"""
set the internal energy from the stellar surface temperature.
"""
return (3./2. * constants.kB * star.temperature / star.mu)
def internal_energy_from_velocity(self, star, wind=None):
"""
set the internal energy from the terminal wind velocity.
"""
return 0.5 * star.terminal_wind_velocity**2
def wind_sphere(self, star, Ngas):
wind = Particles(Ngas)
wind_velocity = star.initial_wind_velocity
outer_wind_distance = star.radius + wind_velocity * (
self.model_time - star.wind_release_time)
if self.r_max is not None and outer_wind_distance < self.r_max:
outer_wind_distance = self.r_max
wind.position, direction = self.generate_positions(
Ngas, star.radius, outer_wind_distance)
if self.compensate_gravity:
r = wind.position.lengths()
escape_velocity_squared = 2. * constants.G * star.mass / r
speed = (wind_velocity**2 + escape_velocity_squared).sqrt()
wind.velocity = self.as_three_vector(speed) * direction
else:
wind.velocity = direction * wind_velocity
return wind
def create_wind_particles_for_one_star(self, star):
Ngas = int(star.lost_mass/self.sph_particle_mass)
star.lost_mass -= Ngas * self.sph_particle_mass
wind = self.wind_sphere(star, Ngas)
wind.mass = self.sph_particle_mass
wind.u = self.internal_energy_formula(star, wind)
wind.position += star.position
wind.velocity += star.velocity
if self.tag_gas_source:
wind.source = star.key
return wind
def create_wind_particles(self):
wind = Particles(0)
for star in self.particles:
if star.lost_mass > self.sph_particle_mass:
new_particles = self.create_wind_particles_for_one_star(star)
wind.add_particles(new_particles)
star.wind_release_time = self.model_time
return wind
def has_new_wind_particles(self):
return self.particles.lost_mass.max() > self.sph_particle_mass
def create_initial_wind_for_time(self, time, check_length=True):
"""
Particles are created as if the wind has already been blowing for
'time'. Note that this does not work if the mass loss is derived
from stellar evolution.
"""
self.model_time = time
self.particles.evolve_mass_loss(self.model_time)
if self.has_new_wind_particles():
wind_gas = self.create_wind_particles()
if self.has_target():
self.target_gas.add_particles(wind_gas)
elif check_length:
raise AmuseException("create_initial_wind time was too small to"
"create any particles.")
else:
wind_gas = Particles()
self.reset()
return wind_gas
def create_initial_wind(self, number):
"""
This is a convenience method that creates some initial particles.
"""
required_mass = number * self.sph_particle_mass
total_mass_loss = self.particles.wind_mass_loss_rate.sum()
time = 1.0 * required_mass/total_mass_loss
wind_gas = Particles()
while len(wind_gas) < number:
time = 1.1 * time
wind_gas = self.create_initial_wind_for_time(time, False)
return wind_gas[:number]
def reset(self):
self.particles.reset()
self.model_time = 0.0 | units.yr
def set_begin_time(self, time):
self.model_time = time
self.particles.set_begin_time(time)
def get_gravity_at_point(self, eps, x, y, z):
return [0, 0, 0] | units.m/units.s**2
def get_potential_at_point(self, radius, x, y, z):
return [0, 0, 0] | units.J
class AccelerationFunction(object):
"""
    Abstract superclass of all acceleration functions.
    It numerically derives everything using acceleration_from_radius.
    Override as many of these functions with analytic solutions as possible.
"""
def __init__(self):
try:
from scipy import integrate, optimize
self.quad = integrate.quad
self.brentq = optimize.brentq
except ImportError:
self.quad = self.unsupported
self.brentq = self.unsupported
def unsupported(self, *args, **kwargs):
raise AmuseException("Importing SciPy has failed")
def acceleration_from_radius(self, radius, star):
"""
to be overridden
"""
pass
def velocity_from_radius(self, radius, star):
def stripped_acceleration(r1):
acc = self.acceleration_from_radius(r1 | units.RSun, star)
return acc.value_in(units.RSun/units.yr**2)
def acc_integral(r):
start = star.radius.value_in(units.RSun)
result = self.quad(stripped_acceleration, start, r)
return result[0]
integral = numpy.vectorize(acc_integral)(radius.value_in(units.RSun))
integral = integral | units.RSun**2/units.yr**2
return (2. * integral + star.initial_wind_velocity**2).sqrt()
def radius_from_time(self, time, star):
"""
following http://math.stackexchange.com/questions/54586/
converting-a-function-for-velocity-vs-position-vx-to-position-vs-time
"""
def inverse_velocity(r1):
velocity = self.velocity_from_radius(r1 | units.RSun, star)
return 1. / velocity.value_in(units.RSun/units.yr)
def one_radius(t):
def residual(r2):
start = star.radius.value_in(units.RSun)
result = self.quad(inverse_velocity, start, r2)
return result[0] - t.value_in(units.yr)
start = star.radius.value_in(units.RSun)
end = 1e5 * start
result = self.brentq(residual, start, end)
return result
radius = numpy.vectorize(one_radius)(time)
return radius | units.RSun
def radius_from_number(self, numbers, max_radius, star):
"""
See http://www.av8n.com/physics/arbitrary-probability.htm
for some good info on this.
"""
rmin = star.radius.value_in(units.RSun)
rmax = max_radius.value_in(units.RSun)
def inverse_velocity(r1):
velocity = self.velocity_from_radius(r1 | units.RSun, star)
velocity = velocity.value_in(units.RSun/units.s)
return 1. / velocity
def cumulative_inverse_velocity(q):
res = self.quad(inverse_velocity, rmin, q)
return res[0]
d_max = cumulative_inverse_velocity(rmax)
def one_radius(x):
def residual(r2):
return cumulative_inverse_velocity(r2) / d_max - x
return self.brentq(residual, rmin, rmax)
radius = numpy.vectorize(one_radius)(numbers)
return radius | units.RSun
def fix_cutoffs(self, test, value, star, default):
if hasattr(value, "__len__"):
value[test] = default
elif test:
value = default
return value
def fix_acc_cutoff(self, r, acc, star):
if star.acc_cutoff is None:
return acc
test = r > star.acc_cutoff
return self.fix_cutoffs(test, acc, star, quantities.zero)
def fix_v_cutoff(self, r, v, star):
if star.acc_cutoff is None:
return v
test = r > star.acc_cutoff
return self.fix_cutoffs(test, v, star, star.terminal_wind_velocity)
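# Sketch of how a custom profile plugs into AccelerationFunction: only the
# analytic acceleration is supplied; the superclass derives velocities and
# radii numerically. The 1/r**3 form is an arbitrary illustration, not a
# physically motivated wind law.
class _ExampleInverseCubeAcceleration(AccelerationFunction):
    def acceleration_from_radius(self, radius, star):
        # v_terminal**2 * R**2 / r**3 has the dimensions of an acceleration
        scaling = star.terminal_wind_velocity**2 * star.radius**2
        return self.fix_acc_cutoff(radius, scaling / radius**3, star)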
class ConstantVelocityAcceleration(AccelerationFunction):
"""
    A very basic "acceleration" function that ensures a constant velocity.
"""
def __init__(self, use_initial=False):
super(ConstantVelocityAcceleration, self).__init__()
self.use_initial = use_initial
def velocity(self, star):
if self.use_initial:
return star.initial_wind_velocity
else:
return star.terminal_wind_velocity
def acceleration_from_radius(self, radius, star):
return numpy.zeros_like(radius, dtype=float) | units.m/units.s**2
def velocity_from_radius(self, radius, star):
return numpy.ones_like(radius, dtype=float) * self.velocity(star)
def radius_from_time(self, t, star):
return star.radius + t * self.velocity(star)
def radius_from_number(self, x, r_max, star):
r_star = star.radius
return x * (r_max - r_star) + r_star
class RSquaredAcceleration(AccelerationFunction):
def scaling(self, star):
denominator = 1./star.radius
if star.acc_cutoff is not None:
denominator = denominator - 1./star.acc_cutoff
numerator = (
star.terminal_wind_velocity**2
- star.initial_wind_velocity**2
)
return 0.5 * numerator / denominator
def acceleration_from_radius(self, r, star):
acc = self.scaling(star)/r**2
return self.fix_acc_cutoff(r, acc, star)
def velocity_from_radius(self, r, star):
v = (2 * self.scaling(star) * (1./star.radius - 1./r)
+ star.initial_wind_velocity**2).sqrt()
return self.fix_v_cutoff(r, v, star)
class DelayedRSquaredAcceleration(AccelerationFunction):
def scaling(self, star):
denominator = 1./star.acc_start
if star.acc_cutoff is not None:
denominator = denominator - 1./star.acc_cutoff
numerator = (
star.terminal_wind_velocity**2
- star.initial_wind_velocity**2
)
return 0.5 * numerator / denominator
def fix_acc_start_cutoff(self, r, acc, star):
return self.fix_cutoffs(r < star.acc_start, acc, star, quantities.zero)
def fix_v_start_cutoff(self, r, v, star):
test = r < star.acc_start
return self.fix_cutoffs(test, v, star, star.initial_wind_velocity)
def acceleration_from_radius(self, r, star):
acc = self.scaling(star)/r**2
acc = self.fix_acc_start_cutoff(r, acc, star)
return self.fix_acc_cutoff(r, acc, star)
def velocity_from_radius(self, r, star):
v = (2 * self.scaling(star) * (1./star.acc_start - 1./r)
+ star.initial_wind_velocity**2).sqrt()
v = self.fix_v_start_cutoff(r, v, star)
return self.fix_v_cutoff(r, v, star)
class BetaLawAcceleration(AccelerationFunction):
""" Following Lamers 1999 and Maciel 2005 """
def __init__(self, beta=.8):
super(BetaLawAcceleration, self).__init__()
self.beta = beta
def acceleration_from_radius(self, r, star):
v_diff = star.terminal_wind_velocity - star.initial_wind_velocity
dvdr = (v_diff * star.radius / r**2
* self.beta * (1 - star.radius / r)**(self.beta - 1))
return dvdr * self.velocity_from_radius(r, star)
def velocity_from_radius(self, r, star):
v_start = star.initial_wind_velocity
v_end = star.terminal_wind_velocity
return v_start + (v_end - v_start) * (1 - star.radius/r)**self.beta
class LogisticVelocityAcceleration(AccelerationFunction):
""" The velocity follows the Logistic (Sigmoid) Function """
def __init__(self, steepness=10, r_mid=3.):
super(LogisticVelocityAcceleration, self).__init__()
self.steepness = steepness
self.r_mid = r_mid
def short(self, r, star):
v_init = star.initial_wind_velocity
v_end = star.terminal_wind_velocity
r_mid = self.r_mid * star.radius
exp = numpy.exp(-self.steepness * (r - r_mid) / (r_mid))
return v_init, v_end, r_mid, exp
def acceleration_from_radius(self, r, star):
v_init, v_end, r_mid, exp = self.short(r, star)
dvdr = (self.steepness * (v_end - v_init) * exp
/ (r_mid * (1. + exp)**2))
v = v_init + (v_end - v_init) / (1. + exp)
acc = v * dvdr
return self.fix_acc_cutoff(r, acc, star)
def velocity_from_radius(self, r, star):
v_init, v_end, r_mid, exp = self.short(r, star)
v = v_init + (v_end - v_init) / (1. + exp)
return self.fix_v_cutoff(r, v, star)
class AGBAcceleration(AccelerationFunction):
""" fit by Onno to the profiles by Nowotny 2005 """
def __init__(self, alpha=10, r_mid=3.):
super(AGBAcceleration, self).__init__()
self.alpha = alpha
self.r_mid = r_mid
def short(self, r, star):
v_init = star.initial_wind_velocity
v_end = star.terminal_wind_velocity
scaling = self.r_mid**self.alpha
return v_init, v_end, scaling, r/star.radius
def acceleration_from_radius(self, r, star):
v_init, v_end, scaling, r_over_R = self.short(r, star)
top = self.alpha * scaling * r_over_R**self.alpha
bottom = r * (r_over_R**(-self.alpha) + scaling)**2
dvdr = (v_end - v_init) * top / bottom
acc = self.velocity_from_radius(r, star) * dvdr
return self.fix_acc_cutoff(r, acc, star)
def velocity_from_radius(self, r, star):
v_init, v_end, scaling, r_over_R = self.short(r, star)
denominator = 1 + scaling * r_over_R**(-self.alpha)
v = v_init + (v_end - v_init)/denominator
return self.fix_v_cutoff(r, v, star)
class AcceleratingWind(SimpleWind):
"""
    This wind model returns SPH particles moving away from the star at
    sub-terminal velocity. It also adds a potential around the star that
represents the radiation pressure. This potential can accelerate all
particles away from the star using bridge. This is good for simulating
processes within a few stellar radii.
"""
acc_functions = {"constant_velocity": ConstantVelocityAcceleration,
"rsquared": RSquaredAcceleration,
"delayed_rsquared": DelayedRSquaredAcceleration,
"logistic": LogisticVelocityAcceleration,
"beta_law": BetaLawAcceleration,
"agb": AGBAcceleration,
}
def __init__(self, *args, **kwargs):
r_out_ratio = kwargs.pop("r_out_ratio", None)
acc_start_ratio = kwargs.pop("acc_start_ratio", 2)
grav_r_out_ratio = kwargs.pop("grav_r_out_ratio", None)
acc_func = kwargs.pop("acceleration_function", "constant_velocity")
acc_func_args = kwargs.pop("acceleration_function_args", {})
self.critical_timestep = kwargs.pop("critical_timestep", None)
self.v_init_ratio = kwargs.pop("v_init_ratio", None)
self.compensate_pressure = kwargs.pop("compensate_pressure", False)
super(AcceleratingWind, self).__init__(*args, **kwargs)
if isinstance(acc_func, str):
acc_func = self.acc_functions[acc_func]
self.acc_function = acc_func(**acc_func_args)
def r_out_function(r):
if r_out_ratio is None:
return None
else:
return r_out_ratio * r
self.particles.add_calculated_attribute(
"acc_cutoff", r_out_function,
attributes_names=['radius'])
def grav_r_out_function(r):
            if grav_r_out_ratio is None:
return None
else:
return grav_r_out_ratio * r
self.particles.add_calculated_attribute(
"grav_acc_cutoff", grav_r_out_function,
attributes_names=['radius'])
self.particles.add_calculated_attribute(
"acc_start", lambda r: acc_start_ratio * r,
attributes_names=['radius'])
self.internal_energy_formula = self.scaled_u_from_T
self.gamma = 5./3.
self.staging_radius = None
def set_initial_wind_velocity(self):
if self.v_init_ratio is not None:
self.particles.add_calculated_attribute(
"initial_wind_velocity", lambda v: self.v_init_ratio * v,
attributes_names=['terminal_wind_velocity'])
def scaled_u_from_T(self, star, wind=None):
"""
set the internal energy from the stellar surface temperature.
"""
u_0 = (3./2. * constants.kB * star.temperature / star.mu)
if wind is None:
return u_0
m_dot = star.wind_mass_loss_rate
v_0 = star.initial_wind_velocity
rho_0 = m_dot / (4. * numpy.pi * star.radius**2 * v_0)
r = wind.position.lengths()
v = wind.velocity.lengths()
rho = m_dot / (4. * numpy.pi * r**2 * v)
u = rho_0**(1 - self.gamma) * rho**(self.gamma - 1) * u_0
return u
def wind_sphere(self, star, Ngas):
wind = Particles(Ngas)
dt = (self.model_time - star.wind_release_time)
if self.critical_timestep is None or dt > self.critical_timestep:
acc_function = self.acc_function
else:
acc_function = ConstantVelocityAcceleration(use_initial=True)
outer_wind_distance = acc_function.radius_from_time(dt, star)
wind.position, direction = self.generate_positions(
Ngas, star.radius, outer_wind_distance,
acc_function.radius_from_number, star=star)
velocities = acc_function.velocity_from_radius(
wind.position.lengths(), star)
wind.velocity = direction * self.as_three_vector(velocities)
return wind
def pressure_accelerations(self, indices, radii, star):
v = self.acc_function.velocity_from_radius(radii, star)
a = self.acc_function.acceleration_from_radius(radii, star)
u = self.internal_energy_formula(star)
m_dot = star.wind_mass_loss_rate
v_init = star.initial_wind_velocity
rho = m_dot / (4 * numpy.pi * v * radii**2)
rho_init = m_dot / (4. * numpy.pi * v_init * star.radius**2)
k = (self.gamma-1) * rho_init**(1-self.gamma) * u
dvdr = a/v
acceleration = (self.gamma * k * rho**(self.gamma-1)
* (2./radii + dvdr/v))
return acceleration
def radial_velocities(self, gas, star):
rad_velocity = [] | units.ms
pos_vel = zip(gas.position-star.position, gas.velocity-star.velocity)
for pos, vel in pos_vel:
rad_direction = pos/pos.length()
scalar_projection = vel.dot(rad_direction)
rad_velocity.append(scalar_projection)
return rad_velocity
def set_staging_radius(self, staging_radius, gas, timestep):
self.staging_radius = staging_radius
self.st_target_gas = gas
self.st_timestep = timestep
def staging_accelerations(self, indices, radii, star):
particles = self.st_target_gas[indices]
v_now = self.radial_velocities(particles, star)
v_target = self.acc_function.velocity_from_radius(radii, star)
dt = self.st_timestep
acc = (v_target - v_now) / dt
return acc
def acceleration(self, star, radii):
accelerations = numpy.zeros(radii.shape) | units.m/units.s**2
i_acc = radii >= star.radius
if star.acc_cutoff is not None:
i_acc = i_acc & (radii < star.acc_cutoff)
accelerations[i_acc] += self.acc_function.acceleration_from_radius(
radii[i_acc], star
)
if self.compensate_pressure:
if self.staging_radius is not None:
i_pres = radii > star.radius * self.staging_radius
else:
i_pres = i_acc
accelerations[i_pres] -= self.pressure_accelerations(
i_pres, radii[i_pres], star)
if self.compensate_gravity:
if star.grav_acc_cutoff is not None:
indices = radii < star.grav_acc_cutoff
accelerations[indices] += (
constants.G * star.mass / radii[indices]**2
)
else:
accelerations += constants.G * star.mass / radii**2
if self.staging_radius is not None:
i_stag = radii < star.radius * self.staging_radius
if i_stag.any():
accelerations[i_stag] += (
self.staging_accelerations(i_stag, radii[i_stag], star)
)
return accelerations
def get_gravity_at_point(self, eps, x, y, z):
total_acceleration = (
numpy.zeros(shape=(len(x), 3)) | units.m/units.s**2)
positions = quantities.as_vector_quantity([x, y, z]).transpose()
for star in self.particles:
relative_position = positions - star.position
distance = relative_position.lengths()
acceleration = self.acceleration(star, distance)
direction = relative_position / self.as_three_vector(distance)
direction = numpy.nan_to_num(direction)
acc_vector = self.as_three_vector(acceleration)
total_acceleration += direction * acc_vector
return total_acceleration.transpose()
class HeatingWind(SimpleWind):
"""
This wind model returns SPH particles that have no initial velocity
with respect to the star. The energy of the integrated mechanical
luminosity is added as internal energy. This is a numerical
integration, so the timescale with which evolve_model is called should
be small enough for convergence.
    This method is good for simulating processes far from the star, and when
    the SPH particle mass is larger than the stellar mass loss per
timestep. It can make a big difference when the wind is derived from
evolution.
"""
def __init__(self, *args, **kwargs):
self.feedback_efficiency = kwargs.pop("feedback_efficiency", 0.01)
self.r_max_ratio = kwargs.pop("r_max_ratio", 5)
super(HeatingWind, self).__init__(*args, **kwargs)
self.internal_energy_formula = self.mechanical_internal_energy
self.previous_time = 0 | units.Myr
self.particles.track_mechanical_energy()
self.supernova_energy = 1e51 | units.erg
def evolve_particles(self):
self.particles.evolve_mass_loss(self.model_time)
def went_supernova(self, star, mass_lost):
manual = star.mass_loss_type == "supernova"
post_SN = star.stellar_type in [13, 14, 15] | units.stellar_type
enough_lost = mass_lost > (.1 | units.MSun)
return manual or (post_SN and enough_lost)
def mechanical_internal_energy(self, star, wind):
mass_lost = wind.mass.sum()
lmech = star.mechanical_energy
lmech_wind = lmech / (star.lost_mass/mass_lost + 1)
star.mechanical_energy -= lmech_wind
if self.went_supernova(star, mass_lost):
lmech_wind = self.supernova_energy
star.mass_loss_type = "wind"
return self.feedback_efficiency * lmech_wind / mass_lost
def wind_sphere(self, star, Ngas):
wind = Particles(Ngas)
r_max = self.r_max or self.r_max_ratio * star.radius
wind.position, direction = self.generate_positions(Ngas, star.radius,
r_max)
wind.velocity = [0, 0, 0] | units.kms
return wind
def reset(self):
super(HeatingWind, self).reset()
self.previous_time = 0 | units.Myr
def new_stellar_wind(sph_particle_mass, target_gas=None, timestep=None,
derive_from_evolution=False, mode="simple", **kwargs):
"""
Create a new stellar wind code.
target_gas: a gas particle set into which the wind particles should be
put (requires timestep)
timestep: the timestep at which the wind particles should be generated.
derive_from_evolution: derive the wind parameters from stellar
evolution (you still need to update the stellar parameters)
    mode: set to 'simple', 'accelerate' or 'heating'
"""
if (target_gas is None) ^ (timestep is None):
raise AmuseException("Must specify both target_gas and timestep"
"(or neither)")
wind_modes = {"simple": SimpleWind,
"accelerate": AcceleratingWind,
"heating": HeatingWind,
}
stellar_wind = wind_modes[mode](sph_particle_mass, derive_from_evolution,
**kwargs)
if target_gas is not None:
stellar_wind.set_target_gas(target_gas, timestep)
return stellar_wind
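# Hypothetical usage sketch of new_stellar_wind; the particle mass, timestep
# and gas reservoir are illustrative values. The input stars are assumed to
# define mass, radius, temperature, wind_mass_loss_rate and
# terminal_wind_velocity.
def _example_new_stellar_wind(stars):
    gas = Particles(0)
    wind = new_stellar_wind(
        1.0e-11 | units.MSun,   # mass of each SPH wind particle
        target_gas=gas,         # new wind particles are added to this set
        timestep=0.1 | units.day,
        mode="simple")
    wind.particles.add_particles(stars)
    return wind, gas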
def static_wind_from_stellar_evolution(stellar_wind, stellar_evolution,
start_time, end_time):
"""
Convenience method that sets up the stellar wind parameters using a
stellar evolution code. The change in the stars between start_time and
end_time determines the stellar wind. Do not add the star particles to
the stellar_wind code before calling this function.
"""
stellar_evolution.evolve_model(start_time)
stellar_wind.particles.add_particles(stellar_evolution.particles)
stellar_evolution.evolve_model(end_time)
chan = stellar_evolution.particles.new_channel_to(stellar_wind.particles)
chan.copy_attributes(["age", "radius", "mass", "luminosity",
"temperature"])
stellar_wind.evolve_model(0 | units.yr)
stellar_wind.reset()
return stellar_wind
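# Sketch of driving static_wind_from_stellar_evolution; the SeBa evolution
# code and the time window are example choices, and the stars are added to
# the evolution code only (not to the wind code, see the docstring above).
def _example_static_wind(stars):
    from amuse.community.seba.interface import SeBa
    stellar_evolution = SeBa()
    stellar_evolution.particles.add_particles(stars)
    stellar_wind = new_stellar_wind(
        1.0e-10 | units.MSun, derive_from_evolution=True)
    return static_wind_from_stellar_evolution(
        stellar_wind, stellar_evolution, 1.0 | units.Myr, 1.01 | units.Myr)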
| 41,820
| 34.959587
| 81
|
py
|
amuse
|
amuse-main/src/amuse/ext/LagrangianRadii.py
|
import collections
import numpy
import operator
import os
import random
import sys
import unittest
from math import sqrt
from amuse.units import nbody_system
from amuse.units import units
from amuse.rfi.core import is_mpd_running
from amuse.ic.plummer import new_plummer_model
from amuse.ic.salpeter import new_salpeter_mass_distribution
MassFraction = [0.005, 0.01, 0.02, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 1.0] \
| units.none
def distance_sq(stars, com):
return (stars.position - com).lengths_squared()
def LagrangianRadii(stars, verbose=False, massf=MassFraction):
com = stars.center_of_mass()
#stars.position = stars.position - com
vcom = stars.center_of_mass_velocity()
#stars.velocity = stars.velocity - vcom
n_stars = len(stars)
    # Next line is a potential performance bottleneck, because
# the for loop is not in numpy but explicit
# old but slow: d2 = numpy.array([distance_sq(star) for star in stars])
d2 = distance_sq(stars, com)
m = stars.mass / stars.mass.sum()
d2m = list(zip(d2, m))
d2m.sort(key=operator.itemgetter(0))
iL = 0
mt = 0 | units.none
Lagrad = []
for d2i, mi in d2m:
mt += mi
while mt >= massf[iL]:
Lagrad.append(d2i.sqrt())
if verbose:
print("Lagrangian Radius M= ", mt, \
"(iL=", iL, ") at d= ", Lagrad[-1])
iL += 1
if iL >= len(massf):
break
return Lagrad
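# Minimal usage sketch (parameters are example choices): the Lagrangian
# radii of a 100-particle Plummer sphere in N-body units.
def _example_lagrangian_radii():
    stars = new_plummer_model(100)
    return LagrangianRadii(stars, verbose=True)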
def main():
assert is_mpd_running()
seed = None
nstars = 128
if len(sys.argv) > 1:
        nstars = int(sys.argv[1])
with_units = len(sys.argv) > 2
if not with_units:
mass_unit = nbody_system.mass
length_unit = nbody_system.length
    else:
mass_unit = units.MSun
length_unit = units.parsec
m_min = 0.1 | mass_unit
m_max = 100 | mass_unit
alpha = -2.35
r_vir = 1 | length_unit
masses = new_salpeter_mass_distribution(nstars, m_min, m_max, alpha)
m_tot = masses.sum()
if not with_units:
convert_nbody = None
masses /= m_tot.value_in(nbody_system.mass) # scale to unit mass
m_tot = 1 | nbody_system.mass
else:
convert_nbody = nbody_system.nbody_to_si(m_tot, r_vir)
convert_nbody.set_as_default()
print(m_tot)
stars = new_plummer_model(nstars, convert_nbody, random_state=seed)
stars.mass = masses
LagrangianRadii(stars, verbose=True)
if __name__ == '__main__':
main()
| 2,524
| 24.505051
| 75
|
py
|
amuse
|
amuse-main/src/amuse/ext/spherical_model.py
|
import numpy
from amuse.support.exceptions import AmuseException, AmuseWarning
from amuse.units import units, nbody_system, generic_unit_system, constants
from amuse.datamodel import Particles
from amuse.ext.sobol import i4_sobol_generate
class EnclosedMassInterpolator(object):
"""
Interpolator used in 'get_enclosed_mass_from_tabulated' and 'get_radius_for_enclosed_mass'.
These two functions are required for 'new_spherical_particle_distribution'.
"""
def __init__(self, radii = None, densities = None, core_radius = None):
self.initialized = False
self.four_thirds_pi = numpy.pi * 4.0/3.0
if (radii and densities):
self.initialize(radii, densities, core_radius = core_radius)
def initialize(self, radii, densities, core_radius = None):
self.sort_density_and_radius(densities*1.0, radii*1.0, core_radius = core_radius)
self.calculate_enclosed_mass_table()
self.initialized = True
def sort_density_and_radius(self, densities, radii, core_radius = None):
self.radii, self.densities = radii.sorted_with(densities)
self.radii.prepend(core_radius or 0 | units.m)
def calculate_enclosed_mass_table(self):
self.radii_cubed = self.radii**3
self.enclosed_mass = [0.0] | units.kg
for rho_shell, r3_in, r3_out in zip(self.densities, self.radii_cubed, self.radii_cubed[1:]):
self.enclosed_mass.append(self.enclosed_mass[-1] + rho_shell * (r3_out - r3_in))
self.enclosed_mass = self.four_thirds_pi * self.enclosed_mass
def get_index(self, value, sorted_vector):
out_of_bounds = numpy.logical_or(sorted_vector[0] > value, value > sorted_vector[-1])
if out_of_bounds.any():
value = numpy.compress(numpy.array([out_of_bounds]).flatten(), value.number) | value.unit
raise AmuseException("Can't find a valid index. {0} is not in "
"the range [{1}, {2}].".format(value, sorted_vector[0], sorted_vector[-1]))
index = numpy.searchsorted(sorted_vector.number, value.value_in(sorted_vector.unit))
return numpy.maximum(index - 1, 0)
def get_enclosed_mass(self, radius):
if not self.initialized:
raise AmuseException("Can't calculate enclosed mass: interpolator is not initialized")
index = self.get_index(radius, self.radii)
return (self.enclosed_mass[index] + self.four_thirds_pi *
self.densities[index] * (radius**3 - self.radii_cubed[index]))
def get_radius_for_enclosed_mass(self, enclosed_mass):
if not self.initialized:
raise AmuseException("Can't calculate radius for enclosed mass: interpolator is not initialized")
index = self.get_index(enclosed_mass, self.enclosed_mass)
return (((enclosed_mass - self.enclosed_mass[index]) / (self.four_thirds_pi * self.densities[index])
+ self.radii_cubed[index]))**(1.0/3.0)
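# Toy usage sketch of the interpolator on a two-shell density profile; the
# radii and densities below are made-up example values.
def _example_enclosed_mass_interpolator():
    radii = [1.0, 2.0] | units.RSun
    densities = [10.0, 1.0] | units.g / units.cm**3
    interpolator = EnclosedMassInterpolator()
    interpolator.initialize(radii, densities)
    mass = interpolator.get_enclosed_mass(1.5 | units.RSun)
    # inverting the relation recovers the input radius
    return mass, interpolator.get_radius_for_enclosed_mass(mass)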
class UniformSphericalDistribution(object):
"""
Creates a uniform spherical grid of particles. Type can be:
"cubic": 'crystal' composed of cubes with particles on each corner
"bcc": as cubic but with additional particles at the center of each cube
"body_centered_cubic": same as "bcc"
"fcc": as cubic but with additional particles at the face of each cube
"face_centered_cubic": same as "fcc"
"random": particles are randomly distributed using numpy.random.uniform
"glass": like random, but stabilised using hydro pressure and no gravity
"sobol": 3D sobol sequence (low discrepancy, quasi-random)
"offset" is only used for the regular grids ("cubic", "bcc", "fcc"), and
should contain three numbers in the half-open interval [0, 1). These
define the offset between the origin of the grid and the corner
of the unit cell, normalized to the unit cell size.
"target_rms" is only used for "glass" as the density criterion for convergence
"""
def __init__(self, number_of_particles, type = "bcc",
offset = (0.82832951, 0.27237167, 0.37096327),
mass_cutoff = 1, target_rms = 0.01):
if not hasattr(self, type):
raise TypeError("Unknown grid type option: {0}".format(type))
self.number_of_particles = number_of_particles
self.type = type
self.offset = offset
self.mass_cutoff = mass_cutoff
self.target_rms = target_rms
def cubic(self):
n1D = numpy.ceil( 0.5*(2*self.number_of_particles)**(1./3) ) * 2 + 3 # odd number
x, y, z = numpy.mgrid[-1. : 1. : n1D*1j,
-1. : 1. : n1D*1j,
-1. : 1. : n1D*1j]
x = x.flatten()
y = y.flatten()
z = z.flatten()
for delta, vec in zip(self.offset, (x,y,z)):
vec += delta * 2.0 / (n1D - 1)
return self._cutout_sphere(x, y, z)
def bcc(self):
n1D = numpy.ceil( 0.5*(self.number_of_particles)**(1./3) ) * 2 + 3 # odd number
x1,y1,z1 = numpy.mgrid[-1. : 1. : n1D*1j,
-1. : 1. : n1D*1j,
-1. : 1. : n1D*1j]
n_2 = n1D - 1
x2,y2,z2 = numpy.mgrid[-1.+1./n_2 : 1.-1./n_2 : n_2*1j,
-1.+1./n_2 : 1.-1./n_2 : n_2*1j,
-1.+1./n_2 : 1.-1./n_2 : n_2*1j]
x = numpy.concatenate( (x1.flatten(),x2.flatten()) )
y = numpy.concatenate( (y1.flatten(),y2.flatten()) )
z = numpy.concatenate( (z1.flatten(),z2.flatten()) )
for delta, vec in zip(self.offset, (x,y,z)):
vec += delta * 2.0 / n_2
return self._cutout_sphere(x, y, z)
body_centered_cubic = bcc
def fcc(self):
n1D = numpy.ceil((self.number_of_particles / 2.0)**(1.0/3.0)) + 1
delta = 1.0 / (n1D - 1.5)
x, y, z = numpy.mgrid[-1.0-2*delta : 1-delta : n1D*1j,
-1.0-2*delta : 1-delta : n1D*1j,
-1.0-2*delta : 1-delta : n1D*1j]
x0 = x.flatten() + self.offset[0] * 2 * delta
y0 = y.flatten() + self.offset[1] * 2 * delta
z0 = z.flatten() + self.offset[2] * 2 * delta
x1 = x0 + delta
y1 = y0 + delta
z1 = z0 + delta
x = numpy.concatenate((x0, x1, x0, x1))
y = numpy.concatenate((y0, y1, y1, y0))
z = numpy.concatenate((z0, z0, z1, z1))
return self._cutout_sphere(x, y, z)
face_centered_cubic = fcc
def _random_cube(self, number_of_particles):
x = numpy.random.uniform(-1.0, 1.0, number_of_particles)
y = numpy.random.uniform(-1.0, 1.0, number_of_particles)
z = numpy.random.uniform(-1.0, 1.0, number_of_particles)
return x, y, z
def random(self, number_of_particles = None, try_number_of_particles = None):
if number_of_particles is None:
number_of_particles = self.number_of_particles
if try_number_of_particles is None:
try_number_of_particles = number_of_particles
try_number_of_particles = int(try_number_of_particles)
x, y, z = self._random_cube(2*try_number_of_particles)
r_squared = x*x + y*y + z*z
select_sphere = numpy.where( r_squared < self.mass_cutoff**(2.0/3.0))
if len(select_sphere[0]) < number_of_particles:
return self.random(number_of_particles, numpy.ceil(try_number_of_particles*1.1) )
else:
return (x[select_sphere][0:number_of_particles],
y[select_sphere][0:number_of_particles],
z[select_sphere][0:number_of_particles])
def sobol(self):
x, y, z = i4_sobol_generate(3, 2*self.number_of_particles, 3) * 2.0 - 1.0
return self._cutout_sphere(x, y, z)
def glass(self):
from amuse.community.fi.interface import Fi
if self.target_rms < 0.0001:
print("warning: target_rms may not succeed")
if self.number_of_particles < 1000:
print("warning: not enough particles")
N = 2 * self.number_of_particles
L = 1 | nbody_system.length
dt = 0.01 | nbody_system.time
x, y, z = self._random_cube(N)
vx,vy,vz= self.random(N)
p = Particles(N)
p.x = L * x
p.y = L * y
p.z = L * z
p.h_smooth = 0.0 | nbody_system.length
p.vx = (0.1 | nbody_system.speed) * vx[:N]
p.vy = (0.1 | nbody_system.speed) * vy[:N]
p.vz = (0.1 | nbody_system.speed) * vz[:N]
p.u = (0.1*0.1) | nbody_system.speed**2
p.mass = (8.0/N) | nbody_system.mass
sph = Fi(mode = 'periodic', redirection = 'none')
sph.initialize_code()
sph.parameters.use_hydro_flag = True
sph.parameters.radiation_flag = False
sph.parameters.self_gravity_flag = False
sph.parameters.gamma = 1.0
sph.parameters.isothermal_flag = True
sph.parameters.integrate_entropy_flag = False
sph.parameters.timestep = dt
sph.parameters.verbosity = 0
sph.parameters.periodic_box_size = 2 * L
sph.parameters.artificial_viscosity_alpha = 1.0
sph.parameters.beta = 2.0
sph.commit_parameters()
sph.gas_particles.add_particles(p)
sph.commit_particles()
t = 0.0 | nbody_system.time
rms = 1.0
minrms = 1.0
i = 0
while rms > self.target_rms:
i += 1
t += (0.25 | nbody_system.time)
sph.evolve_model(t)
rho = sph.particles.rho.value_in(nbody_system.density)
rms = rho.std()/rho.mean()
minrms = min(minrms, rms)
if (rms > 2.0*minrms) or (i > 300):
print(" RMS(rho) convergence warning:", i, rms, minrms)
if i > 100000:
print("i> 100k steps - not sure about this...")
print(" rms:", rms)
break
x = sph.particles.x.value_in(nbody_system.length)
y = sph.particles.y.value_in(nbody_system.length)
z = sph.particles.z.value_in(nbody_system.length)
sph.stop()
del sph
return self._cutout_sphere(x, y, z)
def _cutout_sphere(self, x, y, z):
r_squared = x*x + y*y + z*z
sorted_indices = numpy.argsort(r_squared)
massfrac_edge = r_squared[sorted_indices[self.number_of_particles-1]]**(1.5)
massfrac_next = r_squared[sorted_indices[self.number_of_particles]]**(1.5)
r_max = (0.5*(massfrac_edge+massfrac_next) / self.mass_cutoff)**(1.0/3.0)
indices = sorted_indices[:self.number_of_particles]
return x[indices]/r_max, y[indices]/r_max, z[indices]/r_max
@property
def result(self):
return getattr(self, self.type)()
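# Direct use of the distribution class (grid type and particle number are
# example choices); x, y and z are plain numpy arrays filling the unit sphere.
def _example_uniform_sphere_grid():
    return UniformSphericalDistribution(1000, type="fcc").result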
keyword_arguments_doc = """ :argument keyword_arguments: Optional arguments to UniformSphericalDistribution:
:argument type: Type of the basegrid. Can be:
"cubic": 'crystal' composed of cubes with particles on each corner
"bcc": as cubic but with additional particles at the center of each cube
"body_centered_cubic": same as "bcc"
"fcc": as cubic but with additional particles at the face of each cube
"face_centered_cubic": same as "fcc"
"random": particles are randomly distributed using numpy.random.uniform
"glass": like random, but stabilised using hydro pressure and no gravity
"sobol": 3D sobol sequence (low discrepancy, quasi-random)
:argument offset: only used for the regular grids ("cubic", "bcc", "fcc"), and
should contain three numbers in the half-open interval [0, 1). These
define the offset between the origin of the grid and the corner
of the unit cell, normalized to the unit cell size.
    :argument target_rms: only used for "glass" as the density criterion for convergence
"""
def new_uniform_spherical_particle_distribution(number_of_particles, size, total_mass, **keyword_arguments):
"""
Returns a Particles set with positions following a uniform
spherical distribution. Only the positions and masses
(equal-mass system) are set.
:argument number_of_particles: Number of particles in the resulting model
:argument size: Radius of the sphere enclosing the model
:argument total_mass: Total mass of the Particles set
"""
particles = Particles(number_of_particles)
particles.mass = total_mass * 1.0 / number_of_particles
x, y, z = UniformSphericalDistribution(number_of_particles, **keyword_arguments).result
particles.x = size * x
particles.y = size * y
particles.z = size * z
return particles
new_uniform_spherical_particle_distribution.__doc__ += keyword_arguments_doc
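# Usage sketch with assumed values: 1000 equal-mass particles in a uniform
# sphere, using the quasi-random Sobol base grid.
def _example_uniform_particle_distribution():
    return new_uniform_spherical_particle_distribution(
        1000, 1.0 | units.parsec, 100.0 | units.MSun, type="sobol")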
def new_spherical_particle_distribution(number_of_particles,
radial_density_func = None, # not yet supported, specify radii and densities tables:
radii = None, densities = None,
total_mass = None, size = None, # if total_mass is not given, it will be deduced from size or max(radii)
**keyword_arguments): # optional arguments for UniformSphericalDistribution
"""
Returns a Particles set with positions following a spherical
distribution. The radial density profile is determined from the
look-up table (radii, densities). Entries in the 'radii' table
are interpreted as the outer radius of the shell, with uniform
density as defined by the corresponding entry in the 'densities'
table:
rho(r) = densities[i], for ( radii[i-1] <= r <= radii[i] )
Only the positions and masses (equal-mass system) are set.
:argument number_of_particles: Number of particles in the resulting model
:argument radii: Table with radii for the radial density profile
:argument densities: Table with densities for the radial density profile
:argument total_mass: Total mass of the Particles set (optional, will be
deduced from size or max(radii) otherwise)
:argument size: Radius of the sphere enclosing the model (optional)
"""
if (radii is None) or (densities is None):
raise AmuseException("Using an arbitrary radial density function is not yet "
"supported. Radius and density tables must be passed instead.")
interpolator = EnclosedMassInterpolator()
interpolator.initialize(radii, densities)
if total_mass is None:
total_mass = interpolator.get_enclosed_mass(size or max(radii))
particles = Particles(number_of_particles)
particle_mass = total_mass * 1.0 / number_of_particles
particles.mass = particle_mass
x, y, z = UniformSphericalDistribution(number_of_particles, **keyword_arguments).result
# Now scale the uniformly distributed particle positions to match the radial density profile
r_old = numpy.sqrt(x*x + y*y + z*z)
indices = numpy.argsort(r_old)
if r_old[indices[0]] == 0.0:
r_old[indices[0]] = 1.0
f_scale = interpolator.get_radius_for_enclosed_mass(
(numpy.arange(0.5, number_of_particles + 0.5) | units.none) * particle_mass) / r_old[indices]
particles.x = (f_scale * x[indices]).as_quantity_in(radii.unit)
particles.y = (f_scale * y[indices]).as_quantity_in(radii.unit)
particles.z = (f_scale * z[indices]).as_quantity_in(radii.unit)
return particles
new_spherical_particle_distribution.__doc__ += keyword_arguments_doc
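# Usage sketch for a tabulated density profile; the two-shell table below
# is an illustrative assumption, not a physical model.
def _example_spherical_particle_distribution():
    radii = [0.5, 1.0] | units.RSun
    densities = [100.0, 10.0] | units.g / units.cm**3
    return new_spherical_particle_distribution(
        1000, radii=radii, densities=densities, type="bcc")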
plummer_arguments_doc = """
:argument number_of_particles: Number of particles in the resulting model
:argument total_mass: Total mass of the Particles set
:argument virial_radius: Virial radius of the Plummer model
""" + keyword_arguments_doc
def new_plummer_spatial_distribution(number_of_particles,
total_mass = 1.0|nbody_system.mass,
virial_radius = 1.0|nbody_system.length,
mass_cutoff = 0.999,
**keyword_arguments): # optional arguments for UniformSphericalDistribution
"""
Returns a Particles set with positions following a Plummer
distribution.
Only the positions and masses (equal-mass system) are set.
"""
particles = Particles(number_of_particles)
particle_mass = total_mass * 1.0 / number_of_particles
particles.mass = particle_mass
x, y, z = UniformSphericalDistribution(
number_of_particles, mass_cutoff=mass_cutoff, **keyword_arguments).result
# Now scale the uniformly distributed particle positions to match the radial density profile
r_old = numpy.sqrt(x*x + y*y + z*z)
scale_factor = (0.1875 * numpy.pi * virial_radius.number) / numpy.sqrt(1.0 - r_old**2)
particles.x = scale_factor * x | virial_radius.unit
particles.y = scale_factor * y | virial_radius.unit
particles.z = scale_factor * z | virial_radius.unit
return particles
new_plummer_spatial_distribution.__doc__ += plummer_arguments_doc
def new_gas_plummer_distribution(number_of_particles,
total_mass = 1.0|nbody_system.mass,
virial_radius = 1.0|nbody_system.length,
G = None,
**keyword_arguments): # optional arguments for UniformSphericalDistribution
"""
Create a plummer gas model with the given number of particles. Returns a set
of SPH particles with equal masses and positions distributed to fit a plummer
distribution model. Velocities are set to zero, and internal energies are set
to balance the gravitational forces between the gas particles.
"""
particles = new_plummer_spatial_distribution(number_of_particles, total_mass=total_mass,
virial_radius=virial_radius, **keyword_arguments)
if G is None:
G = nbody_system.G if generic_unit_system.is_generic_unit(total_mass.unit) else constants.G
velocity_unit = (G*total_mass/virial_radius).sqrt().unit.base_unit()
particles.velocity = [0.0, 0.0, 0.0] | velocity_unit
plummer_radius = 0.1875 * numpy.pi * virial_radius
u_unit = (velocity_unit**2).base_unit()
particles.u = (1 + particles.position.lengths_squared()/plummer_radius**2)**(-0.5) | u_unit
particles.u *= 0.25 * (G*total_mass**2/virial_radius) / particles.thermal_energy()
return particles
new_gas_plummer_distribution.__doc__ += plummer_arguments_doc
def sample_from_velocity_distribution(number_of_particles):
x = numpy.random.uniform(0.0, 1.0, number_of_particles)
y = numpy.random.uniform(0.0, 0.1, number_of_particles)
selected = x[y <= (x**2) * (1.0 - x**2)**3.5]
if len(selected) < number_of_particles:
return numpy.concatenate((selected, sample_from_velocity_distribution(number_of_particles-len(selected))))
return selected
def random_direction(number_of_particles):
z = numpy.random.uniform(-1.0, 1.0, number_of_particles)
sine_theta = numpy.sqrt(1-z*z)
phi = numpy.random.uniform(0.0, 2*numpy.pi, number_of_particles)
return numpy.array([sine_theta * numpy.cos(phi), sine_theta * numpy.sin(phi), z]).transpose()
def new_plummer_distribution(number_of_particles,
total_mass = 1.0|nbody_system.mass,
virial_radius = 1.0|nbody_system.length,
mass_cutoff = 0.999,
G = None,
**keyword_arguments): # optional arguments for UniformSphericalDistribution
"""
Create a plummer model with the given number of particles. Returns a set
of particles with equal masses and positions distributed to fit a plummer
distribution model. Velocities are sampled using von Neumann's rejection
method (Aarseth et al. 1974), balancing the gravitational forces between
the particles.
"""
particles = new_plummer_spatial_distribution(number_of_particles, total_mass=total_mass,
virial_radius=virial_radius, **keyword_arguments)
if G is None:
G = nbody_system.G if generic_unit_system.is_generic_unit(total_mass.unit) else constants.G
velocity_unit = (G*total_mass/virial_radius).sqrt().unit.base_unit()
plummer_radius = 0.1875 * numpy.pi * virial_radius
escape_velocity = (1 + particles.position.lengths_squared()/plummer_radius**2)**(-0.25) | velocity_unit
velocity = escape_velocity * sample_from_velocity_distribution(number_of_particles)
velocity *= numpy.sqrt((G*total_mass*number_of_particles) / (2*virial_radius*velocity.length_squared()))
particles.velocity = velocity.reshape((-1,1)) * random_direction(number_of_particles)
return particles
new_plummer_distribution.__doc__ += plummer_arguments_doc
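# Usage sketch: Plummer models in N-body units, with and without velocities
# (particle numbers and base grid types are example choices).
def _example_plummer_models():
    gas = new_gas_plummer_distribution(1000, type="fcc")
    stars = new_plummer_distribution(1000, type="random")
    return gas, stars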
| 20,811
| 47.287703
| 114
|
py
|
amuse
|
amuse-main/src/amuse/ext/plotting_hydro.py
|
import os
import os.path
from amuse.units.quantities import zero
from amuse.plot import native_plot, sph_particles_plot
def new_plotting_hydrodynamics_code(hydrodynamics, timestep, plot_function=None, plot_function_arguments=dict(), plot_directory=None):
"""
Returns a new subclass of the hydrodynamics code that will produce plots at
regular intervals.
:argument hydrodynamics: SPH code class to inherit from
    :argument timestep: time interval between successive plots
:argument plot_function: function that will be called after each timestep
:argument plot_function_arguments: dict containing keyword arguments to the plot function
"""
class PlottingHydrodynamics(hydrodynamics):
_timestep = timestep
_plot_function = staticmethod(plot_function)
_plot_function_arguments = plot_function_arguments
_plot_directory = plot_directory
def __init__(self, *args, **kwargs):
super(PlottingHydrodynamics, self).__init__(*args, **kwargs)
self.time_last_plot = zero
self.previous_plot_number = -1
self.plot_directory = self._next_plot_directory()
if self._plot_function is None:
self._plot_function = sph_particles_plot
def _next_plot_directory(self):
if self._plot_directory is None:
plot_directory = os.path.join(os.getcwd(), "plots")
if not os.path.exists(plot_directory):
os.mkdir(plot_directory)
i = 0
while os.path.exists(os.path.join(plot_directory, "run_{0:=03}".format(i))):
i += 1
new_directory = os.path.join(plot_directory, "run_{0:=03}".format(i))
os.mkdir(new_directory)
return new_directory
else:
if not os.path.exists(self._plot_directory):
os.mkdir(self._plot_directory)
return self._plot_directory
def _next_filename(self):
self.previous_plot_number += 1
return os.path.join(self.plot_directory, "hydroplot_{0:=04}".format(self.previous_plot_number))
def evolve_model(self, end_time):
time_next_plot = self.time_last_plot + self._timestep
while time_next_plot < end_time:
super(PlottingHydrodynamics, self).__getattr__("evolve_model")(time_next_plot)
self._plot_function(self.gas_particles, **self._plot_function_arguments)
native_plot.savefig(self._next_filename())
native_plot.close()
self.time_last_plot = time_next_plot
time_next_plot = self.time_last_plot + self._timestep
super(PlottingHydrodynamics, self).__getattr__("evolve_model")(end_time)
PlottingHydrodynamics.__name__ = "Plotting" + hydrodynamics.__name__
return PlottingHydrodynamics
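# Hypothetical usage sketch: wrap the Fi SPH code so that a plot of the gas
# particles is saved every 0.1 N-body time units (the code choice and the
# plotting interval are assumptions).
def _example_plotting_hydrodynamics():
    from amuse.community.fi.interface import Fi
    from amuse.units import nbody_system
    PlottingFi = new_plotting_hydrodynamics_code(Fi, 0.1 | nbody_system.time)
    return PlottingFi()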
| 2,982
| 42.867647
| 134
|
py
|
amuse
|
amuse-main/src/amuse/ext/concurrent.py
|
import random
from amuse.rfi.channel import MpiChannel
from amuse.datamodel import Particles
from amuse.units.quantities import zero
from amuse.units import constants
import pickle
import numpy
try:
from mpi4py import MPI
except ImportError:
MPI = None
class ConcurrentProcesses(object):
pass
class MPIConcurrentProcesses(object):
ROOT = 0
    @classmethod
    def is_available(cls):
        return MPI is not None
def init(self):
MpiChannel.ensure_mpi_initialized()
self.mpi_comm = MPI.COMM_WORLD
self.shared_particles_ids = set([])
def share(self, particles = None):
sendbuffer = numpy.zeros(1, dtype='int64')
        if self.mpi_comm.rank == self.ROOT:
new_id = random.getrandbits(64)
sendbuffer[0] = new_id
self.mpi_comm.Bcast([sendbuffer, MPI.INTEGER8], root=self.ROOT)
shared_id = sendbuffer[0]
self.shared_particles_ids.add(shared_id)
return MPISharedParticlesProxy(particles, shared_id, self)
def is_on_root(self):
return self.mpi_comm.rank == self.ROOT
def on_root(self, callable):
if self.mpi_comm.rank == self.ROOT:
callable()
def not_on_root(self, callable):
if self.mpi_comm.rank != self.ROOT:
callable()
def call(self, on_root, not_on_root):
if self.mpi_comm.rank == self.ROOT:
on_root()
else:
not_on_root()
class MPISharedParticlesProxy(object):
def __init__(self, particles, shared_id, concurrent_processes):
self.shared_id = shared_id
self.particles = particles
self.concurrent_processes = concurrent_processes
def __getattr__(self, name):
return self.particles.__getattr__(name)
def distribute(self):
self.concurrent_processes.call(
self.distribute_on_root,
self.distribute_not_on_root
)
def distribute_on_root(self):
attribute_names = self.particles.get_attribute_names_defined_in_store()
values = self.particles.get_values_in_store(
self.particles.get_all_indices_in_store(),
attribute_names
)
units = [x.unit for x in values]
units_dump = pickle.dumps(units)
attributes_dump = pickle.dumps(attribute_names)
        units_dump = numpy.frombuffer(units_dump, dtype='uint8')
        attributes_dump = numpy.frombuffer(attributes_dump, dtype='uint8')
sendbuffer = numpy.zeros(4, dtype='int64')
sendbuffer[0] = self.shared_id
sendbuffer[1] = len(self.particles)
sendbuffer[2] = len(units_dump)
sendbuffer[3] = len(attributes_dump)
self.concurrent_processes.mpi_comm.Bcast([sendbuffer, MPI.INTEGER8], root=self.concurrent_processes.ROOT)
sendbuffer = self.particles.key
self.concurrent_processes.mpi_comm.Bcast([sendbuffer, MPI.INTEGER8], root=self.concurrent_processes.ROOT)
attribute_names = self.particles.get_attribute_names_defined_in_store()
self.concurrent_processes.mpi_comm.Bcast([units_dump, MPI.CHARACTER], root=self.concurrent_processes.ROOT)
self.concurrent_processes.mpi_comm.Bcast([attributes_dump, MPI.CHARACTER], root=self.concurrent_processes.ROOT)
for x, unit in zip(values, units):
value = x.value_in(unit)
self.concurrent_processes.mpi_comm.Bcast([value, MPI.DOUBLE], root=self.concurrent_processes.ROOT)
def distribute_not_on_root(self):
sendbuffer = numpy.zeros(4, dtype='int64')
self.concurrent_processes.mpi_comm.Bcast([sendbuffer, MPI.INTEGER8], root=self.concurrent_processes.ROOT)
shared_id = sendbuffer[0]
number_of_particles = sendbuffer[1]
units_dump_len = sendbuffer[2]
attributes_dump_len = sendbuffer[3]
sendbuffer = numpy.zeros(number_of_particles, dtype='int64')
self.concurrent_processes.mpi_comm.Bcast([sendbuffer, MPI.INTEGER8], root=self.concurrent_processes.ROOT)
units_dump = numpy.zeros(units_dump_len, dtype='uint8')
self.concurrent_processes.mpi_comm.Bcast([units_dump, MPI.CHARACTER], root=self.concurrent_processes.ROOT)
attributes_dump = numpy.zeros(attributes_dump_len, dtype='uint8')
self.concurrent_processes.mpi_comm.Bcast([attributes_dump, MPI.CHARACTER], root=self.concurrent_processes.ROOT)
units = pickle.loads(units_dump.tobytes())
attributes = pickle.loads(attributes_dump.tobytes())
values = []
for x in units:
value = numpy.zeros(number_of_particles, dtype='float64')
self.concurrent_processes.mpi_comm.Bcast([value, MPI.DOUBLE], root=self.concurrent_processes.ROOT)
values.append(x.new_quantity(value))
self.particles = Particles(keys = sendbuffer)
self.particles.set_values_in_store(self.particles.get_all_indices_in_store(), attributes, values)
def potential_energy(self, smoothing_length_squared = zero, G = constants.G):
mpi_comm = self.concurrent_processes.mpi_comm
mass = self.mass
x_vector = self.x
y_vector = self.y
z_vector = self.z
sum_of_energies = zero
number_of_particles = len(self)
block_size = (number_of_particles - 1) // mpi_comm.size
start = mpi_comm.rank * block_size
if mpi_comm.rank == (mpi_comm.size - 1):
block_size = (number_of_particles - 1) - start
for i in range(start, start + block_size):
x = x_vector[i]
y = y_vector[i]
z = z_vector[i]
dx = x - x_vector[i+1:]
dy = y - y_vector[i+1:]
dz = z - z_vector[i+1:]
dr_squared = (dx * dx) + (dy * dy) + (dz * dz)
dr = (dr_squared+smoothing_length_squared).sqrt()
m_m = mass[i] * mass[i+1:]
energy_of_this_particle = (m_m / dr).sum()
sum_of_energies -= energy_of_this_particle
value = sum_of_energies.value_in(sum_of_energies.unit)
        # for now, assume the unit is the same across processes,
# so units are not distributed!
input = numpy.zeros(1, dtype='float64')
output = numpy.zeros(1, dtype='float64')
input[0] = value
mpi_comm.Reduce(
[input, MPI.DOUBLE],
[output, MPI.DOUBLE],
op=MPI.SUM,
root=0
)
return G * sum_of_energies.unit.new_quantity(output[0])
def __len__(self):
return len(self.particles)
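# Hypothetical driver sketch (run under mpiexec): share a Plummer model
# across the MPI ranks and compute its potential energy in parallel.
def _example_shared_potential_energy():
    from amuse.ic.plummer import new_plummer_model
    from amuse.units import nbody_system
    processes = MPIConcurrentProcesses()
    processes.init()
    if processes.is_on_root():
        stars = new_plummer_model(100)
    else:
        stars = None
    shared = processes.share(stars)
    shared.distribute()
    return shared.potential_energy(G=nbody_system.G)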
| 6,851
| 35.253968
| 119
|
py
|
amuse
|
amuse-main/src/amuse/ext/grid_remappers.py
|
import numpy
from amuse.units import units
from amuse.units.quantities import is_quantity, value_in, to_quantity
from amuse.datamodel import UnstructuredGrid, StructuredGrid, StructuredBaseGrid, RegularBaseGrid
try:
import matplotlib
from matplotlib import tri
if not hasattr(tri, "LinearTriInterpolator"):
raise Exception("LinearTriInterpolator not in matplotlib.tri")
matplotlib_available=True
except:
matplotlib_available=False
from warnings import warn
class interpolating_2D_remapper(object):
def __init__(self, source, target,axes_names=None):
""" this class maps a source grid to a target grid using linear
interpolation on a triangulation generated by adding a
midpoint to every cell (source should be a structured grid)
and thus generating 4 triangles for each cell. Values of the
midpoints are averaged from the corners.
"""
if len(source.shape) !=2:
raise Exception("source grid is not 2D")
if not isinstance(source, StructuredBaseGrid):
raise Exception("source grid is not instance of StructuredBaseGrid")
self.source=source
self.target=target
self._axes_names=list(axes_names or source.get_axes_names())
self.generate_triangulation()
def _generate_nodes(self,grid,attributes):
Nx,Ny=grid.shape
x,y=numpy.mgrid[0:Nx,0:Ny]
x1,y1=numpy.mgrid[0:Nx-1,0:Ny-1]
x_=x.flatten()
y_=y.flatten()
x1_=x1.flatten()
y1_=y1.flatten()
l1=Nx*Ny
i=numpy.arange(Nx*Ny).reshape((Nx,Ny))
i1=(numpy.arange((Nx-1)*(Ny-1))+l1).reshape((Nx-1,Ny-1))
nodes=UnstructuredGrid(len(x_)+len(x1_))
for name in attributes:
values1=getattr(grid,name)[x_,y_]
values2=getattr(grid,name)[x1_,y1_]+getattr(grid,name)[x1_+1,y1_]+\
getattr(grid,name)[x1_,y1_+1]+getattr(grid,name)[x1_+1,y1_+1]
setattr(nodes[0], name, 0.*values1[0])
setattr(nodes[:l1], name, 1.*values1)
setattr(nodes[l1:], name, values2/4)
return nodes
def _generate_elements_and_boundaries(self,grid):
Nx,Ny=grid.shape
l1=Nx*Ny
i=numpy.arange(Nx*Ny).reshape((Nx,Ny))
i1=(numpy.arange((Nx-1)*(Ny-1))+l1).reshape((Nx-1,Ny-1))
e1=numpy.zeros(((Nx-1)*(Ny-1),3),dtype='i')
e2=numpy.zeros(((Nx-1)*(Ny-1),3),dtype='i')
e3=numpy.zeros(((Nx-1)*(Ny-1),3),dtype='i')
e4=numpy.zeros(((Nx-1)*(Ny-1),3),dtype='i')
e1[:,0]=i[:-1,:-1].flatten()
e1[:,1]=i[1:,:-1].flatten()
e1[:,2]=i1[:,:].flatten()
e2[:,0]=i[1:,:-1].flatten()
e2[:,1]=i[1:,1:].flatten()
e2[:,2]=i1[:,:].flatten()
e3[:,0]=i[1:,1:].flatten()
e3[:,1]=i[:-1,1:].flatten()
e3[:,2]=i1[:,:].flatten()
e4[:,0]=i[:-1,:-1].flatten()
e4[:,1]=i1[:,:].flatten()
e4[:,2]=i[:-1,1:].flatten()
elements=numpy.zeros((4*(Nx-1)*(Ny-1),3),dtype='i8')
elements[0::4,:]=e1
elements[1::4,:]=e2
elements[2::4,:]=e3
elements[3::4,:]=e4
boundaries=[xx.flatten() for xx in [i[:,0],i[-1,:],i[::-1,-1],i[0,::-1]] ]
elem=UnstructuredGrid(len(elements))
elem.nodes=elements
return elem,boundaries
def convert_grid_to_nodes_and_elements(self, grid, attributes=None):
if attributes is None:
attributes=grid.get_attribute_names_defined_in_store()
nodes=self._generate_nodes(grid, attributes)
elements,boundaries=self._generate_elements_and_boundaries(grid)
return nodes,elements,boundaries
def generate_triangulation(self):
nodes,elements,boundaries=self.convert_grid_to_nodes_and_elements(self.source, self._axes_names)
xpos=to_quantity(getattr(nodes,self._axes_names[0]))
ypos=to_quantity(getattr(nodes,self._axes_names[1]))
self._xpos_unit=xpos.unit
xpos=xpos.number
self._ypos_unit=ypos.unit
ypos=ypos.number
n1=elements.nodes[:,0]
n2=elements.nodes[:,1]
n3=elements.nodes[:,2]
elem=numpy.column_stack((n1,n2,n3))
self._triangulation=tri.Triangulation(xpos,ypos,elem)
def sample(self, values, xpos, ypos):
interpolator=tri.LinearTriInterpolator(self._triangulation,values)
return interpolator(xpos,ypos)
def forward_mapping(self, attributes, target_names=None):
if attributes is None:
attributes=self.source.get_attribute_names_defined_in_store()
if target_names is None:
target_names=attributes
source=self.source.empty_copy()
channel1=self.source.new_channel_to(source)
target=self.target.empty_copy()
channel2=self.target.new_channel_to(target)
channel3=target.new_channel_to(self.target)
channel1.copy_attributes(attributes)
channel2.copy_attributes(self._axes_names)
nodes=self._generate_nodes(source,attributes)
xpos=value_in( getattr(target,self._axes_names[0]), self._xpos_unit)
ypos=value_in( getattr(target,self._axes_names[1]), self._ypos_unit)
for attribute, target_name in zip(attributes, target_names):
values=to_quantity( getattr(nodes,attribute) )
unit=values.unit
values=values.number
samples=self.sample(values,xpos,ypos)
setattr(target, target_name, (samples if unit is units.none else (samples | unit)))
channel3.copy_attributes(target_names)
class bilinear_2D_remapper(object):
def __init__(self, source, target, check_inside=True, do_slices=False):
""" this class maps a source grid to a target grid using bilinear
interpolation. If check_inside=True, raise exception if any
target point outside source grid. If the grids are 3 dimensional
it can be used to do a 2D interpolation of each level if shapes
are the same and positions are the same in the 3rd dimension.
"""
if len(source.shape)!=2 and not do_slices:
raise Exception("source grid is not 2D, set do_slices=True remapping by slices")
if len(source.shape)!=len(target.shape):
raise Exception("incompatible shapes")
if len(source.shape)!=2:
if numpy.any(source.shape[2:]!=target.shape[2:]):
raise Exception("source and target need same shapes (after dim 2)")
if not isinstance(source, RegularBaseGrid):
raise Exception(f"source grid ({type(source)}) is not instance of RegularBaseGrid")
self.source=source
self.target=target
self._axes_names=source.get_axes_names()
if len(source.shape)!=2:
for x in self._axes_names[2:]:
if numpy.any(getattr(source[0,0],x)!=getattr(target[0,0],x)):
print(getattr(source[0,0],x))
print(getattr(target[0,0],x))
warn(f"positions (possibly) not the same on axes {x}")
self.check_inside=check_inside
self._weights=None
self._indices=None
def _calculate_weights(self):
x0=to_quantity( getattr(self.source[0,0], self._axes_names[0]) )
x1=to_quantity( getattr(self.source[1,1], self._axes_names[0]) )
y0=to_quantity( getattr(self.source[0,0], self._axes_names[1]) )
y1=to_quantity( getattr(self.source[1,1], self._axes_names[1]) )
# guaranteed by grid being RegularBaseGrid
assert x0.max()==x0.min()
assert x1.max()==x1.min()
assert y0.max()==y0.min()
assert y1.max()==y1.min()
x0=x0.min()
x1=x1.min()
y0=y0.min()
y1=y1.min()
dx=x1-x0
dy=y1-y0
x=getattr(self.target, self._axes_names[0])
y=getattr(self.target, self._axes_names[1])
ix=numpy.floor((x-x0)/dx).astype(int)
iy=numpy.floor((y-y0)/dy).astype(int)
if self.check_inside:
if numpy.any(ix<0) or numpy.any(ix>self.source.shape[0]-2) or \
numpy.any(iy<0) or numpy.any(iy>self.source.shape[1]-2):
raise Exception("target not fully inside (restricted) source grid as required")
ix=numpy.clip(ix,0, self.source.shape[0]-2)
iy=numpy.clip(iy,0, self.source.shape[1]-2)
wx=(x0+(ix+1)*dx-x)/dx
wy=(y0+(iy+1)*dy-y)/dy
wx=numpy.clip(wx,0.,1.)
wy=numpy.clip(wy,0.,1.)
self._weights=[wx,wy]
while(len(ix.shape)>2):
ix=numpy.amax(ix,axis=-1)
iy=numpy.amax(iy,axis=-1)
self._indices=[ix,iy]
def _evaluate(self, values):
ix,iy=self._indices
wx,wy=self._weights
result=wx*wy*values[ix,iy]+(1.-wx)*wy*values[ix+1,iy]+ \
wx*(1.-wy)*values[ix,iy+1]+(1.-wx)*(1.-wy)*values[ix+1,iy+1]
return result
def forward_mapping(self, attributes, target_names=None):
if attributes is None:
attributes=self.source.get_attribute_names_defined_in_store()
if target_names is None:
target_names=attributes
if self._weights is None:
self._calculate_weights()
mapped_values=[]
for attribute, target_name in zip(attributes, target_names):
values=getattr(self.source,attribute)
samples=self._evaluate(values)
mapped_values.append(samples)
self.target.set_values_in_store(None, target_names, mapped_values)
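# Added illustrative sketch (not part of the original module): the bilinear
# weighting used by bilinear_2D_remapper above. For a target point (x, y)
# inside cell (ix, iy) of a regular grid with origin (x0, y0) and spacing
# (dx, dy), the four corner values are blended with weights wx, wy in [0, 1].
def _bilinear_weighting_demo():
    import numpy
    values = numpy.array([[0., 1.], [2., 3.]])  # corner values of one cell
    x0, y0, dx, dy = 0., 0., 1., 1.             # cell origin and spacing
    x, y = 0.25, 0.75                           # sample point inside the cell
    ix = int(numpy.floor((x - x0) / dx))
    iy = int(numpy.floor((y - y0) / dy))
    wx = (x0 + (ix + 1) * dx - x) / dx          # weight of the low-x corner
    wy = (y0 + (iy + 1) * dy - y) / dy          # weight of the low-y corner
    return (wx * wy * values[ix, iy] + (1. - wx) * wy * values[ix + 1, iy]
            + wx * (1. - wy) * values[ix, iy + 1]
            + (1. - wx) * (1. - wy) * values[ix + 1, iy + 1])  # 1.25 here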
class nearest_2D_remapper(object):
def __init__(self, source, target, check_inside=True):
""" this class maps a source grid to a target grid getting closest
grid value. If check_inside=True, raise exception if any
target point outside source grid.
"""
if len(source.shape) !=2:
raise Exception("source grid is not 2D")
if not isinstance(source, RegularBaseGrid):
raise Exception("source grid is not instance of RegularBaseGrid")
self.source=source
self.target=target
self._axes_names=source.get_axes_names()
self.check_inside=check_inside
self._indices=None
def _calculate_weights(self):
x=getattr(self.target, self._axes_names[0])
y=getattr(self.target, self._axes_names[1])
kwargs={self._axes_names[0]: x, self._axes_names[1]:y}
indices=self.source.get_index(**kwargs)
ix=indices[...,0]
iy=indices[...,1]
if self.check_inside:
if numpy.any(ix<0) or numpy.any(ix>self.source.shape[0]-1) or \
numpy.any(iy<0) or numpy.any(iy>self.source.shape[1]-1):
raise Exception("target not fully inside source grid as required")
ix=numpy.clip(ix,0, self.source.shape[0]-1)
iy=numpy.clip(iy,0, self.source.shape[1]-1)
self._indices=[ix,iy]
def _evaluate(self, values):
return values[self._indices[0], self._indices[1]]
def forward_mapping(self, attributes, target_names=None):
if attributes is None:
attributes=self.source.get_attribute_names_defined_in_store()
if target_names is None:
target_names=attributes
if self._indices is None:
self._calculate_weights()
mapped_values=[]
for attribute, target_name in zip(attributes, target_names):
values=getattr(self.source,attribute)
samples=self._evaluate(values)
mapped_values.append(samples)
self.target.set_values_in_store(None, target_names, mapped_values)
def conservative_spherical_remapper(*args,**kwargs):
raise Exception("conservative_spherical_remapper has moved to omuse.ext")
| 12,018
| 35.755352
| 104
|
py
|
amuse
|
amuse-main/src/amuse/ext/kingmodel.py
|
from amuse.ic.kingmodel import *
| 33
| 16
| 32
|
py
|
amuse
|
amuse-main/src/amuse/ext/galactic_potentials.py
|
import numpy
import warnings
from amuse.units import constants, units
from amuse.support.literature import LiteratureReferencesMixIn
from amuse.support.exceptions import AmuseWarning
try:
    from scipy.special import gammainc, gamma
    scipy_imported = True
except ImportError:
    scipy_imported = False
class NFW_profile(LiteratureReferencesMixIn):
"""
Gravitational potential of the NFW (1997) halo
Two-power density spherical model suitable for modeling dark matter halos.
Density--potential pair:
* density(r) = rho0 / [r/rs * (1+r/rs)^2], where r is the spherical radius
* potential(r) = -4*pi*G*rho0*rs^2 * ln(1+r/rs)/(r/rs)
.. [#] Navarro, Julio F.; Frenk, Carlos S.; White, Simon D. M., The Astrophysical Journal, Volume 490, Issue 2, pp. 493-508 (1997)
:argument rho0: density parameter
:argument rs: scale radius
"""
def __init__(self,rho0,rs,G=constants.G):
LiteratureReferencesMixIn.__init__(self)
self.rho0 = rho0
self.rs = rs
self.G = G
self.four_pi_rho0 = 4.*numpy.pi*self.rho0
self.four_pi_rho0_G = self.four_pi_rho0*self.G
def radial_force(self,r):
r_rs = r/self.rs
ar = self.four_pi_rho0_G*self.rs**3*(1./(r*self.rs+r**2)-(1./r**2)*numpy.log(1.+r_rs))
#ar = self.four_pi_rho0_G*self.rs*((r_rs-(1.+r_rs)*numpy.log(1.+r_rs))/r_rs**2/(1.+r_rs))
return ar
def get_potential_at_point(self,eps,x,y,z):
r = (x**2+y**2+z**2).sqrt()
r_rs = r/self.rs
return -1.*self.four_pi_rho0_G*self.rs**2*numpy.log(1.+r_rs)/r_rs
def get_gravity_at_point(self,eps,x,y,z):
r = (x**2+y**2+z**2).sqrt()
fr = self.radial_force(r)
ax = fr*x/r
ay = fr*y/r
az = fr*z/r
return ax,ay,az
def enclosed_mass(self,r):
fr = self.radial_force(r)
return -r**2/self.G*fr
def circular_velocity(self,r):
fr = self.radial_force(r)
return (-r*fr).sqrt()
def mass_density(self,r):
r_rs = r/self.rs
return self.rho0 / (r_rs*(1.+r_rs)**2)
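# Added illustrative check (not part of the original file): for the NFW
# profile the enclosed mass is M(r) = 4*pi*rho0*rs**3*(ln(1+x) - x/(1+x))
# with x = r/rs, which the force-based enclosed_mass() above reproduces.
def _nfw_demo():
    nfw = NFW_profile(1.e7 | units.MSun / units.kpc**3, 20. | units.kpc)
    r = 8. | units.kpc
    x = r / nfw.rs  # dimensionless
    analytic = 4. * numpy.pi * nfw.rho0 * nfw.rs**3 * (numpy.log(1. + x) - x / (1. + x))
    return nfw.enclosed_mass(r), analytic  # the two agree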
class MiyamotoNagai_profile(LiteratureReferencesMixIn):
"""
Miyamoto and Nagai (1975) axisymmetric disk.
* potential(R,z) = -GM / sqrt(R**2 + (a+sqrt(z**2+b**2))**2)
.. [#] Miyamoto, M.; Nagai, R., Astronomical Society of Japan, Publications, vol. 27, no. 4, 1975, p. 533-543 (1975)
:argument mass: total mass
:argument a: disk scale radius
:argument b: disk scale height
"""
def __init__(self,mass,a,b,G=constants.G):
LiteratureReferencesMixIn.__init__(self)
self.mass = mass
self.a = a
self.b = b
self.G = G
self.GM = self.G*self.mass
self.a2 = self.a**2
self.b2 = self.b**2
def force_R(self,x,y,z):
R2 = x**2+y**2
R = R2.sqrt()
sqrt_z2_b2 = (z**2+self.b2).sqrt()
return -self.GM*R*(R2+(self.a+sqrt_z2_b2)**2)**(-1.5)
def force_z(self,x,y,z):
R2 = x**2+y**2
sqrt_z2_b2 = (z**2+self.b2).sqrt()
a_sqrt_z2_b2 = self.a+sqrt_z2_b2
return -self.GM*z*a_sqrt_z2_b2/((R2+a_sqrt_z2_b2**2)**1.5*sqrt_z2_b2)
def get_potential_at_point(self,eps,x,y,z):
R2 = x**2+y**2
return -self.GM/(R2+(self.a+(self.b2+z**2).sqrt())**2).sqrt()
def get_gravity_at_point(self,eps,x,y,z):
fR = self.force_R(x,y,z)
R = (x**2+y**2).sqrt()
ax = fR*x/R
ay = fR*y/R
az = self.force_z(x,y,z)
return ax,ay,az
def mass_density(self,x,y,z):
R2 = x**2+y**2
z2_b2 = z**2+self.b2
sqrt_z2_b2 = z2_b2.sqrt()
rho = self.b2*self.mass/(4.*numpy.pi) * \
(self.a*R2+(self.a+3.*sqrt_z2_b2)*(self.a+sqrt_z2_b2)**2) / \
((R2+(self.a+sqrt_z2_b2)**2)**2.5*z2_b2**1.5)
return rho
def circular_velocity_at_z0(self,R):
fR_at_z0 = self.force_R(R,0.|units.kpc,0.|units.kpc)
return (-R*fR_at_z0).sqrt()
def equivalent_enclosed_mass_in_plane(self,R):
"""
the mass that would be enclosed within radius R by a spherical profile
producing the same radial force as this disk profile in the galactic plane (z=0)
"""
fR_at_z0 = self.force_R(R,0.|units.kpc,0.|units.kpc)
return -R**2/self.G*fR_at_z0
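# Added illustrative check (not part of the original file): in the plane z=0
# sqrt(z**2+b**2) reduces to b, so the circular velocity becomes
# v_c = sqrt(G*M*R**2 / (R**2 + (a+b)**2)**1.5); compare circular_velocity_at_z0().
def _miyamoto_nagai_demo():
    disk = MiyamotoNagai_profile(6.8e10 | units.MSun, 3. | units.kpc, 0.28 | units.kpc)
    R = 8. | units.kpc
    analytic = (disk.GM * R**2 / (R**2 + (disk.a + disk.b)**2)**1.5).sqrt()
    return disk.circular_velocity_at_z0(R), analytic  # the two agree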
class Plummer_profile(LiteratureReferencesMixIn):
"""
Spherically symmetric Plummer (1911) profile
* potential(r) = -GM / sqrt(a**2 + r**2)
* density(r) = (3M/4pi*a**3) * (1+(r/a)**2)**(-5/2)
.. [#] Plummer, H. C., MNRAS, Vol. 71, p.460-470 (1911)
:argument mass: total mass
:argument a: scale radius
"""
def __init__(self,mass,a,G=constants.G):
LiteratureReferencesMixIn.__init__(self)
self.mass = mass
self.a = a
self.G = G
self.GM = self.G*self.mass
self.a2 = self.a**2
def radial_force(self,r):
r2 = r**2
return -self.GM*r*(r2+self.a2)**(-1.5)
def get_gravity_at_point(self,eps,x,y,z):
r = (x**2+y**2+z**2).sqrt()
fr = self.radial_force(r)
ax = fr*x/r
ay = fr*y/r
az = fr*z/r
return ax, ay, az
def get_potential_at_point(self,eps,x,y,z):
r2 = x**2+y**2+z**2
return -self.GM/(r2+self.a2).sqrt()
def mass_density(self,r):
return self.mass/(4./3.*numpy.pi*self.a**3)*(1.+(r/self.a)**2)**(-2.5)
def enclosed_mass(self,r):
fr = self.radial_force(r)
return -r**2/self.G*fr
def circular_velocity(self,r):
fr = self.radial_force(r)
return (-r*fr).sqrt()
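# Added illustrative check (not part of the original file): the Plummer
# enclosed mass is M(r) = M * r**3 / (r**2 + a**2)**1.5, which tends to the
# total mass for r >> a; compare the force-based enclosed_mass() above.
def _plummer_demo():
    plummer = Plummer_profile(1.e5 | units.MSun, 1. | units.parsec)
    r = 100. | units.parsec
    analytic = plummer.mass * r**3 / (r**2 + plummer.a2)**1.5
    return plummer.enclosed_mass(r), analytic  # both close to the total mass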
class PowerLawCutoff_profile(LiteratureReferencesMixIn):
"""
Spherically symmetric power-law density with exponential cut-off
* density(r) = rho0*(r0/r)^alpha*exp(-(r/rc)^2)
:argument rho0: density amplitude
:argument r0: power-law scaling radius
:argument alpha: power-law index, alpha<3
:argument rc: cut-off radius
"""
def __init__(self,rho0,r0,alpha,rc,G=constants.G):
LiteratureReferencesMixIn.__init__(self)
self.rho0 = rho0
self.r0 = r0
self.alpha = alpha
self.rc = rc
self.G = G
self.rho0_r0_to_alpha = self.rho0*self.r0**self.alpha
if self.alpha >= 3.:
    warnings.warn("power-law index alpha must be less than 3.")
def get_potential_at_point(self,eps,x,y,z):
if not scipy_imported:
    warnings.warn("importing scipy failed, maybe not installed")
r = (x**2+y**2+z**2).sqrt()
r_rc = r/self.rc
return -2.*numpy.pi*self.G*self.rho0_r0_to_alpha*self.rc**(3.-self.alpha)/r* \
(r/self.rc*gamma(1.-self.alpha/2.)*gammainc(1.-self.alpha/2.,(r/self.rc)**2.)-gamma(1.5-self.alpha/2.)*gammainc(1.5-self.alpha/2.,(r/self.rc)**2.))
def radial_force(self,r):
Mr = self.enclosed_mass(r)
return -self.G*Mr/r**2
def get_gravity_at_point(self,eps,x,y,z):
r = (x**2+y**2+z**2).sqrt()
fr = self.radial_force(r)
ax = fr*x/r
ay = fr*y/r
az = fr*z/r
return ax, ay, az
def mass_density(self,r):
return self.rho0_r0_to_alpha*r**(-self.alpha)*numpy.exp(-(r/self.rc)**2.)
def enclosed_mass(self,r):
"""
careful with scipy.special.gammainc :
gammainc(a,x) = 1 / gamma(a) * integral(exp(-t) * t**(a-1), t=0..x)
"""
if not scipy_imported:
    warnings.warn("importing scipy failed, maybe not installed")
return 2.*numpy.pi*self.rho0_r0_to_alpha*self.rc**(3.-self.alpha)* \
gamma(1.5-0.5*self.alpha)*gammainc(1.5-0.5*self.alpha,(r/self.rc)**2)
def circular_velocity(self,r):
fr = self.radial_force(r)
return (-r*fr).sqrt()
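# Added illustrative check (not part of the original file): scipy's gammainc
# is regularized (divided by gamma(a)), so gamma(a)*gammainc(a, x) in
# enclosed_mass() is the lower incomplete gamma function; for r >> rc the
# enclosed mass converges to 2*pi*rho0*r0**alpha*rc**(3-alpha)*gamma(1.5-alpha/2).
def _power_law_cutoff_demo():
    if not scipy_imported:
        return None
    profile = PowerLawCutoff_profile(
        1.e8 | units.MSun / units.kpc**3, 1. | units.kpc, 1.8, 1.9 | units.kpc)
    total = 2. * numpy.pi * profile.rho0_r0_to_alpha * profile.rc**(3. - profile.alpha) * \
        gamma(1.5 - 0.5 * profile.alpha)
    return profile.enclosed_mass(20. | units.kpc), total  # nearly equal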
class MWpotentialBovy2015(LiteratureReferencesMixIn):
"""
MW-like galaxy potential consists of a bulge modeled as a power-law density
profile that is exponentially cut-off, a Miyamoto & Nagai disk; and a
NFW dark-matter halo. Parameters of individual components are based on
fits to observational data. In addition to these constraints, the solar
distance to the Galactic center is set to R0=8kpc and the circular velocity
at the Sun to V0=220km/s.
.. [#] Bovy, J; ApJSS, Volume 216, Issue 2, article id. 29, 27 pp. (2015)
"""
def __init__(self):
LiteratureReferencesMixIn.__init__(self)
self.bulge = PowerLawCutoff_profile(2.22638e8|units.MSun/units.kpc**3, 1.|units.kpc, 1.8, 1.9|units.kpc)
self.disk = MiyamotoNagai_profile(6.81766163214e10|units.MSun, 3.|units.kpc, 0.28|units.kpc)
self.halo = NFW_profile(8484685.92946|units.MSun/units.kpc**3, 16.|units.kpc)
def get_potential_at_point(self,eps,x,y,z):
return self.bulge.get_potential_at_point(eps,x,y,z) + \
self.disk.get_potential_at_point(eps,x,y,z) + \
self.halo.get_potential_at_point(eps,x,y,z)
def get_gravity_at_point(self,eps,x,y,z):
ax_b,ay_b,az_b = self.bulge.get_gravity_at_point(eps,x,y,z)
ax_d,ay_d,az_d = self.disk.get_gravity_at_point(eps,x,y,z)
ax_h,ay_h,az_h = self.halo.get_gravity_at_point(eps,x,y,z)
return ax_b+ax_d+ax_h, ay_b+ay_d+ay_h, az_b+az_d+az_h
def mass_density(self,x,y,z):
r = (x**2+y**2+z**2).sqrt()
return self.bulge.mass_density(r)+self.disk.mass_density(x,y,z)+self.halo.mass_density(r)
def circular_velocity(self,r):
return (self.bulge.circular_velocity(r)**2+self.disk.circular_velocity_at_z0(r)**2+self.halo.circular_velocity(r)**2).sqrt()
def enclosed_mass(self,r):
return self.bulge.enclosed_mass(r)+self.disk.equivalent_enclosed_mass_in_plane(r)+self.halo.enclosed_mass(r)
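# Added illustrative check (not part of the original file): the model is
# calibrated so that the circular velocity at the solar radius R0 = 8 kpc is
# close to V0 = 220 km/s, as stated in the class docstring.
def _bovy2015_demo():
    mw = MWpotentialBovy2015()
    return mw.circular_velocity(8. | units.kpc).value_in(units.kms)  # ~220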
| 9,832
| 34.756364
| 162
|
py
|
amuse
|
amuse-main/src/amuse/ext/speed.py
|
import numpy
import sys
import time
from amuse.community.hermite0.interface import HermiteInterface
try:
from matplotlib import pyplot
HAVE_MATPLOTLIB = True
except ImportError:
HAVE_MATPLOTLIB = False
def calculate_speed(range_of_number_of_particles):
result = []
for n in range_of_number_of_particles: #range(8000,20000, 1000):
hermite1 = HermiteInterface()
hermite1.initialize_code()
hermite2 = HermiteInterface()
hermite2.initialize_code()
ids = [i for i in range(1,n)]
values = [1.0 * i for i in range(1,n)]
t0 = time.time()
hermite1.new_particle(values
, values
, values
, values
, values
, values
, values
, values)
t1 = time.time()
d1 = t1 - t0
#print d1, t1, t0
t0 = time.time()
for i in range(n-1):
hermite2.new_particle(values[i]
, values[i]
, values[i]
, values[i]
, values[i]
, values[i]
, values[i]
, values[i])
t1 = time.time()
d2 = t1 - t0
result.append((n, d1, d2, d2/d1))
hermite1.cleanup_code()
hermite2.cleanup_code()
del hermite1
del hermite2
return result
if __name__ == '__main__':
    measurements = calculate_speed([
        2500, 5000, 7500,
        10000, 15000, 20000,
        25000, 30000, 40000])
    if HAVE_MATPLOTLIB:  # only plot when matplotlib could be imported
        figure = pyplot.figure(figsize=(10, 10))
        plot = figure.add_subplot(1, 1, 1)
        for number, dt0, dt1, ratio in measurements:
            color = 'b'
            plot.plot([number], [ratio], color + 'o')
        plot.set_xlim(0.0, 40000)
        plot.set_ylim(0.0, 350)
        figure.savefig("speed.svg")
    print(measurements)
| 2,200
| 27.217949
| 68
|
py
|
amuse
|
amuse-main/src/amuse/ext/blender/__init__.py
| 0
| 0
| 0
|
py
|
|
amuse
|
amuse-main/src/amuse/ext/blender/blender.py
|
try:
    import Blender
    import bpy
    from Blender import Mesh
    BLENDER_AVAILABLE = True
except ImportError as ex:
    Mesh = None
    BLENDER_AVAILABLE = False
class Primitives(object):
def __init__(self):
pass
@classmethod
def sphere(cls, segments, rings, radius):
me = Mesh.Primitives.UVsphere(segments, rings, radius)
scn = bpy.data.scenes.active # link object to current scene
ob = scn.objects.new(me, 'sphere')
return ob
@classmethod
def cube(cls, radius):
me = Mesh.Primitives.Cube(radius)
scn = bpy.data.scenes.active # link object to current scene
ob = scn.objects.new(me, 'cube')
return ob
def Redraw():
Blender.Redraw()
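# Added hedged usage sketch (not part of the original file): create a sphere
# mesh when running inside Blender's Python interpreter; outside Blender the
# imports above fail, BLENDER_AVAILABLE is False and this is a no-op.
def _blender_demo():
    if not BLENDER_AVAILABLE:
        return None
    star = Primitives.sphere(segments=32, rings=32, radius=1.0)
    Redraw()  # refresh the Blender viewport
    return star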
| 771
| 23.125
| 71
|
py
|
amuse
|
amuse-main/src/amuse/datamodel/base.py
|
from amuse.support.core import CompositeDictionary
from amuse.support.core import OrderedDictionary
from amuse.support.core import late
from amuse.support.core import compare_version_strings
from amuse.support import exceptions
from amuse.units import constants
from amuse.units import units
from amuse.units import quantities
from amuse.units.quantities import Quantity
from amuse.units.quantities import new_quantity
from amuse.units.quantities import is_quantity
from amuse.units.quantities import zero
from amuse.units.quantities import AdaptingVectorQuantity
from amuse.units.quantities import as_vector_quantity
import numpy
import numpy.random
import random
import inspect
import warnings
class KeyGenerator(object):
def __next__(self):
pass
def next_set_of_keys(self, length):
pass
class BasicUniqueKeyGenerator(KeyGenerator):
def __init__(self, lowest_unique_key = 1):
self.lowest_unique_key = lowest_unique_key
def __next__(self):
new_key = self.lowest_unique_key
self.lowest_unique_key += 1
return new_key
def next_set_of_keys(self, length):
if length == 0:
return []
from_key = self.lowest_unique_key
to_key = from_key + length
self.lowest_unique_key += length
return numpy.arange(from_key, to_key)
class RandomNumberUniqueKeyGenerator(KeyGenerator):
DEFAULT_NUMBER_OF_BITS = 64
def __init__(self, number_of_bits = None, random = None):
if number_of_bits is None:
number_of_bits = self.DEFAULT_NUMBER_OF_BITS
if number_of_bits > 64:
raise exceptions.AmuseException("number of bits is larger than 64, this is currently unsupported!")
self.number_of_bits = number_of_bits
if random is None:
random = numpy.random.mtrand.RandomState()
self.random = random
def __next__(self):
return numpy.array([random.getrandbits(self.number_of_bits)], dtype=numpy.uint64)[0]
def next_set_of_keys(self, length):
if length == 0:
return []
try:
minint = -2** ((self.number_of_bits // 2) - 1)
maxint = 2** ((self.number_of_bits // 2) - 1)
low = self.random.randint(minint,maxint+1,length)
high = self.random.randint(minint,maxint+1,length)
return numpy.array(low + (high << 32), dtype=numpy.uint64)
except:
return numpy.array([random.getrandbits(self.number_of_bits) for i in range(length)], dtype='uint64')
UniqueKeyGenerator = RandomNumberUniqueKeyGenerator()
def set_sequential_key_generator(start_number):
global UniqueKeyGenerator
UniqueKeyGenerator = BasicUniqueKeyGenerator(lowest_unique_key = start_number)
def set_random_key_generator(number_of_bits = RandomNumberUniqueKeyGenerator.DEFAULT_NUMBER_OF_BITS):
global UniqueKeyGenerator
UniqueKeyGenerator = RandomNumberUniqueKeyGenerator(number_of_bits = number_of_bits)
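# Added illustrative sketch (not part of the original file): keys identify
# particles across sets. The default generator draws random 64-bit integers;
# a sequential generator can be installed instead, which is convenient for
# reproducible tests.
def _key_generator_demo():
    generator = BasicUniqueKeyGenerator(lowest_unique_key=1)
    first = next(generator)                # 1
    batch = generator.next_set_of_keys(3)  # array([2, 3, 4])
    return first, batch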
class AttributeStorage(object):
"""
Abstract base class of particle storage objects. Implemented by the
storage classes and the particle sets
"""
__version__ = -1
def add_particles_to_store(self, keys, attributes = [], values = []):
"""
Adds particles with the given keys to the set. If attribute names
and values are specified these are also set for the new particles.
"""
pass
def remove_particles_from_store(self, keys):
"""
Removes particles with the given keys from the set.
"""
pass
def get_values_in_store(self, indices, attributes):
"""
Gets the values for the attributes of the particles at the given indices.
"""
pass
def set_values_in_store(self, indices, attributes, list_of_values_to_set):
"""
Sets the attributes of the particles at the given indices to the given values.
"""
pass
def get_attribute_names_defined_in_store(self):
"""
Gets the attribute names
"""
pass
def has_key_in_store(self, key):
"""
Returns true if the given key can be found in the store
"""
return False
def get_all_keys_in_store(self):
"""
Gets the list of all keys stored
"""
return []
def get_all_indices_in_store(self):
"""
Gets the list of all valid indices in store.
"""
return []
def get_indices_of_keys(self, keys):
"""
Returns the indices where the particles are stored with
the given keys.
"""
return []
def __len__(self):
"""
Returns the number of particles in store.
"""
return 0
def get_value_in_store(self, index, attribute):
"""
Returns the value of an attribute for a particle at the given
index.
"""
return self.get_values_in_store([index],[attribute])[0][0]
def get_defined_attribute_names(self):
return []
def get_defined_settable_attribute_names(self):
return self.get_defined_attribute_names()
class DerivedAttribute(object):
"""
Abstract base class for calculated properties and
methods on sets.
"""
def get_values_for_entities(self, particles):
return None
def set_values_for_entities(self, particles, value):
raise exceptions.AmuseException("cannot set value of a DerivedAttribute")
def get_value_for_entity(self, particles, particle, index):
return None
def set_value_for_entity(self, particles, key, value):
raise exceptions.AmuseException("cannot set value of a DerivedAttribute")
class AbstractAttributeValue(object):
def __str__(self):
return self._values.__str__()
def __repr__(self):
return self._values.__repr__()
def __eq__(self, other):
if isinstance(other, AbstractAttributeValue):
return self._values == other._values
else:
return self._values == other
def __len__(self):
return len(self.indices)
def __add__(self, other):
if isinstance(other, AbstractAttributeValue):
return self._values + other._values
else:
return self._values + other
def __radd__(self, other):
return self._values + other
def __sub__(self, other):
if isinstance(other, AbstractAttributeValue):
return self._values - other._values
else:
return self._values - other
def __rsub__(self, other):
return other - self._values
def __mul__(self, other):
if isinstance(other, AbstractAttributeValue):
return self._values * other._values
else:
return self._values * other
def __rmul__(self, other):
return self._values * other
#def __imul__(self, other):
# if isinstance(other, Vector):
# newvalues = self._values * other._values
# else:
# newvalues = self._values * other
# self._set_values_for_entities(newvalues)
# return newvalues
def __pow__(self, other):
return self._values ** other
def __truediv__(self, other):  # Python 3 name for division (was __div__)
    if isinstance(other, AbstractAttributeValue):
        return self._values / other._values
    else:
        return self._values / other
def __rtruediv__(self, other):
    return other / self._values
def is_quantity(self):
return hasattr(self._values, "is_quantity") and self._values.is_quantity()
def __getattr__(self, name):
return getattr(self._values, name)
def __neg__(self):
return -self._values
def __abs__(self):
return abs(self._values)
def __lt__(self, other):
if isinstance(other, AbstractAttributeValue):
return self._values < other._values
else:
return self._values < other
def __gt__(self, other):
if isinstance(other, AbstractAttributeValue):
return self._values > other._values
else:
return self._values > other
def __ne__(self, other):
if isinstance(other, AbstractAttributeValue):
return self._values != other._values
else:
return self._values != other
def __le__(self, other):
if isinstance(other, AbstractAttributeValue):
return self._values <= other._values
else:
return self._values <= other
def __ge__(self, other):
if isinstance(other, AbstractAttributeValue):
return self._values >= other._values
else:
return self._values >= other
def __dir__(self):
return dir(self._values)
class VectorAttributeValue(AbstractAttributeValue):
def __init__(self, collection, indices, attribute_names):
self.collection = collection
self.indices = indices
self.attribute_names = attribute_names
self._values  # touch the lazy '_values' attribute so the values are fetched eagerly
@late
def _values(self):
values = self.collection.get_values_in_store(self.indices, self.attribute_names)
unit_of_the_values = None
is_a_quantity = None
for quantity in values:
if unit_of_the_values is None:
is_a_quantity = is_quantity(quantity)
if is_a_quantity:
unit_of_the_values = quantity.unit
break
if is_a_quantity:
results = []
for quantity in values:
if unit_of_the_values is None:
unit_of_the_values = quantity.unit
results.append(quantity.value_in(unit_of_the_values))
else:
results = values
results = numpy.array(results)
for i in range(len(results.shape) - 1, 0, -1):
results = numpy.swapaxes(results,0,i)
if is_a_quantity:
return unit_of_the_values.new_quantity(results)
else:
return results
def __getitem__(self, index):
return self._values.__getitem__(index)
def __setitem__(self, index, value):
raise Exception("must implement set item")
def _set_values_for_entities(self, value):
is_value_a_quantity = is_quantity(value)
if is_value_a_quantity:
vectors = value.number
else:
vectors = numpy.asanyarray(value)
split = numpy.split(vectors, len(self.attribute_names), axis = vectors.ndim - 1)
split = [x.reshape(x.shape[0] if len(x.shape) <= 2 else x.shape[:-1]) for x in split]
if is_value_a_quantity:
list_of_values = []
for i in range(len(self.attribute_names)):
values = value.unit.new_quantity(split[i])
list_of_values.append(values)
else:
list_of_values = split
self.collection.set_values_in_store(self.indices, self.attribute_names, list_of_values)
class VectorAttribute(DerivedAttribute):
"""
Combine multiple attributes into a vector attribute
"""
def __init__(self, attribute_names):
self.attribute_names = attribute_names
def get_values_for_entities(self, instance):
#if 1:
# return VectorAttributeValue(instance, instance.get_all_indices_in_store(), self.attribute_names)
values = [instance.__getattr__(attribute) for attribute in self.attribute_names]
unit_of_the_values = None
is_a_quantity = None
for quantity in values:
if unit_of_the_values is None:
is_a_quantity = is_quantity(quantity)
if is_a_quantity:
unit_of_the_values = quantity.unit
break
if is_a_quantity:
results = []
for quantity in values:
if unit_of_the_values is None:
unit_of_the_values = quantity.unit
results.append(quantity.value_in(unit_of_the_values))
else:
results = values
results = numpy.array(results)
for i in range(len(results.shape) - 1, 0, -1):
results = numpy.swapaxes(results,0,i)
if is_a_quantity:
return unit_of_the_values.new_quantity(results)
else:
return results
def set_values_for_entities(self, instance, value):
is_value_a_quantity = is_quantity(value)
if is_value_a_quantity:
vectors = value.number
else:
vectors = numpy.asanyarray(value)
split = numpy.split(vectors, len(self.attribute_names), axis = vectors.ndim - 1)
split = [x.reshape(x.shape[0] if len(x.shape) <= 2 else x.shape[:-1]) for x in split]
if is_value_a_quantity:
list_of_values = []
for i in range(len(self.attribute_names)):
values = value.unit.new_quantity(split[i])
list_of_values.append(values)
else:
list_of_values = split
instance.set_values_in_store(instance.get_all_indices_in_store(), self.attribute_names, list_of_values)
def get_value_for_entity(self, instance, particle, index):
values = instance._get_values_for_entity(index, self.attribute_names)
unit_of_the_values = None
is_a_quantity = None
for quantity in values:
if unit_of_the_values is None:
is_a_quantity = is_quantity(quantity)
if is_a_quantity:
unit_of_the_values = quantity.unit
break
if is_a_quantity:
results = []
for quantity in values:
results.append(quantity.value_in(unit_of_the_values))
return unit_of_the_values.new_quantity(results)
else:
return numpy.asarray(values)
def set_value_for_entity(self, instance, key, vector):
list_of_values = []
for quantity in vector:
if is_quantity(quantity):
list_of_values.append(quantity.as_vector_with_length(1))
else:
list_of_values.append(quantity)
instance._set_values_for_entity(key, self.attribute_names, list_of_values)
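# Added illustrative numpy sketch (not part of the original file):
# VectorAttribute stacks the per-component arrays and swaps axes so the
# component index ends up last, turning k arrays of length n into one (n, k)
# array (cf. the doctest of add_global_vector_attribute below).
def _vector_attribute_shape_demo():
    vx = numpy.array([1., 2.])
    vy = numpy.array([3., 4.])
    results = numpy.array([vx, vy])  # shape (2 components, 2 particles)
    for i in range(len(results.shape) - 1, 0, -1):
        results = numpy.swapaxes(results, 0, i)
    return results  # [[1., 3.], [2., 4.]], shape (n particles, k components)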
class CalculatedAttribute(DerivedAttribute):
"""
Calculate the value of an attribute based
on existing attributes.
"""
def __init__(self, function, attribute_names = None):
self.function = function
if attribute_names is None:
arguments, varargs, kwargs, defaults, kwonlyargs, kwonlydefaults, annotations = inspect.getfullargspec(function)
self.attribute_names = arguments
else:
self.attribute_names = attribute_names
def get_values_for_entities(self, instance):
values = instance.get_values_in_store(instance.get_all_indices_in_store(), self.attribute_names)
return self.function(*values)
def get_value_for_entity(self, particles, particle, index):
values = particles._get_values_for_entity(index, self.attribute_names)
return self.function(*values)
def new_particles_function_attribute_with_doc(function):
class BoundParticlesFunctionAttribute(object):
if function.__doc__:
__doc__ = ("\n Documentation on '{0}' particles function attribute:"
"\n\n".format(function.__name__) + function.__doc__)
_function = staticmethod(function)
def __init__(self):
self.particles = None
def __call__(self, *list_arguments, **keyword_arguments):
return self._function(self.particles, *list_arguments, **keyword_arguments)
return BoundParticlesFunctionAttribute
def new_particle_function_attribute_with_doc(function):
class BoundParticleFunctionAttribute(object):
if function.__doc__:
__doc__ = ("\n Documentation on '{0}' particle function attribute:"
"\n\n".format(function.__name__) + function.__doc__)
_function = staticmethod(function)
def __init__(self):
self.particles = None
self.index = None
self.particle = None
def __call__(self, *list_arguments, **keyword_arguments):
return self._function(self.particles, self.particle, *list_arguments, **keyword_arguments)
return BoundParticleFunctionAttribute
def new_caching_particles_function_attribute_with_doc(name, function):
class CachingBoundParticlesFunctionAttribute(object):
if function.__doc__:
__doc__ = ("\n Documentation on '{0}' particles function attribute:"
"\n\n".format(function.__name__) + function.__doc__)
_function = staticmethod(function)
_name = name
def __init__(self):
self.particles = None
def __call__(self, *list_arguments, **keyword_arguments):
cached_results = self.particles._private.cached_results
if self._name in cached_results.results:
result = cached_results.results[self._name]
else:
result = self._function(self.particles, *list_arguments, **keyword_arguments)
cached_results.results[self._name] = result
return result
return CachingBoundParticlesFunctionAttribute
class FunctionAttribute(DerivedAttribute):
def __init__(self, particles_function = None, particle_function = None):
self.particles_function_attribute = new_particles_function_attribute_with_doc(particles_function)
self.particle_function_attribute = new_particle_function_attribute_with_doc(particle_function)
def get_values_for_entities(self, particles):
function_attribute = self.particles_function_attribute()
function_attribute.particles = particles
return function_attribute
def get_value_for_entity(self, particles, particle, index):
function_attribute = self.particle_function_attribute()
function_attribute.particles = particles
function_attribute.particle = particle
function_attribute.index = index
return function_attribute
def __getstate__(self):
return self.particles_function_attribute._function, self.particle_function_attribute._function
def __setstate__(self, state):
self.particles_function_attribute = new_particles_function_attribute_with_doc(state[0])
self.particle_function_attribute = new_particle_function_attribute_with_doc(state[1])
class CachingFunctionAttribute(DerivedAttribute):
def __init__(self, name_of_the_attribute, particles_function = None, particle_function = None):
self.particles_function_attribute = new_caching_particles_function_attribute_with_doc(name_of_the_attribute, particles_function)
self.particle_function_attribute = new_particle_function_attribute_with_doc(particle_function)
def get_values_for_entities(self, particles):
function_attribute = self.particles_function_attribute()
function_attribute.particles = particles
return function_attribute
def get_value_for_entity(self, particles, particle, index):
function_attribute = self.particle_function_attribute()
function_attribute.particles = particles
function_attribute.particle = particle
function_attribute.index = index
return function_attribute
def __getstate__(self):
return self.particles_function_attribute._function, self.particle_function_attribute._function
def __setstate__(self, state):
if state[0]:
self.particles_function_attribute = new_caching_particles_function_attribute_with_doc(state[0].__name__,state[0])
if state[1]:
self.particle_function_attribute = new_particle_function_attribute_with_doc(state[1].__name__,state[1])
class CollectionAttributes(object):
"""
Objects of this class store attributes for
a particles collection or a grid.
These attributes are user set "meta" information such
as a timestamp.
"""
def __init__(self, attributes = None):
if attributes is None:
attributes = OrderedDictionary()
else:
attributes = attributes.copy()
object.__setattr__(self, '_attributes', attributes)
def __getattr__(self, name):
try:
return self._attributes[name]
except KeyError:
raise AttributeError('unknown attribute: {0!r}'.format(name))
def __setattr__(self, name, value):
self._attributes[name] = value
def __getstate__(self):
return self._attributes
def __setstate__(self, data):
object.__setattr__(self, '_attributes', data)
def __str__(self):
lines = []
for name, value in self._attributes.items():
lines.append("{0}: {1}".format(name, value))
return '\n'.join(lines)
def _copy_for_collection(self, newcollection):
return CollectionAttributes(self._attributes)
def iteritems(self):
return iter(self._attributes.items())
class CachedResults(object):
"""
Stores results for functions that only need to be called once
"""
def __init__(self):
self.results = {}
def __getstate__(self):
return {'results':{}}
class PrivateProperties(object):
"""
Defined for superclasses to store private properties.
Every set has :meth:`__setattr__` defined.
The :meth:`__setattr__` function will set all attributes
of the entities in the set to the specified value(s).
To be able to define attributes on the set itself we
use an instance of this class, attributes can be
defined as::
self._private.new_attribute = 'new value'
Subclass implementers do not need to
use the :meth:`object.__setattr__` syntax.
For documentation about the :meth:`~object.__setattr__`
call please see the
`python data model <http://docs.python.org/reference/datamodel.html>`_
documentation on the python website.
"""
pass
def __str__(self):
output = 'PrivateProperties('
output += str(id(self))
sorted_keys = sorted(self.__dict__.keys())
for name in sorted_keys:
value = self.__dict__[name]
output += '\n , '
output += name
output += '='
output += str(value)
output += ')'
return output
class UndefinedAttribute(object):
def __get__(self, obj, type=None):
raise AttributeError()
class AsynchronuousAccessToSet(object):
"""
Helper object to get asynchronous access to sets
"""
def __init__(self, store):
object.__setattr__(self, "_store", store)
def __getattr__(self, name_of_the_attribute):
#~ if name_of_the_attribute == '__setstate__':
#~ raise AttributeError('type object {0!r} has no attribute {1!r}'.format(type(self._store), name_of_the_attribute))
if name_of_the_attribute in self._store._derived_attributes:
raise AttributeError('type object {0!r} cannot asynchronously access attribute {1!r}'.format(type(self._store), name_of_the_attribute))
try:
return self._store._convert_to_entities_or_quantities(
self._store.get_all_values_of_attribute_in_store_async(name_of_the_attribute)
)
except Exception as ex:
if name_of_the_attribute in self._store.get_attribute_names_defined_in_store():
raise
else:
raise AttributeError("You tried to access attribute '{0}'"
" but this attribute is not defined for this set.".format(name_of_the_attribute))
def __setattr__(self, name_of_the_attribute, value):
value = self._store.check_attribute(value)
if name_of_the_attribute in self._store._derived_attributes:
raise AttributeError('type object {0!r} cannot asynchronously access attribute {1!r}'.format(type(self._store), name_of_the_attribute))
else:
request=self._store.set_values_in_store_async(self._store.get_all_indices_in_store(), [name_of_the_attribute], [self._store._convert_from_entities_or_quantities(value)])
def __setstate__(self, arg):
self.__dict__.update(arg)
class AbstractSet(object):
"""
Abstract superclass of all sets of particles and grids.
"""
GLOBAL_DERIVED_ATTRIBUTES = {}
# this construct is needed to ensure that numpy
# handles grids and particle sets as scalar objects
# and not as sequences
# if we put a grid in a numpy object array we want the
# grid in a field of that array and not the contents of
# the grid (i.e. the grid points)
if compare_version_strings(numpy.__version__, '1.7.0') < 0:
__array_interface__ = {'shape':() }
else:
__array_interface__ = {'shape':(),'typestr':'|O4' }
__array_struct__ = UndefinedAttribute()
__array__ = UndefinedAttribute()
def __init__(self, original = None):
if original is None:
derived_attributes = self.GLOBAL_DERIVED_ATTRIBUTES
else:
derived_attributes = original._derived_attributes
object.__setattr__(self, "_derived_attributes", CompositeDictionary(derived_attributes))
object.__setattr__(self, "_private", PrivateProperties())
object.__setattr__(self, "_request", AsynchronuousAccessToSet(self))
self._private.collection_attributes = CollectionAttributes()
self._private.cached_results = CachedResults()
@property
def collection_attributes(self):
return self._private.collection_attributes
@property
def key(self):
return self.get_all_keys_in_store()
def __getattr__(self, name_of_the_attribute):
if name_of_the_attribute== 'request':
return self._request
if name_of_the_attribute == '__setstate__':
raise AttributeError('type object {0!r} has no attribute {1!r}'.format(type(self), name_of_the_attribute))
if name_of_the_attribute in self._derived_attributes:
return self._derived_attributes[name_of_the_attribute].get_values_for_entities(self)
else:
try:
return self._convert_to_entities_or_quantities(
self.get_all_values_of_attribute_in_store(name_of_the_attribute)
)
except Exception as ex:
if name_of_the_attribute in self.get_attribute_names_defined_in_store():
raise
else:
raise AttributeError("You tried to access attribute '{0}'"
" but this attribute is not defined for this set.".format(name_of_the_attribute))
def _get_derived_attribute_value(self, name_of_the_attribute):
if name_of_the_attribute in self._derived_attributes:
return self._derived_attributes[name_of_the_attribute].get_values_for_entities(self)
else:
raise AttributeError("You tried to access attribute '{0}'"
" but this attribute is not defined for this set.".format(name_of_the_attribute))
def check_attribute(self, value):
if not (is_quantity(value) or hasattr(value, 'as_set')):
try:
value=as_vector_quantity(value)
if value.unit == units.none:
return value.number
return value
except:
return value
else:
return value
def __setattr__(self, name_of_the_attribute, value):
value = self.check_attribute(value)
if name_of_the_attribute in self._derived_attributes:
self._derived_attributes[name_of_the_attribute].set_values_for_entities(self, value)
else:
self.set_values_in_store(self.get_all_indices_in_store(), [name_of_the_attribute], [self._convert_from_entities_or_quantities(value)])
def _get_value_of_attribute(self, particle, index, attribute):
if attribute in self._derived_attributes:
return self._derived_attributes[attribute].get_value_for_entity(self, particle, index)
else:
return self.get_value_in_store(index, attribute)
def _get_values_for_entity(self, index, attributes):
return [x[0] for x in self.get_values_in_store([index], attributes)]
def _set_values_for_entity(self, index, attributes, values):
return self.set_values_in_store([index], attributes, values)
def _set_value_of_attribute(self, index, attribute, value):
if attribute in self._derived_attributes:
return self._derived_attributes[attribute].set_value_for_entity(self, index, value)
else:
return self.set_values_in_store(numpy.asarray([index]), [attribute], [value])
def _convert_to_entities_or_quantities(self, x):
if hasattr(x, 'unit') and x.unit.iskey():
return self._subset(x.number)
else:
return x
def _convert_to_entities_or_quantities_async(self, x):
def handler(inner):
result=inner()
return self._convert_to_entities_or_quantities_async(result)
x.add_handler(handler)
return x
def _convert_from_entities_or_quantities(self, x):
return x
#
# Particle storage interface
#
def get_all_values_of_attribute_in_store(self, attribute):
return self.get_values_in_store(self.get_all_indices_in_store(), [attribute])[0]
def get_all_values_of_attribute_in_store_async(self, attribute):
return self.get_values_in_store_async(self.get_all_indices_in_store(), [attribute])[0]
def get_values_in_store(self, indices, attributes):
pass
def set_values_in_store(self, indices, attributes, values):
pass
def add_particles_to_store(self, indices, attributes, values):
pass
def remove_particles_from_store(self, indices):
pass
def get_all_keys_in_store(self):
return []
def get_all_indices_in_store(self):
return []
def has_key_in_store(self):
return False
def get_attribute_names_defined_in_store(self):
return []
def _original_set(self):
return self
def add_vector_attribute(self, name_of_the_attribute, name_of_the_components):
self._derived_attributes[name_of_the_attribute] = VectorAttribute(name_of_the_components)
@classmethod
def add_global_vector_attribute(cls, name_of_the_attribute, name_of_the_components):
"""
Define a *global* vector attribute, coupling two or more scalar attributes into
one vector attribute. The vector will be defined for all particle sets
created after calling this function.
:argument name_of_the_attribute: Name to reference the vector attribute by.
:argument name_of_the_components: List of strings, each string a name of a scalar attribute.
>>> from amuse.datamodel import Particles
>>> particles = Particles(2)
>>> Particles.add_global_vector_attribute('vel', ['vx','vy'])
>>> particles.vx = [1.0 , 2.0] | units.m / units.s
>>> particles.vy = [3.0 , 4.0] | units.m / units.s
>>> particles.vel
quantity<[[1.0, 3.0], [2.0, 4.0]] m / s>
"""
cls.GLOBAL_DERIVED_ATTRIBUTES[name_of_the_attribute] = VectorAttribute(name_of_the_components)
def add_calculated_attribute(self, name_of_the_attribute, function, attributes_names = None):
"""
Define a read-only calculated attribute, values for the attribute are
calculated using the given function. The functions argument
names are interperted as attribute names. For example, if
the given function is::
def norm(x, y):
return (x*x + y*y).sqrt()
The attributes "x" and "y" will be retrieved from the particles
and send to the "norm" function.
The calculated values are not stored on the particles. Values
are recalculated every time this attribute is accessed.
:argument name_of_the_attribute: Name to reference the attribute by.
:argument function: Function to call, when attribute is accessed
>>> from amuse.datamodel import Particles
>>> particles = Particles(2)
>>> particles.x = [1.0 , 2.0] | units.m
>>> particles.y = [3.0 , 4.0] | units.m
>>> particles.add_calculated_attribute("xy", lambda x, y : x * y)
>>> print(particles.xy)
[3.0, 8.0] m**2
>>> particles[0].x = 4.0 | units.m
>>> print(particles.xy)
[12.0, 8.0] m**2
>>> print(particles[1].xy)
8.0 m**2
"""
self._derived_attributes[name_of_the_attribute] = CalculatedAttribute(function, attributes_names)
@classmethod
def add_global_calculated_attribute(cls, name_of_the_attribute, function, attributes_names = None):
"""
Define a *global* calculated attribute; values for the attribute are
calculated using the given function. The attribute will be defined for all
particle sets created after calling this function.
:argument name_of_the_attribute: Name to reference the attribute by.
:argument function: Function to call when the attribute is accessed.
:argument attributes_names: List of strings, each string a name of a scalar attribute, if None uses argument names.
>>> from amuse.datamodel import Particles
>>> Particles.add_global_calculated_attribute("xy", lambda x, y : x * y)
>>> particles = Particles(2)
>>> particles.x = [1.0 , 2.0] | units.m
>>> particles.y = [3.0 , 4.0] | units.m
>>> print(particles.xy)
[3.0, 8.0] m**2
>>> del Particles.GLOBAL_DERIVED_ATTRIBUTES['xy']
"""
cls.GLOBAL_DERIVED_ATTRIBUTES[name_of_the_attribute] = CalculatedAttribute(function, attributes_names)
def add_function_attribute(self, name_of_the_attribute, function, function_for_particle = None):
"""
Define a function attribute, adding a function to the particles
:argument name_of_the_attribute: Name to reference the attribute by.
:argument function: A function, first argument will be the particles.
>>> from amuse.datamodel import Particles
>>> particles = Particles(2)
>>> particles.x = [1.0 , 2.0] | units.m
>>> def sumx(p):
... return p.x.sum()
...
>>> particles.add_function_attribute("sum_of_x", sumx)
>>> particles.sum_of_x()
quantity<3.0 m>
"""
self._derived_attributes[name_of_the_attribute] = FunctionAttribute(function, function_for_particle)
def add_caching_function_attribute(self, name_of_the_attribute, function, function_for_particle = None):
"""
Define a function attribute, adding a function to the particles; the function will
be evaluated once for the set, and after that it will return the same cached result.
The function must not take any arguments, and the caching only works for the whole set (not on single
particles)
:argument name_of_the_attribute: Name to reference the attribute by.
:argument function: A function, first argument will be the particles.
>>> from amuse.datamodel import Particles
>>> particles = Particles(2)
>>> particles.x = [1.0, 2.0] | units.m
>>> def sumx(p):
... return p.x.sum()
...
>>> particles.add_caching_function_attribute("sum_of_x", sumx)
>>> particles.sum_of_x()
quantity<3.0 m>
>>> particles.x = [3.0, 4.0] | units.m
>>> particles.sum_of_x()
quantity<3.0 m>
"""
self._derived_attributes[name_of_the_attribute] = CachingFunctionAttribute(name_of_the_attribute, function, function_for_particle)
@classmethod
def add_global_function_attribute(cls, name_of_the_attribute, function, function_for_particle = None):
"""
Define a *global* function attribute, adding a function to all particle sets created after this call
:argument name_of_the_attribute: Name to reference the attribute by.
:argument function: A function, first argument will be the particles.
>>> from amuse.datamodel import Particles
>>> def sumx(p):
... return p.x.sum()
...
>>> Particles.add_global_function_attribute("sum_of_x", sumx)
>>> particles = Particles(2)
>>> particles.x = [4.0 , 2.0] | units.m
>>> particles.sum_of_x()
quantity<6.0 m>
>>> del Particles.GLOBAL_DERIVED_ATTRIBUTES['sum_of_x']
"""
cls.GLOBAL_DERIVED_ATTRIBUTES[name_of_the_attribute] = FunctionAttribute(function, function_for_particle)
@classmethod
def add_global_caching_function_attribute(cls, name_of_the_attribute, function, function_for_particle = None):
"""
Define a *global* caching function attribute, adding a cached function to all particle sets created after this call
:argument name_of_the_attribute: Name to reference the attribute by.
:argument function: A function, first argument will be the particles.
>>> from amuse.datamodel import Particles
>>> def sumx(p):
... return p.x.sum()
...
>>> Particles.add_global_caching_function_attribute("sum_of_x", sumx)
>>> particles = Particles(2)
>>> particles.x = [4.0 , 2.0] | units.m
>>> particles.sum_of_x()
quantity<6.0 m>
>>> particles.x = [5.0 , 6.0] | units.m
>>> particles.sum_of_x()
quantity<6.0 m>
>>> del Particles.GLOBAL_DERIVED_ATTRIBUTES['sum_of_x']
"""
cls.GLOBAL_DERIVED_ATTRIBUTES[name_of_the_attribute] = CachingFunctionAttribute(name_of_the_attribute, function, function_for_particle)
def add_particle_function_attribute(self, name_of_the_attribute, function):
"""
Define a function working on one particle
:argument name_of_the_attribute: Name to reference the attribute by.
:argument function: A function, first argument will be the particle.
>>> from amuse.datamodel import Particles
>>> def xsquared(set, p):
... return p.x * p.x
...
>>> particles = Particles(2)
>>> particles.add_particle_function_attribute("xsquared", xsquared)
>>> particles.x = [4.0 , 2.0] | units.m
>>> particles[0].xsquared()
quantity<16.0 m**2>
"""
self._derived_attributes[name_of_the_attribute] = FunctionAttribute(None, function)
def __len__(self):
return len(self.get_all_keys_in_store())
def copy(self, memento = None, keep_structure = False, filter_attributes = lambda particles, x : True):
""" Creates a new in particle set and copies all attributes and
values into this set.
The history of the set is not copied over.
Keyword arguments:
memento -- internal, a dictionary to keep track of already copied sets in
case of links between particles (default: None, will be created)
keep_structure -- internal, if True, in case of a sub or super set make a copy of
the original set and return a new subset (default: False)
"""
raise NotImplementedError()
def copy_to_memory(self):
warnings.warn("deprecated, pleasy change 'copy_to_memory' to 'copy'", DeprecationWarning)
return self.copy()
def _factory_for_new_collection(self):
raise NotImplementedError()
def copy_values_of_attribute_to(self, attribute_name, particles):
"""
Copy values of one attribute from this set to the
other set. Will only copy values for the particles
in both sets. See also :meth:`synchronize_to`.
If you need to do this a lot, setup a dedicated
channel.
>>> from amuse.datamodel import Particles,Particle
>>> particles1 = Particles(2)
>>> particles1.x = [1.0, 2.0] | units.m
>>> particles2 = particles1.copy()
>>> print(particles2.x)
[1.0, 2.0] m
>>> p3 = particles1.add_particle(Particle())
>>> particles1.x = [3.0, 4.0, 5.0] | units.m
>>> particles1.copy_values_of_attribute_to("x", particles2)
>>> print(particles2.x)
[3.0, 4.0] m
"""
channel = self.new_channel_to(particles)
channel.copy_attributes([attribute_name])
def new_channel_to(self, other):
raise NotImplementedError()
def __sub__(self, particles):
"""
Returns a subset of the set without the given particle(s)
Attribute values are not stored by the subset. The subset
provides a view on two or more sets of particles.
:parameter particles: (set of) particle(s) to be subtracted from self.
>>> from amuse.datamodel import Particles
>>> particles = Particles(4)
>>> particles.x = [1.0, 2.0, 3.0, 4.0] | units.m
>>> junk = particles[2:]
>>> new_set = particles - junk
>>> new_set # doctest:+ELLIPSIS
<amuse.datamodel.particles.ParticlesSubset object at 0x...>
>>> print(len(new_set))
2
>>> print(new_set.x)
[1.0, 2.0] m
>>> print(particles.x)
[1.0, 2.0, 3.0, 4.0] m
"""
particles = particles.as_set()
new_keys = []
new_keys.extend(self.get_all_keys_in_store())
subtract_keys = particles.get_all_keys_in_store()
for key in subtract_keys:
if key in new_keys:
new_keys.remove(key)
else:
raise exceptions.AmuseException("Unable to subtract a particle, because "
"it is not part of this set.")
return self._subset(new_keys)
def add_particles(self, particles):
raise NotImplementedError()
def add_particle(self, particle):
"""
Add one particle to the set.
:parameter particle: particle to add
>>> from amuse.datamodel import Particles,Particle
>>> particles = Particles()
>>> print(len(particles))
0
>>> particle = Particle()
>>> particle.x = 1.0 | units.m
>>> particles.add_particle(particle) # doctest:+ELLIPSIS
<amuse.datamodel.particles.Particle object at ...>
>>> print(len(particles))
1
>>> print(particles.x)
[1.0] m
"""
return self.add_particles(particle.as_set())[0]
def remove_particles(self, particles):
"""
Removes particles from the supplied set from this set.
:parameter particles: set of particles to remove from this set
>>> from amuse.datamodel import Particles
>>> particles1 = Particles(2)
>>> particles1.x = [1.0, 2.0] | units.m
>>> particles2 = Particles()
>>> particles2.add_particle(particles1[0]) # doctest:+ELLIPSIS
<amuse.datamodel.particles.Particle object at ...>
>>> particles1.remove_particles(particles2)
>>> print(len(particles1))
1
>>> print(particles1.x)
[2.0] m
"""
if len(particles) == 0:
return
keys = particles.get_all_keys_in_store()
self.remove_particles_from_store(keys)
def remove_particle(self, particle):
"""
Removes a particle from this set.
Result is undefined if particle is not part of the set
:parameter particle: particle to remove from this set
>>> from amuse.datamodel import Particles
>>> particles1 = Particles(2)
>>> particles1.x = [1.0, 2.0] | units.m
>>> particles1.remove_particle(particles1[0])
>>> print(len(particles1))
1
>>> print(particles1.x)
[2.0] m
"""
self.remove_particles(particle.as_set())
def synchronize_to(self, other_particles):
"""
Synchronize the particles of this set
with the contents of the provided set.
After this call the provided set will have
the same particles as this set. This call
will check if particles have been removed or
added; it will not copy values of existing particles
over.
:parameter other_particles: particle set which has to be updated
>>> from amuse.datamodel import Particles, Particle
>>> particles = Particles(2)
>>> particles.x = [1.0, 2.0] | units.m
>>> copy = particles.copy()
>>> new_particle = Particle()
>>> new_particle.x = 3.0 | units.m
>>> particles.add_particle(new_particle)# doctest:+ELLIPSIS
<amuse.datamodel.particles.Particle object at ...>
>>> print(particles.x)
[1.0, 2.0, 3.0] m
>>> print(copy.x)
[1.0, 2.0] m
>>> particles.synchronize_to(copy)
>>> print(copy.x)
[1.0, 2.0, 3.0] m
"""
other_keys = set(other_particles.get_all_keys_in_store())
my_keys = set(self.get_all_keys_in_store())
added_keys = my_keys - other_keys
removed_keys = other_keys - my_keys
added_keys = list(added_keys)
if added_keys:
attributes = self.get_attribute_names_defined_in_store()
attributes= [x for x in attributes if x not in other_particles._derived_attributes]
values = self.get_values_in_store(added_keys, attributes)
other_particles.add_particles_to_store(added_keys, attributes, values)
removed_keys = list(removed_keys)
if removed_keys:
other_particles.remove_particles_from_store(removed_keys)
def copy_values_of_all_attributes_to(self, particles):
channel = self.new_channel_to(particles)
channel.copy_attributes(self.get_attribute_names_defined_in_store())
def as_set(self):
"""
Returns a subset view on this set. The subset
will contain all particles of this set.
>>> from amuse.datamodel import Particles
>>> particles = Particles(3)
>>> particles.x = [1.0, 2.0, 3.0] | units.m
>>> subset = particles.as_set()
>>> print(subset.x)
[1.0, 2.0, 3.0] m
>>> print(particles.x)
[1.0, 2.0, 3.0] m
"""
return self._subset(self.get_all_keys_in_store())
def select(self, selection_function, attributes):
"""
Returns a subset view on this set. The subset
will contain all particles for which the selection
function returned True. The selection function
is called with scalar quantities defined by
the attributes parameter
>>> from amuse.datamodel import Particles
>>> particles = Particles(3)
>>> particles.mass = [10.0, 20.0, 30.0] | units.kg
>>> particles.x = [1.0, 2.0, 3.0] | units.m
>>> subset = particles.select(lambda x : x > 15.0 | units.kg, ["mass"])
>>> print(subset.mass)
[20.0, 30.0] kg
>>> print(subset.x)
[2.0, 3.0] m
"""
keys = self.get_all_keys_in_store()
#values = self._get_values(keys, attributes) #fast but no vectors
values = [getattr(self, x) for x in attributes]
selected_keys = []
for index in range(len(keys)):
key = keys[index]
arguments = [None] * len(attributes)
for attr_index, attribute in enumerate(attributes):
arguments[attr_index] = values[attr_index][index]
if selection_function(*arguments):
selected_keys.append(key)
return self._subset(selected_keys)
def select_array(self, selection_function, attributes = ()):
"""
Returns a subset view on this set. The subset
will contain all particles for which the selection
function returned True. The selection function
is called with a vector quantities containing all
the values for the attributes parameter.
This function can be faster than the select function
as it works on entire arrays. The selection_function
is called once.
>>> from amuse.datamodel import Particles
>>> particles = Particles(3)
>>> particles.mass = [10.0, 20.0, 30.0] | units.kg
>>> particles.x = [1.0, 2.0, 3.0] | units.m
>>> subset = particles.select_array(lambda x : x > 15.0 | units.kg, ["mass"])
>>> print(subset.mass)
[20.0, 30.0] kg
>>> print(subset.x)
[2.0, 3.0] m
>>> particles = Particles(999)
>>> particles.x = units.m.new_quantity(numpy.arange(1,1000))
>>> subset = particles.select_array(lambda x : x > (500 | units.m), ("x",) )
>>> print(len(subset))
499
"""
keys = self.get_all_keys_in_store()
#values = self._get_values(keys, attributes) #fast but no vectors
values = [getattr(self, x) for x in attributes]
selections = selection_function(*values)
selected_keys = numpy.compress(selections, keys)
return self._subset(selected_keys)
def difference(self, other):
"""
Returns a new subset containing the difference between
this set and the provided set.
>>> from amuse.datamodel import Particles
>>> particles = Particles(3)
>>> particles.mass = [10.0, 20.0, 30.0] | units.kg
>>> particles.x = [1.0, 2.0, 3.0] | units.m
>>> subset = particles.select(lambda x : x > 15.0 | units.kg, ["mass"])
>>> less_than_15kg = particles.difference(subset)
>>> len(subset)
2
>>> len(less_than_15kg)
1
"""
return self.as_set().difference(other)
def get_timestamp(self):
return None
def has_duplicates(self):
"""
Returns True when a set contains a particle with the
same key more than once. Particles with the same
key are interpreted as the same particles.
>>> from amuse.datamodel import Particles,Particle
>>> particles = Particles()
>>> p1 = particles.add_particle(Particle(1))
>>> p2 = particles.add_particle(Particle(2))
>>> particles.has_duplicates()
False
>>> p3 = particles.add_particle(Particle(1))
>>> particles.has_duplicates()
True
>>> p3 == p1
True
"""
return len(self) != len(set(self.get_all_keys_in_store()))
def _subset(self, keys):
raise NotImplementedError()
def __dir__(self):
"""
Utility function for introspection of particle objects
>>> from amuse.datamodel import Particles
>>> particles = Particles(3)
>>> particles.mass = [10.0, 20.0, 30.0] | units.kg
>>> particles.x = [1.0, 2.0, 3.0] | units.m
>>> print('mass' in dir(particles))
True
>>> print('x' in dir(particles))
True
"""
result = []
result.extend(dir(type(self)))
result.extend(self._attributes_for_dir())
return result
def _attributes_for_dir(self):
result = []
result.extend(self.get_attribute_names_defined_in_store())
result.extend(list(self._derived_attributes.keys()))
return result
def all_attributes(self):
result = []
result.append('key')
result.extend(self._attributes_for_dir())
return result
def stored_attributes(self):
"""
Returns a list of the names of the attributes defined on
the objects in this set (or grid).
This list will not contain the set specific methods, or derived
attributes (such as function attributes or vector attributes)
for a list with all these attributes please use ``dir``.
>>> from amuse.datamodel import Particles
>>> particles = Particles(3)
>>> particles.mass = [10.0, 20.0, 30.0] | units.kg
>>> particles.x = [1.0, 2.0, 3.0] | units.m
>>> print(particles.stored_attributes())
['key', 'mass', 'x']
"""
result = []
result.append('key')
result.extend(self.get_attribute_names_defined_in_store())
return result
def is_empty(self):
return self.__len__()==0
def get_value_in_store(self, key, attribute):
return self.get_values_in_store(numpy.asarray([key]),[attribute])[0][0]
def _convert_to_entity_or_quantity(self, x):
if is_quantity(x):
if x.unit.iskey():
return self._subset([x.number])[0]
else:
return x
else:
return x
def __add__(self, particles):
"""
Returns a particle subset, composed of the given
particle(s) and this particle set. Attribute values are
not stored by the subset. The subset provides a view
on two or more sets of particles.
:parameter particles: (set of) particle(s) to be added to self.
>>> from amuse.datamodel import Particles
>>> particles = Particles(4)
>>> particles1 = particles[:2]
>>> particles1.x = [1.0, 2.0] | units.m
>>> particles2 = particles[2:]
>>> particles2.x = [3.0, 4.0] | units.m
>>> new_set = particles1 + particles2
>>> new_set # doctest:+ELLIPSIS
<amuse.datamodel.particles.ParticlesSubset object at 0x...>
        >>> print(len(new_set))
        4
        >>> print(new_set.x)
[1.0, 2.0, 3.0, 4.0] m
"""
raise NotImplementedError()
@classmethod
def function_for_set(cls, function):
cls.add_global_function_attribute(function.__name__, function)
return function
@classmethod
def attribute_for_set(cls, function):
cls.add_global_calculated_attribute(function.__name__, function)
return function
@classmethod
def caching_function_for_set(cls, function):
cls.add_global_caching_function_attribute(function.__name__, function)
return function
def are_all_keys_in_set(self, keys):
try:
self.get_indices_of_keys(keys)
return True
        except Exception:
            return False
class LinkedArray(numpy.ndarray):
"""Links between particles and particle sets are stored in LinkedArrays.
"""
def __new__(cls, input_array):
result = numpy.asarray(input_array).view(cls)
return result
def __array_finalize__(self, array_object):
if array_object is None:
return
def copy(self, memento = None, keep_structure = False, filter_attributes = lambda particle_set, x : True):
from amuse.datamodel.particles import Particle
from amuse.datamodel.grids import GridPoint
if memento is None:
memento = {}
result = LinkedArray(self.flatten())
index = 0
for x in result:
if x is None:
result[index] = None
elif isinstance(x, Particle):
container = x.get_containing_set()
if id(container) in memento:
copy_of_container = memento[id(container)]
else:
copy_of_container = container.copy(memento, keep_structure, filter_attributes)
result[index] = copy_of_container._get_particle_unsave(x.key)
elif isinstance(x, GridPoint):
container = x.get_containing_set()
if id(container) in memento:
copy_of_container = memento[id(container)]
else:
copy_of_container = container.copy(memento, keep_structure, filter_attributes)
result[index] = GridPoint(x.index, copy_of_container)
elif isinstance(x, AbstractSet):
if id(x) in memento:
copy_of_container = memento[id(x)]
else:
copy_of_container = x.copy(memento, keep_structure, filter_attributes)
result[index] = copy_of_container
else:
raise exceptions.AmuseException("unkown type in link {0}, copy not implemented".format(type(x)))
index += 1
return result.reshape(self.shape)
def copy_with_link_transfer(self, from_container, to_container, must_copy = False, memento = None, filter_attributes = lambda particle_set, x : True):
from amuse.datamodel.particles import Particle
from amuse.datamodel.grids import GridPoint
if memento is None:
memento = dict()
result = LinkedArray(numpy.empty_like(self))
index = 0
for index in numpy.ndindex(*self.shape):
if len(index) == 1:
index = index[0]
x = self[index]
if x is None:
result[index] = None
elif isinstance(x, Particle):
container = x.get_containing_set()
if from_container is None or container is from_container:
result[index] = to_container._get_particle_unsave(x.key)
else:
result[index] = x
elif isinstance(x, GridPoint):
container = x.get_containing_set()
if from_container is None or container is from_container:
result[index] = GridPoint(x.index, to_container)
else:
result[index] = x
elif isinstance(x, AbstractSet):
if must_copy:
copy_of_container = x.copy(memento, keep_structure = True, filter_attributes = filter_attributes)
result[index] = copy_of_container
else:
if from_container is None or x is from_container:
result[index] = to_container
else:
result[index] = x
else:
raise exceptions.AmuseException("unkown type in link {0}, transfer link not implemented".format(type(x)))
return result
def as_set(self):
from amuse.datamodel.particles import Particle
from amuse.datamodel.particles import ParticlesMaskedSubset
linked_set = None
mask = []
keys = []
index = 0
flattened = LinkedArray(self.flatten())
for x in flattened:
if x is None:
mask.append(True)
keys.append(0)
elif isinstance(x, Particle):
original_set = x.as_set()._original_set()
if linked_set is None:
linked_set = original_set
elif not linked_set is original_set:
raise exceptions.AmuseException(
"could not convert the linked array to a subset as not all particles in the linked array are part of the same set"
)
keys.append(x.key)
mask.append(False)
else:
raise exceptions.AmuseException(
"could not convert the linked array to a subset as this array also contains sets of particles, grids or gridpoints"
)
index += 1
if linked_set is None or len(linked_set) == 0:
dtype = 'uint64'
else:
dtype = linked_set.get_all_keys_in_store().dtype
masked_keys = numpy.ma.masked_array(
numpy.asarray(
keys,
dtype = dtype
),
mask=mask
)
if linked_set is None:
return ParticlesMaskedSubset(None, masked_keys)
else:
return linked_set._masked_subset(masked_keys)
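    # Example (illustrative sketch, assuming 'stars' is an existing Particles set):
    #     links = LinkedArray(numpy.array([stars[0], None, stars[1]], dtype=object))
    #     subset = links.as_set()  # masked subset of 'stars'; the None becomes a masked entry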
def to_print_list(self):
from amuse.datamodel.particles import Particle
result = []
for x in self:
if x is None:
result.append('--')
elif isinstance(x, Particle):
result.append(x.key)
else:
result.append(type(x))
return result
# note: this class appears to be unused, and its exact purpose is unclear
class FixedLinkedArray(LinkedArray):
"""Links between particles and particle sets are stored in LinkedArrays.
"""
def __new__(cls, input_array, linked_set):
result = numpy.asarray(input_array).view(cls)
result.linked_set = linked_set
return result
def __array_finalize__(self, array_object):
if array_object is None:
return
self.linked_set = getattr(array_object, 'linked_set', None)
def copy(self, memento = None, keep_structure = False, filter_attributes = lambda particle_set, x : True):
if memento is None:
memento = {}
if id(self.linked_set) in memento:
copy_of_container = memento[id(self.linked_set)]
else:
copy_of_container = self.linked_set.copy(memento, keep_structure, filter_attributes)
return FixedLinkedArray(numpy.array(self, copy=True), copy_of_container)
def copy_with_link_transfer(self, from_container, to_container, must_copy = False, memento = None, filter_attributes = lambda particle_set, x : True):
if memento is None:
memento = dict()
        if must_copy:
            # note: this branch was broken and is untested (the class appears
            # to be unused); copy the linked set itself, analogous to copy() above
            new_container = self.linked_set.copy(memento, keep_structure = True, filter_attributes = filter_attributes)
else:
if from_container is None or self.linked_set is from_container:
new_container = to_container
else:
new_container = self.linked_set
return FixedLinkedArray(numpy.array(self, copy=True), new_container)
def as_set(self):
from amuse.datamodel.particles import ParticlesSubset
return self.linked_set._subset(self)
def to_print_list(self):
return list(self)
def get_particles(self, index):
keys = self.__getitem__(index)
if isinstance(keys, numpy.ndarray):
return self.linked_set._subset(keys)
else:
return self.linked_set._get_particle_unsave(keys)
def set_particles(self, index, value):
if not self.linked_set.are_all_keys_in_set(value.as_set().key):
raise Exception("trying to link to a particle that is not in the linked set")
self.__setitem__(index, value.key)
| 64,925
| 35.332401
| 181
|
py
|
amuse
|
amuse-main/src/amuse/datamodel/indexing.py
|
"""
This module provides utility functions for handling
numpy indexing options.
"""
import numpy
import collections
try:
from types import EllipsisType
except ImportError:
EllipsisType = type(Ellipsis)
if hasattr(numpy, 'count_nonzero'):
def count_nonzero(array):
return numpy.count_nonzero(array)
else:
def count_nonzero(array):
return len(numpy.flatnonzero(array))
ceil=lambda x,y: (x//y+(x%y>0))  # integer ceiling division, e.g. ceil(5, 2) == 3
# unpack_slice: get start, stop, step insofar as possible without knowing the length
def unpack_slice(s):
step = 1 if s.step is None else s.step
if step==0: raise ValueError("slice step can't be zero")
if step>0:
start = 0 if s.start is None else s.start
stop = s.stop
else:
start = -1 if s.start is None else s.start
stop = s.stop
return start, stop, step
# resolve_slice: determine a consistent start, stop, step given the length
# note the difference with slice(...).indices(length)
def resolve_slice(s,length):
step = 1 if s.step is None else s.step
if step==0: raise ValueError("slice step can't be zero")
if step>0:
start = 0 if s.start is None else s.start
stop = length if s.stop is None else s.stop
if start<0: start=length+start
if start<0: start=0
if stop<0: stop=length+stop
if stop>length: stop=length
if stop<start: stop=start
else:
start = -1 if s.start is None else s.start
stop = -(length+1) if s.stop is None else s.stop
if start>=0: start=-length+start
if start>-1: start=-1
if stop>=0: stop=-length+stop
if stop<-length: stop=-(length+1)
if stop>start: stop=start
return start,stop,step
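# Example (illustrative sketch): unlike slice.indices, resolve_slice keeps
# negative bounds for negative steps:
#     >>> resolve_slice(slice(None, None, -1), 4)
#     (-1, -5, -1)
#     >>> slice(None, None, -1).indices(4)
#     (3, -1, -1)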
# combine_slices: s1, s2 must be resolved slices!
# note that it can return an 'unresolved' slice
def combine_slices(s1,s2):
a1,b1,c1=unpack_slice(s1)
a2,b2,c2=unpack_slice(s2)
if b1 is None or b2 is None:
raise Exception("combining slices not possible")
c3=c1*c2
imax= ceil( abs(b1-a1), abs(c1))
if c2<0:
a2=imax+a2
b2=imax+b2
a3=a1+a2*c1
jmax=ceil( abs(b2-a2), abs(c2))
b3=jmax*c3+a3
if a3<0:
if b3>-1: b3=None
else:
if b3<0: b3=None
return a3,b3,c3
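# Example (illustrative sketch): composing two resolved slices, such that
# array[s1][s2] selects the same elements as array[slice(*combine_slices(s1, s2))]:
#     >>> s1 = slice(*resolve_slice(slice(1, 9, 2), 10))
#     >>> s2 = slice(*resolve_slice(slice(1, None, 2), 4))
#     >>> combine_slices(s1, s2)
#     (3, 11, 4)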
def combine_indices(index0, index1):
if index1 is None or index0 is None:
raise Exception("unhandled case, think about numpy")
if isinstance(index0, tuple):
if len(index0) == 1:
index0 = index0[0]
elif isinstance(index1, tuple):
result=[]
offset=0
array_offset=None
for i0 in index0:
if isinstance(i0, (int,numpy.integer)):
result.append(i0)
elif isinstance(i0, numpy.ndarray) and i0.dtype != "bool":
if array_offset is None:
result.append(combine_indices(i0,index1[offset]))
array_offset=offset
offset+=1
else:
result.append(combine_indices(i0,index1[array_offset]))
else:
result.append(combine_indices(i0,index1[offset]))
offset+=1
if offset<len(index1):
result.extend(index1[offset:])
return tuple(result)
else:
index = 0
for i, x in enumerate(index0):
index = i
if isinstance(x, (int,numpy.integer)):
continue
elif isinstance(x, slice):
break
elif isinstance(x, EllipsisType):
break
else:
break
result = []
result.extend(index0[:index])
continuation = combine_indices(index0[index], index1)
if isinstance(continuation, collections.abc.Sequence):
result.extend(continuation)
else:
result.append(continuation)
result.extend(index0[index+1:])
return tuple(result)
if isinstance(index0, (int,numpy.integer)):
if isinstance(index1, tuple):
return (index0,)+index1
else:
return (index0, index1)
elif isinstance(index0, slice):
if isinstance(index1, (int, numpy.integer)):
start,stop,step = unpack_slice(index0)
if index1>=0:
return start + (index1 * step)
else:
imax= ceil( abs(stop-start), abs(step))
stop=start+imax*step
return stop + index1*step
elif isinstance(index1, EllipsisType):
return index0
elif isinstance(index1, numpy.ndarray):
start,stop,step = unpack_slice(index0)
imax= ceil( abs(stop-start), abs(step))
stop=start+imax*step
return start+ (index1 *step)*(index1>=0)+(stop + index1*step)*(index1<0)
else:
if isinstance(index1, slice):
start,stop,step = combine_slices(index0, index1)
return numpy.s_[start:stop:step]
else:
return (combine_indices(index0, index1[0]),)+index1[1:]
elif isinstance(index0, EllipsisType):
if isinstance(index1, slice):
return index1
elif isinstance(index1, EllipsisType):
return index0
elif isinstance(index1, (int, numpy.integer)):
return index1
else:
raise Exception("not handled yet")
elif isinstance(index0, list) or isinstance(index0, numpy.ndarray):
ndarray = numpy.asarray(index0)
if ndarray.dtype == 'bool':
ndarray1 = numpy.zeros_like(ndarray)
ndarray2 = ndarray1[ndarray]
ndarray2[index1] = True
ndarray1[ndarray] = ndarray2
return ndarray1
else:
return index0[index1]
else:
raise Exception("index must be a integer, slice or sequence")
def is_all_int(sequence):
for x in sequence:
if not (isinstance(x, (int, numpy.integer))):
return False
return True
def number_of_dimensions(array, index):
return number_of_dimensions_after_index(array.ndim, index)
def number_of_dimensions_after_index(number_of_dimensions, index):
if isinstance(index, EllipsisType):
return number_of_dimensions
elif isinstance(index, tuple):
if is_all_int(index):
return number_of_dimensions - len(index)
else:
result = number_of_dimensions
arrays=[]
for x in index:
if isinstance(x, EllipsisType):
pass
elif isinstance(x, slice):
pass
elif isinstance(x, numpy.ndarray):
arrays.append(x)
result-=1
else:
result -= 1
if arrays:
b=numpy.broadcast(*arrays)
result+=b.nd
return result
elif isinstance(index, (int,numpy.integer)):
return number_of_dimensions - 1
elif isinstance(index, slice):
return number_of_dimensions
elif isinstance(index, list) or isinstance(index, numpy.ndarray):
ndarray = numpy.asarray(index)
if ndarray.dtype == 'bool':
return number_of_dimensions - len(ndarray.shape) + 1
else:
if isinstance(index, list):
raise Exception("indexing with lists is inconsistent with indexing numpy arrays, hence not permitted atm")
return number_of_dimensions + len(ndarray.shape) - 1
else:
raise Exception("Not handled yet")
def normalize_slices(shape,index):
""" returns index with slice expressions normalized, i.e.
simplified using actual length, replace ellipsis, extend where necessary
"""
if isinstance(index,EllipsisType):
index=(Ellipsis,)
if isinstance(index, tuple):
if is_all_int(index):
return tuple([ind if ind>=0 else shape[i]+ind for i,ind in enumerate(index)])
else:
result = []
first_ellipsis = True
arrays = [x for x in index if isinstance(x,numpy.ndarray)]
for length,x in zip(shape,index):
if isinstance(x, slice):
result.append( slice(*resolve_slice(x,length)) )
elif isinstance(x,EllipsisType):
if first_ellipsis:
n=len(shape)-len(index)+1
result.extend([slice(0,shape[i+len(result)],1) for i in range(n)])
first_ellipsis=False
else:
result.append(slice(*resolve_slice(slice(None),length)))
else:
result.append(x)
n=len(shape)-len(result)
result.extend([slice(0,shape[len(shape)-n+i],1) for i in range(n)])
return tuple(result)
if isinstance(index, slice):
if isinstance(shape,(int,numpy.integer)):
return slice(*resolve_slice(index,shape))
else:
return normalize_slices(shape,(index,))
else:
return index
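# Example (illustrative sketch): an Ellipsis is expanded into fully resolved
# slices covering the shape:
#     >>> normalize_slices((4, 5), Ellipsis)
#     (slice(0, 4, 1), slice(0, 5, 1))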
def shape_after_index(shape, index):
index=normalize_slices(shape,index)
if isinstance(index, tuple):
if is_all_int(index):
return tuple(shape[len(index):])
else:
if len(index) != len(shape):
raise Exception("should not be possible")
shape_as_list = list(shape)
result = []
arrays = [x for x in index if isinstance(x,numpy.ndarray)]
for i,x in enumerate(index):
if isinstance(x, slice):
start,stop,step = resolve_slice(x, shape_as_list[i])
if step>0:
nitems = (stop - 1 - start) // step + 1
else:
nitems = (start- stop -1) // (-step) + 1
result.append(nitems)
elif isinstance(x, numpy.ndarray):
if arrays:
b=numpy.broadcast(*arrays)
result+=b.shape
arrays=None
else:
pass
return tuple(result)
elif isinstance(index, (int,numpy.integer)):
return tuple(shape[1:])
elif isinstance(index, slice):
return shape_after_index(shape,(index,))
elif isinstance(index, list) or isinstance(index, numpy.ndarray):
ndarray = numpy.asarray(index)
if ndarray.dtype == 'bool':
if ndarray.shape == shape:
return (count_nonzero(ndarray),)
if len(ndarray.shape) < len(shape):
if not ndarray.shape == shape[:len(ndarray.shape)]:
raise Exception("Shape is not compatible")
result = list(shape[len(ndarray.shape):])
result.insert(0, count_nonzero(ndarray))
return tuple(result)
else:
raise Exception("Not handled yet")
else:
if isinstance(index, list):
raise Exception("indexing with lists is inconsistent with indexing numpy array, hence not permitted atm")
return ndarray.shape+shape[1:]
#~ return numpy.zeros(shape)[ndarray].shape # this is cheating a bit..
else:
raise Exception("Not handled yet")
def split_numpy_index_over_dimensions(index, dimension_values):
"""given a numpy index and a list of dimension values (the accepted values
per dimension, return the selected values per dimension, values are always
arrays"""
result = list(dimension_values)
if isinstance(index, (int,numpy.integer)):
result[0] = result[0][index]
return result
elif isinstance(index, slice):
result[0] = result[0][index]
return result
elif isinstance(index, tuple):
if is_all_int(index):
for i, x in enumerate(index):
result[i] = result[i][x]
return result
else:
number_of_indices = len(index)
i = 0
for x in index:
if isinstance(x, (int,numpy.integer)):
result[i] = result[i][x]
elif x is Ellipsis:
result[i] = result[i]
for _ in range(len(dimension_values) - number_of_indices):
i += 1
result[i] = result[i]
number_of_indices += 1
elif isinstance(x, slice):
result[i] = result[i][x]
i += 1
return result
else:
raise Exception("Not handled yet")
| 13,000
| 34.914365
| 122
|
py
|
amuse
|
amuse-main/src/amuse/datamodel/staggeredgrid.py
|
from amuse.units import units
from amuse.support import exceptions
import numpy
from amuse.datamodel.grids import *
class StaggeredGrid(object):
def __init__(self, elements, nodes = None, get_corners=None):
self.elements=elements
self.nodes=nodes
self._get_corners_func = get_corners
self.get_corners()
def new_remapping_channel_to(self, other, remapper):
return GridRemappingChannel(self, other, remapper)
def get_attribute_names_defined_in_store(self):
elem = self.elements.get_attribute_names_defined_in_store()
node = self.nodes.get_attribute_names_defined_in_store()
return list(set(elem).union(set(node)))
def get_defined_settable_attribute_names(self):
elem = self.elements.get_defined_settable_attribute_names()
node = self.nodes.get_defined_settable_attribute_names()
return list(set(elem).union(set(node)))
def get_corners(self):
elements = self.elements
nodes = self.nodes
        #if a replacement get_corners method is defined for this grid, call it and return
if not (self._get_corners_func is None):
corners = self._get_corners_func()
if not hasattr(elements, '_cell_corners'):
object.__setattr__(elements, "_cell_corners", corners)
self.corners = corners
return corners
#element and node grids should be of the same type
if type(elements) != type(nodes):
raise Exception("elements and nodes grids should be of the same type")
#get set of participating axes names
dims = elements.number_of_dimensions()
if dims > 2:
raise Exception("Staggered grids with more than 2 dimensions are currently not supported")
        #this is mainly because we can only access the coordinates in
        #a dimension by their attribute names 'lat' and 'lon'
        #if the _axes_names were defined properly for in-code stored grids we could do
        # xpos=to_quantity(getattr(nodes,self._axes_names[0])), but currently that is not possible
#structured or unstructured
if type(elements) is StructuredGrid:
#structured grid assumes that the nodes are at the cell corners
#of the elements grid and therefore the nodes grid should be
#exactly 1 gridpoint larger in each dimension to fully encapsulate
#the element grid
if len(elements.shape) != len(nodes.shape):
raise Exception("elements and nodes should have the same number of the dimensions")
for i in range(len(elements.shape)):
if elements.shape[i]+1 != nodes.shape[i]:
raise Exception("nodes grid should be exactly 1 grid point larger than element grid in each dimension")
corners = numpy.zeros([dims] + list(nodes.shape), dtype=numpy.double)
#use node positions as corner positions
corners[0] = nodes.lon.value_in(units.rad)
corners[1] = nodes.lat.value_in(units.rad)
elif type(elements) == UnstructuredGrid:
#the following bit of code tries to access 'n0', 'n1' up to 'n9' of the elements grid
#a cleaner implementation would be to call get_element_nodes() directly, but we can not do that from here
attributes = elements.all_attributes()
max_corners = 10
corner_indices = []
for i in range(max_corners):
node = 'n' + str(i)
if node in attributes:
corner_indices.append(getattr(elements, node))
self.num_corners = num_corners = len(corner_indices)
object.__setattr__(elements,"_num_corners", num_corners)
self.corner_indices = corner_indices
size = elements.size
self.inverse_mapping = inverse_mapping = [[] for i in range(nodes.size)]
#only 2 dimensions supported currently
corners = numpy.zeros((2, size*num_corners), dtype=numpy.double)
node_lon = nodes.lon.value_in(units.rad)
node_lat = nodes.lat.value_in(units.rad)
for i in range(size):
for d in range(num_corners):
n = corner_indices[d][i]
inverse_mapping[n].append(i)
nlon = node_lon[n]
corners[0][i*num_corners+d] = nlon
nlat = node_lat[n]
corners[1][i*num_corners+d] = nlat
else:
raise Exception("unknown grid type for elements: should be either StructuredGrid or UnstructuredGrid")
if not hasattr(elements, '_cell_corners'):
object.__setattr__(elements, "_cell_corners", corners)
self.corners = corners
return corners
def map_elements_to_nodes_structured_larger(self, elements, nodes, elem_values):
node_values = numpy.zeros(self.nodes.shape, dtype=numpy.float64)
node_values[1:,1:] = elem_values[:,:]
node_values[0,:] = 0.0
#assume the grid is cyclic east-west
node_values[1:,0] = elem_values[:,-1]
return node_values
def map_elements_to_nodes_structured_same_size(self, elements, nodes, elem_values):
node_values = numpy.zeros(self.nodes.shape, dtype=numpy.float64)
node_values = elem_values[:]
return node_values
def map_elements_to_nodes(self, element_values):
#currently very rough remapping schemes, more sophisticated methods will be added later
if not hasattr(self, 'corners'):
self.get_corners()
elements = self.elements
nodes = self.nodes
element_values = element_values.reshape(elements.shape)
if type(elements) is StructuredGrid:
if len(elements.shape) != len(nodes.shape):
raise Exception("elements and nodes should have the same number of the dimensions")
if numpy.all([s1+1==s2 for s1,s2 in zip(elements.shape,nodes.shape)]):
return self.map_elements_to_nodes_structured_larger(elements, nodes, element_values)
if numpy.all([s1==s2 for s1,s2 in zip(elements.shape,nodes.shape)]):
return self.map_elements_to_nodes_structured_same_size(elements, nodes, element_values)
else:
raise Exception("nodes grid should have either exactly same shape or 1 grid point larger than element grid in each dimension")
elif type(elements) == UnstructuredGrid:
if (len(element_values) != self.elements.size):
raise Exception("number of values passed does not match size of elements grid")
#do a simple average value of the elements around the node
node_values = numpy.zeros(self.nodes.size, dtype=numpy.float64)
for i in range(len(node_values)):
num_neighbors = len(self.inverse_mapping[i])
value = 0.0
#add value of neighboring element
for neighbor in self.inverse_mapping[i]:
value += element_values[neighbor]
#store result
node_values[i] = value / (1.0*num_neighbors)
else:
raise Exception("unknown grid type for elements: should be either StructuredGrid or UnstructuredGrid")
return node_values
#this method is for structured grids where the nodes grid is exactly 1 grid point larger in each dimension
def map_nodes_to_elements_structured_larger(self, elements, nodes, node_values):
#do simple translation/shift of the values from the north-east corners of each grid cell to the cell centers
elem_values = numpy.zeros(self.elements.shape, dtype=numpy.float64)
elem_values = node_values[1:,1:]
return elem_values
#this method is for structured grids where the nodes grid is of the same size as the elements grid, if so
#the grid is assumed to be cyclic east-west
def map_nodes_to_elements_structured_same_size(self, elements, nodes, node_values):
#do simple translation/shift of the values from the north-east corners of each grid cell to the cell centers
elem_values = numpy.zeros(self.elements.shape, dtype=numpy.float64)
elem_values = node_values.flatten()
return elem_values
def map_nodes_to_elements(self, node_values):
if not hasattr(self, 'corners'):
self.get_corners()
elements = self.elements
nodes = self.nodes
node_values = node_values.reshape(nodes.shape)
if type(elements) is StructuredGrid:
if len(elements.shape) != len(nodes.shape):
raise Exception("elements and nodes should have the same number of the dimensions")
if numpy.all([s1+1==s2 for s1,s2 in zip(elements.shape,nodes.shape)]):
return self.map_nodes_to_elements_structured_larger(elements, nodes, node_values)
if numpy.all([s1==s2 for s1,s2 in zip(elements.shape,nodes.shape)]):
return self.map_nodes_to_elements_structured_same_size(elements, nodes, node_values)
else:
raise Exception("nodes grid should have either exactly same shape or 1 grid point larger than element grid in each dimension")
elif type(elements) == UnstructuredGrid:
if (len(node_values) != self.nodes.size):
raise Exception("number of values passed does not match size of nodes grid")
elem_values = numpy.zeros(self.elements.size, dtype=numpy.float64)
#do a simple average value of the nodes around the element
for i in range(len(elem_values)):
value = 0.0
for c in range(self.num_corners):
index = self.corner_indices[c][i]
value += node_values[index]
elem_values[i] = value / (1.0*self.num_corners)
else:
raise Exception("unknown grid type for elements: should be either StructuredGrid or UnstructuredGrid")
return elem_values
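# Usage sketch (illustrative, assuming 'elements' is a StructuredGrid of shape
# (n, m) and 'nodes' the corresponding (n+1, m+1) corner grid, both carrying
# 'lon'/'lat' attributes; the mapped values are plain numpy arrays):
#     staggered = StaggeredGrid(elements, nodes)
#     node_values = staggered.map_elements_to_nodes(element_values)
#     element_values_back = staggered.map_nodes_to_elements(node_values)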
| 10,234
| 45.103604
| 142
|
py
|
amuse
|
amuse-main/src/amuse/datamodel/attributes.py
|
from amuse.units import si
from amuse.units import units
from amuse.units import nbody_system
import numpy
class AttributeDefinition(object):
def __init__(self, name, description, unit, default_value):
self.name = name
self.description = description
self.unit = unit
self.default_value = default_value
class DomainMetaclass(type):
    def __new__(metaclass, name, bases, class_dict):
        replacement_dictionary = {}
        for key, value in class_dict.items():
            if isinstance(value, tuple):
                default_value, description = value
                replacement_dictionary[key] = AttributeDefinition(
                    key, description,
                    default_value.unit, default_value)
            else:
                replacement_dictionary[key] = value
        # pass the rewritten dictionary, so the (value, description) tuples
        # actually become AttributeDefinition instances on the class
        return type.__new__(metaclass, name, bases, replacement_dictionary)
class Domain(object, metaclass=DomainMetaclass):
time = 0.0 | si.s, "model time"
class Gravity(Domain):
mass = 0.0 | si.kg, "the mass of a star"
position = [0.0, 0.0, 0.0] | si.m, "the position vector of a star"
velocity = [0.0, 0.0, 0.0] | si.m / si.s, "the velocity vector of a star"
radius = 0.0 | si.m, "the radius of a star"
    acceleration = [0.0, 0.0, 0.0] | si.m / (si.s ** 2), "the acceleration vector of a star"
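# Example (illustrative sketch): with DomainMetaclass applied, an attribute
# declared as "0.0 | si.kg, 'the mass of a star'" is rewritten into an
# AttributeDefinition, e.g. Gravity.mass.description == "the mass of a star",
# with the unit and default value available as Gravity.mass.unit and
# Gravity.mass.default_value.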
class Hydrodynamics(Domain):
pressure = 0.0 | units.Pa, "the pressure in a region of space"
density = 0.0 | si.kg / (si.m ** 3), "the density of molecules or solid matter"
temperature = 0.0 | si.K, "the temperature of the gas"
magnetic_field = 0.0 | units.tesla, "magnetic field created by gas and stars"
velocity_field = 0.0 | si.m / si.s, "velocity of the gas"
gravity_potential = 0.0 | si.no_unit, "gravity forces from stars and gas"
viscosity = 0.0 | si.no_unit, "viscosity of the gas cloud"
class RadiativeTransfer(Domain):
temperature_gas = 0.0 | si.K, "the temperature of the gas"
temperature_dust = 0.0 | si.K, "the temperature of the dust"
temperature_background = 0.0 | si.K, "the temperature of the background"
    density = 0.0 | si.mol / (si.m**3), "molecular density"
magnetic_field = 0.0 | units.tesla, "magnetic field created by gas and stars"
velocity_field = 0.0 | si.m / si.s, "velocity of the gas"
class StellarEvolution(Domain):
mass = 0.0 | si.kg, "the mass of a star"
radius = 0.0 | si.m, "the radius of a star"
age = 0.0 | si.s, "the age of a star, time evolved since star formation"
class SseCode(StellarEvolution):
zams_mass = 0.0 | si.kg, "the mass of a star after formation"
type = 0 | si.no_unit, "stars evolve through typical stages, during each stage one can classify a star as belonging to a specific type"
luminosity = 0.0 | si.cd / (si.m ** 2), "brightness of a star"
radius = 0.0 | si.m, "total radius of a star"
core_mass = 0.0 | si.kg, "mass of the innermost layer of a star"
core_radius = 0.0 | si.m, "radius of the innermost layer of a star"
envelope_mass = 0.0 | si.kg, "mass of the radiative / convective envelope around the core of a star"
envelope_radius = 0.0 | si.m, "radius of the radiative / convective envelope around the core of a star"
spin = 0.0 | si.m / si.s, "speed of rotation around the central axis of a star"
epoch = 0.0 | si.s, "set when a star changes type"
physical_time = 0.0 | si.s, "age of a star relative to last change of type"
| 3,473
| 44.710526
| 139
|
py
|
amuse
|
amuse-main/src/amuse/datamodel/code_particles.py
|
"""
obsolete, renamed to code_storage
"""
| 42
| 9.75
| 33
|
py
|
amuse
|
amuse-main/src/amuse/datamodel/particle_attributes.py
|
import numpy
from collections import namedtuple
from amuse.units import nbody_system
from amuse.units import quantities
from amuse.units import constants
from amuse.units import units
from amuse.units.quantities import zero
from amuse.units.quantities import VectorQuantity
from amuse.units.quantities import Quantity
from amuse.units.quantities import new_quantity
from amuse.units.quantities import zero
from amuse.support import exceptions
from amuse.ext.basicgraph import Graph, MinimumSpanningTreeFromEdges, MinimumSpanningTree
from amuse.datamodel import base
from amuse.datamodel import rotation
from amuse.datamodel import ParticlesWithUnitsConverted, AbstractParticleSet, Particle
from functools import reduce
def move_to_center(particles):
"""
Shift positions and velocities of the particles such that their
center of mass (velocity) is centered on the origin.
Implemented as:
particles.position -= particles.center_of_mass()
particles.velocity -= particles.center_of_mass_velocity()
"""
particles.position -= particles.center_of_mass()
particles.velocity -= particles.center_of_mass_velocity()
def scale_to_standard(particles, convert_nbody = None,
smoothing_length_squared = zero,
virial_ratio = 0.5):
"""
Scale the particles to a standard NBODY model with G=1,
total_mass=1, and virial_radius=1 (or potential_energy=-0.5).
In virial equilibrium (virial_ratio=0.5, default) the
kinetic_energy=0.25 and the velocity_dispersion=1/sqrt(2).
:argument convert_nbody: the scaling is in nbody units,
when the particles are in si units a convert_nbody is needed
:argument smoothing_length_squared: needed for calculating
the potential energy correctly.
:argument virial_ratio: scale velocities to Q=K/|U|, (kinetic/potential energy);
Q = virial_ratio > 0.5: supervirial, will expand
Q = virial_ratio < 0.5: subvirial, will collapse
"""
if not convert_nbody is None:
particles = ParticlesWithUnitsConverted(particles, convert_nbody.as_converter_from_generic_to_si())
if not smoothing_length_squared is zero:
smoothing_length_squared = convert_nbody.to_nbody(smoothing_length_squared)
# Proper order is to scale mass, then length, then velocities.
# Simple length scaling for the potential works only in the
# unsoftened case. In general, it may not be possible to force
# the potential to -0.5, so perhaps best to stop after the simple
# scaling. We can always scale the velocities to get the correct
# virial ratio (and hence virial equilibrium).
total_mass = particles.mass.sum()
scale_factor = ((1 | total_mass.unit) / total_mass)
particles.mass *= scale_factor
potential_energy \
= particles.potential_energy(G=nbody_system.G,
smoothing_length_squared = smoothing_length_squared)
target_energy = -0.5 | nbody_system.energy
scale_factor = (potential_energy / target_energy) # unsoftened only...
particles.position *= scale_factor
if smoothing_length_squared == zero:
potential_energy = target_energy
else:
potential_energy = particles.potential_energy(G=nbody_system.G,
smoothing_length_squared = smoothing_length_squared)
if virial_ratio == 0:
scale_factor = 0
else:
scale_factor = numpy.sqrt(abs(virial_ratio*potential_energy) / particles.kinetic_energy())
particles.velocity *= scale_factor
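# Usage sketch (illustrative, assuming an N-body-units model such as a Plummer sphere):
#     from amuse.ic.plummer import new_plummer_model
#     particles = new_plummer_model(100)
#     particles.scale_to_standard()  # total mass 1, potential energy -0.5 (unsoftened)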
def center_of_mass(particles):
"""
Returns the center of mass of the particles set.
The center of mass is defined as the average
of the positions of the particles, weighted by their masses.
>>> from amuse.datamodel import Particles
>>> particles = Particles(2)
>>> particles.x = [-1.0, 1.0] | units.m
>>> particles.y = [0.0, 0.0] | units.m
>>> particles.z = [0.0, 0.0] | units.m
>>> particles.mass = [1.0, 1.0] | units.kg
>>> particles.center_of_mass()
quantity<[0.0, 0.0, 0.0] m>
"""
masses = particles.mass
position=particles.position
total_mass = masses.sum()
return (position * masses.reshape((len(masses),1))).sum(0) / total_mass
def center_of_mass_velocity(particles):
"""
Returns the center of mass velocity of the particles set.
The center of mass velocity is defined as the average
of the velocities of the particles, weighted by their masses.
>>> from amuse.datamodel import Particles
>>> particles = Particles(2)
>>> particles.vx = [-1.0, 1.0] | units.ms
>>> particles.vy = [0.0, 0.0] | units.ms
>>> particles.vz = [0.0, 0.0] | units.ms
>>> particles.mass = [1.0, 1.0] | units.kg
>>> particles.center_of_mass_velocity()
quantity<[0.0, 0.0, 0.0] m * s**-1>
"""
masses = particles.mass
velocity=particles.velocity
total_mass = masses.sum()
return (velocity * masses.reshape((len(masses),1))).sum(0) / total_mass
def total_momentum(particles):
"""
Returns the total momentum of the particles set.
>>> from amuse.datamodel import Particles
>>> particles = Particles(2)
>>> particles.vx = [-1.0, 1.0] | units.ms
>>> particles.vy = [0.0, 0.0] | units.ms
>>> particles.vz = [0.0, 0.0] | units.ms
>>> particles.mass = [1.0, 1.0] | units.kg
>>> particles.total_momentum()
quantity<[0.0, 0.0, 0.0] m * kg * s**-1>
"""
masses = particles.mass
vel=particles.velocity
momx = (masses * vel[:,0]).sum()
momy = (masses * vel[:,1]).sum()
momz = (masses * vel[:,2]).sum()
return quantities.VectorQuantity.new_from_scalar_quantities(momx,
momy, momz)
def total_angular_momentum(particles):
"""
Returns the total angular momentum of the particles set.
>>> from amuse.datamodel import Particles
>>> particles = Particles(2)
>>> particles.x = [-1.0, 1.0] | units.m
>>> particles.y = [0.0, 0.0] | units.m
>>> particles.z = [0.0, 0.0] | units.m
>>> particles.vx = [0.0, 0.0] | units.ms
>>> particles.vy = [-1.0, 1.0] | units.ms
>>> particles.vz = [0.0, 0.0] | units.ms
>>> particles.mass = [1.0, .5] | units.kg
>>> particles.total_angular_momentum()
quantity<[0.0, 0.0, 1.5] m**2 * kg * s**-1>
"""
# equivalent to:
# lx=(m*(y*vz-z*vy)).sum()
# ly=(m*(z*vx-x*vz)).sum()
# lz=(m*(x*vy-y*vx)).sum()
return (particles.mass.reshape((-1,1)) *particles.position.cross(particles.velocity)).sum(axis=0)
def moment_of_inertia(particles):
"""
Returns the total moment of inertia (about the Z axis) of the particle
set.
"""
m = particles.mass
x = particles.x
y = particles.y
return (m * (x**2 + y**2)).sum()
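# Example (illustrative sketch): two 1 kg particles at x = +/- 2 m (y = z = 0)
# give I = sum(m * (x**2 + y**2)) = 8 kg m**2.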
def kinetic_energy(particles):
"""
Returns the total kinetic energy of the
particles in the particles set.
>>> from amuse.datamodel import Particles
>>> particles = Particles(2)
>>> particles.vx = [-1.0, 1.0] | units.ms
>>> particles.vy = [0.0, 0.0] | units.ms
>>> particles.vz = [0.0, 0.0] | units.ms
>>> particles.mass = [1.0, 1.0] | units.kg
>>> particles.kinetic_energy()
quantity<1.0 m**2 * kg * s**-2>
"""
if len(particles) < 1:
return zero
mass = particles.mass
vx = particles.vx
vy = particles.vy
vz = particles.vz
v_squared = (vx * vx) + (vy * vy) + (vz * vz)
m_v_squared = mass * v_squared
return 0.5 * m_v_squared.sum()
def potential_energy(particles, smoothing_length_squared = zero, G = constants.G):
"""
Returns the total potential energy of the particles in the particles set.
    :argument smoothing_length_squared: gravitational softening, added to every distance**2.
    :argument G: gravitational constant, needs to be changed for particles in different unit systems
>>> from amuse.datamodel import Particles
>>> particles = Particles(2)
>>> particles.x = [0.0, 1.0] | units.m
>>> particles.y = [0.0, 0.0] | units.m
>>> particles.z = [0.0, 0.0] | units.m
>>> particles.mass = [1.0, 1.0] | units.kg
>>> particles.potential_energy()
quantity<-6.67428e-11 m**2 * kg * s**-2>
"""
if len(particles) < 2:
return zero
mass = particles.mass
x_vector = particles.x
y_vector = particles.y
z_vector = particles.z
sum_of_energies = zero
for i in range(len(particles) - 1):
x = x_vector[i]
y = y_vector[i]
z = z_vector[i]
dx = x - x_vector[i+1:]
dy = y - y_vector[i+1:]
dz = z - z_vector[i+1:]
dr_squared = (dx * dx) + (dy * dy) + (dz * dz)
dr = (dr_squared+smoothing_length_squared).sqrt()
m_m = mass[i] * mass[i+1:]
energy_of_this_particle = (m_m / dr).sum()
sum_of_energies -= energy_of_this_particle
return G * sum_of_energies
def thermal_energy(particles):
"""
Returns the total internal energy of the (gas)
particles in the particles set.
>>> from amuse.datamodel import Particles
>>> particles = Particles(2)
>>> particles.u = [0.5, 0.5] | units.ms**2
>>> particles.mass = [1.0, 1.0] | units.kg
>>> particles.thermal_energy()
quantity<1.0 m**2 * kg * s**-2>
"""
return (particles.mass * particles.u).sum()
def particle_specific_kinetic_energy(set, particle):
"""
Returns the specific kinetic energy of the particle.
>>> from amuse.datamodel import Particles
>>> particles = Particles(2)
>>> particles.vx = [0.0, 1.0] | units.ms
>>> particles.vy = [0.0, 0.0] | units.ms
>>> particles.vz = [0.0, 0.0] | units.ms
>>> particles.mass = [1.0, 1.0] | units.kg
>>> particles[1].specific_kinetic_energy()
quantity<0.5 m**2 * s**-2>
"""
return 0.5*(particle.velocity**2).sum()
def specific_kinetic_energy(particles):
"""
Returns the specific kinetic energy of each particle in the set.
>>> from amuse.datamodel import Particles
>>> particles = Particles(2)
>>> particles.vx = [1.0, 1.0] | units.ms
>>> particles.vy = [0.0, 0.0] | units.ms
>>> particles.vz = [0.0, 0.0] | units.ms
>>> particles.mass = [1.0, 1.0] | units.kg
>>> particles.specific_kinetic_energy()
quantity<[0.5, 0.5] m**2 * s**-2>
"""
return 0.5*(particles.vx**2+particles.vy**2+particles.vz**2)
def particle_potential(set, particle, smoothing_length_squared = zero, G = constants.G):
"""
Returns the potential at the position of the particle.
    :argument smoothing_length_squared: gravitational softening, added to every distance**2.
    :argument G: gravitational constant, needs to be changed for particles in different unit systems
>>> from amuse.datamodel import Particles
>>> particles = Particles(2)
>>> particles.x = [0.0, 1.0] | units.m
>>> particles.y = [0.0, 0.0] | units.m
>>> particles.z = [0.0, 0.0] | units.m
>>> particles.mass = [1.0, 1.0] | units.kg
>>> particles[1].potential()
quantity<-6.67428e-11 m**2 * s**-2>
"""
particles = set - particle
dx = particle.x - particles.x
dy = particle.y - particles.y
dz = particle.z - particles.z
dr_squared = (dx * dx) + (dy * dy) + (dz * dz)
dr = (dr_squared+smoothing_length_squared).sqrt()
return - G * (particles.mass / dr).sum()
def particleset_potential(particles, smoothing_length_squared = zero, G = constants.G, gravity_code = None, block_size = 0):
"""
Returns the potential at the position of each particle in the set.
    :argument smoothing_length_squared: gravitational softening, added to every distance**2.
    :argument G: gravitational constant, needs to be changed for particles in different unit systems
>>> from amuse.datamodel import Particles
>>> particles = Particles(2)
>>> particles.x = [0.0, 1.0] | units.m
>>> particles.y = [0.0, 0.0] | units.m
>>> particles.z = [0.0, 0.0] | units.m
>>> particles.mass = [1.0, 1.0] | units.kg
>>> particles.potential()
quantity<[-6.67428e-11, -6.67428e-11] m**2 * s**-2>
"""
n = len(particles)
if block_size == 0:
max = 100000 * 100 #100m floats
block_size = max // n
if block_size == 0:
            block_size = 1 #if more than 100m particles, process them one by one
mass = particles.mass
x_vector = particles.x
y_vector = particles.y
z_vector = particles.z
potentials = VectorQuantity.zeros(len(mass),mass.unit/x_vector.unit)
inf_len = numpy.inf | x_vector.unit
offset = 0
newshape =(n, 1)
x_vector_r = x_vector.reshape(newshape)
y_vector_r = y_vector.reshape(newshape)
z_vector_r = z_vector.reshape(newshape)
mass_r=mass.reshape(newshape)
while offset < n:
if offset + block_size > n:
block_size = n - offset
x = x_vector[offset:offset+block_size]
y = y_vector[offset:offset+block_size]
z = z_vector[offset:offset+block_size]
indices = numpy.arange(block_size)
dx = x_vector_r - x
dy = y_vector_r - y
dz = z_vector_r - z
dr_squared = (dx * dx) + (dy * dy) + (dz * dz)
dr = (dr_squared+smoothing_length_squared).sqrt()
index = (indices + offset, indices)
dr[index] = inf_len
potentials += (mass[offset:offset+block_size]/dr).sum(axis=1)
offset += block_size
return -G * potentials
def virial_radius(particles):
"""
Returns the virial radius of the particles set.
The virial radius is the inverse of the average inverse
distance between particles, weighted by their masses.
>>> from amuse.datamodel import Particles
>>> particles = Particles(2)
>>> particles.x = [-1.0, 1.0] | units.m
>>> particles.y = [0.0, 0.0] | units.m
>>> particles.z = [0.0, 0.0] | units.m
>>> particles.mass = [1.0, 1.0] | units.kg
>>> particles.virial_radius()
quantity<4.0 m>
"""
if len(particles) < 2:
raise exceptions.AmuseException("Cannot calculate virial radius for a particles set with fewer than 2 particles.")
partial_sum = zero
mass = particles.mass
x_vector = particles.x
y_vector = particles.y
z_vector = particles.z
for i in range(len(particles) - 1):
x = x_vector[i]
y = y_vector[i]
z = z_vector[i]
dx = x - x_vector[i+1:]
dy = y - y_vector[i+1:]
dz = z - z_vector[i+1:]
dr_squared = (dx * dx) + (dy * dy) + (dz * dz)
dr = (dr_squared).sqrt()
m_m = mass[i] * mass[i+1:]
partial_sum += (m_m / dr).sum()
return (mass.sum()**2) / (2 * partial_sum)
def total_mass(particles):
"""
Returns the total mass of the particles set.
>>> from amuse.datamodel import Particles
>>> particles = Particles(3)
>>> particles.mass = [1.0, 2.0, 3.0] | units.kg
>>> particles.total_mass()
quantity<6.0 kg>
"""
return particles.mass.sum()
def total_radius(particles):
"""
Returns the total radius (maximum distance from center) of the particles set.
>>> from amuse.datamodel import Particles
>>> particles = Particles(3)
>>> particles.mass = [1.0, 2.0, 3.0] | units.kg
>>> particles.position = [0.0, 0.0, 0.0] | units.m
>>> particles.x = [0.0, 3.0, 6.0] | units.m
>>> particles.total_radius()
quantity<4.0 m>
"""
return (particles.position - particles.center_of_mass()).lengths_squared().amax().sqrt()
# move_to_center??
def get_binaries(particles,hardness=10,G = constants.G):
"""
    Returns the binaries in a particle set. Binaries are selected according to a hardness criterion [hardness=10].
    This function returns the binaries as a list of i,j particle pairs. Triple detection is not done.
>>> from amuse import datamodel
>>> m = [1,1,1] | units.MSun
>>> x = [-1,1,0] | units.AU
>>> y = [0,0,1000] | units.AU
>>> z = [0,0,0] | units.AU
>>> vx = [0,0,0] | units.kms
>>> vy = [1.,-1.,0] | units.kms
>>> vz = [0,0,0] | units.kms
>>> particles = datamodel.create_particle_set( mass=m,x=x,y=y,z=z,vx=vx,vy=vy,vz=vz )
>>> binaries = particles.get_binaries()
    >>> print(len(binaries))
1
"""
n=len(particles)
total_Ek=(0.5*particles.mass*(particles.vx**2+particles.vy**2+particles.vz**2)).sum()
average_Ek=total_Ek/particles.mass.sum()
max_mass=particles.mass.amax()
limitE=hardness*average_Ek
a=numpy.argsort(particles.x.number)
binaries=[]
for i in range(n-1):
j=i+1
while j<n and (particles.x[a[j]]-particles.x[a[i]])<2*G*max_mass/limitE:
r2=(particles.x[a[j]]-particles.x[a[i]])**2+ \
(particles.y[a[j]]-particles.y[a[i]])**2+ \
(particles.z[a[j]]-particles.z[a[i]])**2
v2=(particles.vx[a[j]]-particles.vx[a[i]])**2+ \
(particles.vy[a[j]]-particles.vy[a[i]])**2+ \
(particles.vz[a[j]]-particles.vz[a[i]])**2
r=r2**0.5
eb=G*(particles.mass[a[i]]+particles.mass[a[j]])/r-0.5*v2
if eb > limitE:
binary=particles[[a[i],a[j]]].copy()
binary.hardness=eb/average_Ek
binaries.append(binary)
j+=1
return binaries
class HopContainer(object):
def __init__(self):
self.code = None
self.hop_factory = None
def initialize(self, unit_converter):
if self.code is None or self.code.get_name_of_current_state() == "STOPPED":
if self.hop_factory is None:
from amuse.community.hop.interface import Hop
self.hop_factory = Hop
self.code = self.hop_factory(unit_converter)
else:
if len(self.code.particles) > 0:
self.code.particles.remove_particles(self.code.particles)
def densitycentre_coreradius_coredens(particles, unit_converter=None, number_of_neighbours=7,
reuse_hop=False, hop=HopContainer()):
"""
calculate position of the density centre, coreradius and coredensity
>>> import numpy
>>> from amuse.ic.plummer import new_plummer_sphere
>>> numpy.random.seed(1234)
>>> particles=new_plummer_sphere(100)
>>> pos,coreradius,coredens=particles.densitycentre_coreradius_coredens()
    >>> print(coreradius)
0.404120092331 length
"""
if isinstance(hop, HopContainer):
hop.initialize(unit_converter)
hop = hop.code
try:
hop.particles.add_particles(particles)
except Exception as ex:
hop.stop()
raise exceptions.AmuseException(str(ex)+" (note: check whether Hop needs a converter here)")
hop.parameters.density_method=2
hop.parameters.number_of_neighbors_for_local_density=number_of_neighbours
hop.calculate_densities()
density=hop.particles.density
x=hop.particles.x
y=hop.particles.y
z=hop.particles.z
rho=density.amax()
total_density=numpy.sum(density)
x_core=numpy.sum(density*x)/total_density
y_core=numpy.sum(density*y)/total_density
z_core=numpy.sum(density*z)/total_density
rc = (density * ((x-x_core)**2+(y-y_core)**2+(z-z_core)**2).sqrt()).sum() / total_density
if not reuse_hop:
hop.stop()
return VectorQuantity.new_from_scalar_quantities(x_core,y_core,z_core), rc, rho
def new_particle_from_cluster_core(particles, unit_converter=None, density_weighting_power=2, cm=None,
reuse_hop=False, hop=HopContainer()):
"""
Uses Hop to find the density centre (core) of a particle distribution
and stores the properties of this core on a particle:
position, velocity, (core) radius and (core) density.
Particles are assigned weights that depend on the density (as determined by
Hop) to a certain power.
The default weighting power is 2, which is most commonly used. Set
density_weighting_power to 1 in order to get the original weighting of
Casertano & Hut (1985, ApJ, 298, 80).
:argument unit_converter: Required if the particles are in SI units
:argument density_weighting_power: Particle properties are weighted by density to this power
"""
if isinstance(hop, HopContainer):
hop.initialize(unit_converter)
hop = hop.code
in_hop = hop.particles.add_particles(particles)
hop.parameters.density_method = 2
hop.parameters.number_of_neighbors_for_local_density = 7
hop.calculate_densities()
density = in_hop.density.copy()
if not reuse_hop:
hop.stop()
weights = (density**density_weighting_power).reshape((-1,1))
# Reshape makes sure that density can be multiplied with vectors, e.g. position
result = Particle()
result.density = density.amax()
total_weight = weights.sum()
if cm is None:
result.position = (weights * particles.position).sum(axis=0) / total_weight
else:
result.position = cm
result.velocity = (weights * particles.velocity).sum(axis=0) / total_weight
result.radius = (weights.flatten() * (particles.position - result.position).lengths()).sum() / total_weight
return result
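# Usage sketch (illustrative; requires the Hop community code, and a unit
# converter when the particles carry SI units):
#     core = particles.cluster_core(unit_converter)
#     print(core.position, core.radius, core.density)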
def bound_subset(particles, tidal_radius=None, unit_converter=None, density_weighting_power=2,
smoothing_length_squared=zero, G=constants.G, core=None,
reuse_hop=False, hop=HopContainer(), gravity_code=None):
"""
find the particles bound to the cluster. Returns a subset of bound particles.
:argument tidal_radius: particles beyond this are considered not bound
:argument unit_converter: Required if the particles are in SI units
:argument density_weighting_power: Particle properties are weighted by density to this power
    :argument smoothing_length_squared: the smoothing length for gravity.
    :argument G: gravitational constant, needs to be changed for particles in different unit systems
:argument core: (optional) core of the cluster
>>> from amuse.ic.plummer import new_plummer_model
>>> from amuse.units import nbody_system
>>> plum=new_plummer_model(100)
    >>> print(len(plum.bound_subset(G=nbody_system.G)))
100
>>> plum[0].velocity*=100
>>> plum[0].position*=100
    >>> print(len(plum.bound_subset(G=nbody_system.G)))
99
"""
if core is None:
core = particles.cluster_core(unit_converter, density_weighting_power, reuse_hop=reuse_hop, hop=hop)
position=particles.position-core.position
velocity=particles.velocity-core.velocity
v2=velocity.lengths_squared()
r2=position.lengths_squared()
pot=particles.potential(smoothing_length_squared, G, gravity_code = gravity_code)
if tidal_radius is None:
boundary_radius2=r2.max()
else:
boundary_radius2=tidal_radius**2
bs=numpy.where( (r2 <= boundary_radius2) & (pot+0.5*v2 < zero) )[0]
return particles[bs]
def mass_segregation_Gini_coefficient(particles, unit_converter=None, density_weighting_power=2,
core=None, reuse_hop=False, hop=HopContainer()):
"""
Converse & Stahler 2008 Gini coefficient for cluster.
:argument unit_converter: Required if the particles are in SI units
:argument density_weighting_power: Particle properties are weighted by density to this power
:argument core: (optional) core of the cluster
>>> import numpy
>>> from amuse.ic.plummer import new_plummer_model
>>> from amuse.units import nbody_system
>>> plum=new_plummer_model(100)
>>> index=plum.position.lengths_squared().argmin()
>>> plum.mass=0|nbody_system.mass
>>> plum[index].mass=1|nbody_system.mass
    >>> print(plum.mass_segregation_Gini_coefficient())
1.0
"""
if core is None:
core = particles.cluster_core(unit_converter, density_weighting_power, reuse_hop=reuse_hop, hop=hop)
position=particles.position-core.position
r2=position.lengths_squared().number
a=numpy.argsort(r2)
m=particles.mass.number[a]
nf=1.*numpy.array(list(range(len(m))))/(len(m)-1.)
mf=m.cumsum()
mf=mf/mf[-1]
mfmnf=2*(mf-nf)
return (mfmnf[1:]+mfmnf[:-1]).sum()/2/(len(mf)-1.)
def LagrangianRadii(
stars, unit_converter="auto", mf=[0.01,0.02,0.05,0.1,0.2,0.5,0.75,0.9,1],
cm=None, number_of_neighbours=7, reuse_hop=False, hop=HopContainer(),
):
"""
Calculate lagrangian radii. Output is radii, mass fraction
>>> import numpy
>>> from amuse.ic.plummer import new_plummer_sphere
>>> numpy.random.seed(1234)
>>> parts=new_plummer_sphere(100)
>>> lr,mf=parts.LagrangianRadii()
    >>> print(lr[5])
0.856966667972 length
"""
import bisect
if unit_converter == "auto":
# Try to determine the right unit base, using the mass
mass_base_unit = stars.mass.unit.base[0][1]
if mass_base_unit is units.kg:
# Set converter lengths to be something that seems reasonable
converter_mass = stars.total_mass() / len(stars)
converter_length = (stars.position - stars.center_of_mass()).lengths().mean()
unit_converter = nbody_system.nbody_to_si(
converter_mass, converter_length,
)
elif mass_base_unit is nbody_system.mass:
unit_converter = None
else:
unit_converter = None
if cm is None:
cm,rcore,rhocore = stars.densitycentre_coreradius_coredens(
unit_converter=unit_converter,
number_of_neighbours=number_of_neighbours,
reuse_hop=reuse_hop, hop=hop
)
cmx,cmy,cmz=cm
r2=(stars.x-cmx)**2+(stars.y-cmy)**2+(stars.z-cmz)**2
a=numpy.argsort(r2.number)
rsorted=r2[a]**0.5
msorted=stars.mass[a].number
mcum=msorted.cumsum()
lr=cmx.unit([])
for f in mf:
i=bisect.bisect(mcum,mcum[-1]*f)
if i<=0:
lr.append(rsorted[0]/2.)
else:
lr.append(rsorted[i-1])
return lr,mf
def find_closest_particle_to(particles,x,y,z):
"""
return closest particle to x,y,z position
>>> from amuse.datamodel import Particles
>>> particles = Particles(2)
>>> particles.x = [0.0, 2.0] | units.m
>>> particles.y = [0.0, 0.0] | units.m
>>> particles.z = [0.0, 0.0] | units.m
    >>> print(particles.find_closest_particle_to(-1 | units.m, 0. | units.m, 0. | units.m).x)
0.0 m
"""
d2=(particles.x-x)**2+(particles.y-y)**2+(particles.z-z)**2
return particles[d2.number.argmin()]
def potential_energy_in_field(particles, field_particles, smoothing_length_squared = zero, G = constants.G, just_potential = False):
"""
Returns the total potential energy of the particles associated with an external
gravitational field, which is represented by the field_particles.
:argument field_particles: the external field consists of these (i.e. potential energy is calculated relative to the field particles)
    :argument smoothing_length_squared: gravitational softening, added to every distance**2.
    :argument G: gravitational constant, needs to be changed for particles in different unit systems
>>> from amuse.datamodel import Particles
>>> field_particles = Particles(2)
>>> field_particles.x = [0.0, 2.0] | units.m
>>> field_particles.y = [0.0, 0.0] | units.m
>>> field_particles.z = [0.0, 0.0] | units.m
>>> field_particles.mass = [1.0, 1.0] | units.kg
>>> particles = Particles(2)
>>> particles.x = [1.0, 3.0] | units.m
>>> particles.y = [0.0, 0.0] | units.m
>>> particles.z = [0.0, 0.0] | units.m
>>> particles.mass = [1.0, 1.0] | units.kg
>>> particles.potential_energy_in_field(field_particles)
quantity<-2.22476e-10 m**2 * kg * s**-2>
"""
if len(field_particles) == 0:
return zero * G
n = len(particles)
dimensions = particles.position.shape[-1]
transposed_positions = particles.position.reshape([n,1,dimensions])
dxdydz = transposed_positions - field_particles.position
dr_squared = (dxdydz**2).sum(-1)
dr = (dr_squared+smoothing_length_squared).sqrt()
if just_potential:
m_m = field_particles.mass
return -G * (m_m / dr).sum(1)
else:
m_m = particles.mass.reshape([n,1]) * field_particles.mass
return -G * (m_m / dr).sum()
def distances_squared(particles, other_particles):
"""
Returns the distance squared from each particle in this set to each of the particles in the other set.
:argument other_particles: the particles to which the distance squared is calculated
>>> from amuse.datamodel import Particles
>>> field_particles = Particles(2)
>>> field_particles.x = [0.0, 2.0] | units.m
>>> field_particles.y = [0.0, 0.0] | units.m
>>> field_particles.z = [0.0, 0.0] | units.m
>>> particles = Particles(3)
>>> particles.x = [1.0, 3.0, 4.0] | units.m
>>> particles.y = [0.0, 0.0, 0.0] | units.m
>>> particles.z = [0.0, 0.0, 0.0] | units.m
>>> particles.distances_squared(field_particles)
quantity<[[1.0, 1.0], [9.0, 1.0], [16.0, 4.0]] m**2>
"""
transposed_positions = particles.position.reshape((len(particles), 1, -1))
dxdydz = transposed_positions - other_particles.position
return (dxdydz**2).sum(-1)
def nearest_neighbour(particles, neighbours=None, max_array_length=10000000):
"""
Returns the nearest neighbour of each particle in this set. If the 'neighbours'
particle set is supplied, the search is performed on the neighbours set, for
    each particle in the original set. Otherwise the nearest neighbour in the same
set is searched.
:argument neighbours: the particle set in which to search for the nearest neighbour (optional)
>>> from amuse.datamodel import Particles
>>> particles = Particles(3)
>>> particles.x = [1.0, 3.0, 4.0] | units.m
>>> particles.y = [0.0, 0.0, 0.0] | units.m
>>> particles.z = [0.0, 0.0, 0.0] | units.m
>>> particles.nearest_neighbour().x
quantity<[3.0, 4.0, 3.0] m>
>>> field_particles = Particles(2)
>>> field_particles.x = [0.0, 2.5] | units.m
>>> field_particles.y = [0.0, 0.0] | units.m
>>> field_particles.z = [0.0, 0.0] | units.m
>>> particles.nearest_neighbour(field_particles).x
quantity<[0.0, 2.5, 2.5] m>
"""
if neighbours is None:
other_particles = particles
else:
other_particles = neighbours
if len(particles) * len(other_particles) * 3 > max_array_length:
neighbour_indices = []
particles_per_batch = max(1, max_array_length // (3 * len(other_particles)))
number_of_batches = (len(particles) - 1) // particles_per_batch + 1
indices_in_each_batch = [numpy.arange(particles_per_batch) + i*particles_per_batch for i in range(number_of_batches-1)]
indices_in_each_batch.append(numpy.arange(indices_in_each_batch[-1][-1]+1, len(particles)))
for indices in indices_in_each_batch:
distances_squared = particles[indices].distances_squared(other_particles)
if neighbours is None:
diagonal_indices = (numpy.arange(len(indices)), indices)
distances_squared.number[diagonal_indices] = numpy.inf # can't be your own neighbour
neighbour_indices.append(distances_squared.argmin(axis=1))
return other_particles[numpy.concatenate(neighbour_indices)]
distances_squared = particles.distances_squared(other_particles)
if neighbours is None:
diagonal_indices = numpy.diag_indices(len(particles))
distances_squared.number[diagonal_indices] = numpy.inf # can't be your own neighbour
return other_particles[distances_squared.argmin(axis=1)]
def velocity_diff_squared(particles,field_particles):
"""
    Returns the squared velocity difference between each particle in this set
    and each of the field particles.
    :argument field_particles: the particles relative to which the velocity differences are calculated
>>> from amuse.datamodel import Particles
>>> field_particles = Particles(2)
>>> field_particles.vx = [0.0, 2.0] | units.m
>>> field_particles.vy = [0.0, 0.0] | units.m
>>> field_particles.vz = [0.0, 0.0] | units.m
>>> particles = Particles(3)
>>> particles.vx = [1.0, 3.0, 4] | units.m
>>> particles.vy = [0.0, 0.0, 0.0] | units.m
>>> particles.vz = [0.0, 0.0, 0.0] | units.m
>>> velocity_diff_squared(particles, field_particles)
quantity<[[1.0, 1.0], [9.0, 1.0], [16.0, 4.0]] m**2>
"""
n = len(particles)
dimensions = particles.velocity.shape[-1]
transposed_positions = particles.velocity.reshape([n,1,dimensions])
dxdydz = transposed_positions - field_particles.velocity
return (dxdydz**2).sum(-1)
def Qparameter(parts, distfunc=None):
"""
Calculates the minimum spanning tree Q parameter (Cartwright & Whitworth 2004)
for a projection of the particle set.
:argument distfunc: distfunc is the distance function which can be used to select
the projection plane.
"""
if distfunc is None:
def distfunc(p,q):
return (((p.x-q.x)**2+(p.y-q.y)**2)**0.5).value_in(p.x.unit)
N=len(parts)
graph=Graph()
for p in parts:
d=distfunc(p,parts)
for i,q in enumerate(parts):
if p!=q:
graph.add_edge(p,q, d[i] )
all_edges=graph.all_edges()
ml=reduce(lambda x,y: x+y[0],all_edges,zero )/len(all_edges)
mst=MinimumSpanningTreeFromEdges(all_edges)
mlmst=reduce(lambda x,y: x+y[0],mst, zero )/len(mst)
# normalize
mlmst=mlmst/(N*numpy.pi)**0.5*(N-1)
return mlmst/ml
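# A minimal usage sketch for Qparameter (illustrative only, not part of the
# module API). Assumes the AMUSE Plummer generator; the exact value depends
# on the random realization. Q below ~0.8 indicates substructure, above ~0.8
# a smooth, centrally concentrated distribution (Cartwright & Whitworth 2004).
def _example_qparameter():
    from amuse.ic.plummer import new_plummer_model
    parts = new_plummer_model(100)
    # the default distfunc projects onto the x-y plane
    return parts.Qparameter()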
def connected_components(parts, threshold=None, distfunc=None, verbose=False):
"""
    Return a list of connected-component subsets of the particles; two particles
    are connected if their distfunc value is smaller than the threshold.
:argument threshold: value of the threshold. Must have consistent units with distfunc
:argument distfunc: distance or weight function. Must have consistent units with threshold
"""
if threshold is None:
threshold=1. | parts.x.unit
if distfunc is None:
def distfunc(p,q):
return (((p.x-q.x)**2+(p.y-q.y)**2+(p.z-q.z)**2)**0.5)
if verbose: print("making CC")
tocheck=list(range(len(parts)))
cc=[]
while len(tocheck)>0:
p=tocheck.pop()
stack=[p]
currentcc=[p]
while len(stack)>0 and len(tocheck)>0:
p=stack.pop()
d=distfunc(parts[p],parts[tocheck]).value_in(threshold.unit)
toadd=[ tocheck.pop(i) for i in reversed(range(len(tocheck))) if d[i] < threshold.number ]
stack.extend(toadd)
currentcc.extend(toadd)
cc.append(parts[currentcc])
if verbose: print("done")
if verbose: print("number of CC:",len(cc))
return cc
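# A minimal sketch (illustrative only) of friends-of-friends style grouping
# with connected_components, assuming particles in generic (nbody) units.
# The result is a list of particle subsets, one per connected component.
def _example_connected_components():
    from amuse.ic.plummer import new_plummer_model
    from amuse.units import nbody_system
    parts = new_plummer_model(100)
    groups = parts.connected_components(threshold=0.2 | nbody_system.length)
    return max(groups, key=len)  # the largest connected component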
def minimum_spanning_tree_length(particles):
"""
Calculates the length of the minimum spanning tree (MST) of a set of particles
    using David Eppstein's Python implementation of Kruskal's algorithm.
"""
graph = Graph()
for particle in particles:
others = particles - particle
distances = (particle.position - others.position).lengths()
for other, distance in zip(others, distances):
graph.add_edge(particle, other, distance)
return sum([edge[0] for edge in MinimumSpanningTree(graph)], zero)
MassSegregationRatioResults = namedtuple('MassSegregationRatioResults',
['mass_segregation_ratio', 'uncertainty'])
def mass_segregation_ratio(particles, number_of_particles=20, number_of_random_sets=50,
also_compute_uncertainty=False):
"""
Calculates the mass segregation ratio (Allison et al. 2009, MNRAS 395 1449).
(1) Determine the length of the minimum spanning tree (MST) of the
'number_of_particles' most massive stars; l_massive
(2) Determine the average length of the MST of 'number_of_random_sets' sets
of 'number_of_particles' random stars; l_norm
(3) Determine with what statistical significance l_massive differs from l_norm:
MSR = (l_norm / l_massive) +/- (sigma_norm / l_massive)
:argument number_of_particles: the number of most massive stars for the MST for l_massive
:argument number_of_random_sets: the number of randomly selected subsets for
which the MST is calculated to determine l_norm
:argument also_compute_uncertainty: if True, a namedtuple is returned with (MSR, sigma)
"""
most_massive = particles.sorted_by_attribute("mass")[-number_of_particles:]
l_massive = most_massive.minimum_spanning_tree_length()
l_norms = [] | particles.position.unit
for i in range(number_of_random_sets):
l_norms.append(particles.random_sample(number_of_particles).minimum_spanning_tree_length())
msr = l_norms.mean() / l_massive
if also_compute_uncertainty:
sigma = l_norms.std() / l_massive
return MassSegregationRatioResults(mass_segregation_ratio=msr, uncertainty=sigma)
else:
return msr
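# A usage sketch for mass_segregation_ratio (illustrative only). A Plummer
# model with equal masses is unsegregated, so MSR should be consistent with 1;
# (MSR - 1) exceeding a few times the uncertainty would indicate segregation.
def _example_mass_segregation_ratio():
    from amuse.ic.plummer import new_plummer_model
    parts = new_plummer_model(200)
    result = parts.mass_segregation_ratio(
        number_of_particles=10, also_compute_uncertainty=True)
    return result.mass_segregation_ratio, result.uncertainty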
def mass_segregation_from_nearest_neighbour(particles, number_of_particles=None,
fraction_of_particles=0.01, number_of_random_sets=None, also_compute_uncertainty=False):
"""
Calculates the mass segregation ratio based on the average inverse distance
to nearest neighbours.
(1) Determine average inverse distances to the nearest neighbour for
'number_of_random_sets' sets of 'number_of_particles' random stars;
mean_idnn[number_of_random_sets]
(2) Determine the average inverse distance to the nearest neighbour of the
'number_of_particles' most massive stars; mean_idnn_massive
    (3) Determine with what statistical significance mean_idnn_massive differs
    from the average of the mean_idnn values:
    MSR = mean_idnn_massive / <mean_idnn> +/- sigma(mean_idnn) / <mean_idnn>
:argument number_of_particles: the number of particles in each (random and massive) sample
:argument number_of_random_sets: the number of randomly selected subsets for
which the average inverse distances to the nearest neighbour are calculated
:argument also_compute_uncertainty: if True, a namedtuple is returned with (MSR, sigma)
"""
if number_of_particles is None:
number_of_particles = -int(-fraction_of_particles * len(particles))
if number_of_random_sets is None:
number_of_random_sets = -(-len(particles) // number_of_particles)
mean_idnn = [] | particles.position.unit**-1
for i in range(number_of_random_sets):
sample = particles.random_sample(number_of_particles)
distances = (sample.position - sample.nearest_neighbour().position).lengths()
mean_idnn.append((1.0/distances).mean())
massive = particles.sorted_by_attribute('mass')[-number_of_particles:]
distances = (massive.position - massive.nearest_neighbour().position).lengths()
mean_idnn_massive = (1.0/distances).mean()
msr = mean_idnn_massive / mean_idnn.mean()
if also_compute_uncertainty:
sigma = mean_idnn.std() / mean_idnn.mean()
return MassSegregationRatioResults(mass_segregation_ratio=msr, uncertainty=sigma)
else:
return msr
def correlation_dimension(particles, max_array_length=10000000):
"""
Computes the correlation dimension, a measure of the fractal dimension of a
set of points. The measure is based on counting the number of pairs with a
mutual distance less than 'eps', for varying values of 'eps'.
"""
size = (particles.position.max(axis=0) - particles.position.min(axis=0)).max()
eps2_range = (size / 2**numpy.arange(2.0, 6.0, 0.1))**2
if 3 * len(particles)**2 > max_array_length:
counts_per_batch = []
particles_per_batch = max(1, max_array_length // (3 * len(particles)))
number_of_batches = (len(particles) - 1) // particles_per_batch + 1
indices_in_each_batch = [numpy.arange(particles_per_batch) + i*particles_per_batch for i in range(number_of_batches-1)]
indices_in_each_batch.append(numpy.arange(indices_in_each_batch[-1][-1]+1, len(particles)))
for indices in indices_in_each_batch:
distances_squared = particles[indices].distances_squared(particles)
diagonal_indices = (numpy.arange(len(indices)), indices)
distances_squared.number[diagonal_indices] = numpy.inf # can't be your own neighbour
counts_per_batch.append([(distances_squared < eps2).sum() for eps2 in eps2_range])
number_of_close_pairs = numpy.array(counts_per_batch).sum(axis=0)
else:
distances_squared = particles.distances_squared(particles)
diagonal_indices = numpy.diag_indices(len(particles))
distances_squared.number[diagonal_indices] = numpy.inf # can't be your own neighbour
number_of_close_pairs = numpy.array([(distances_squared < eps2).sum() for eps2 in eps2_range])
upper_index = numpy.searchsorted(-number_of_close_pairs, 0) # Prevent log(0)
x = 0.5*numpy.log10(eps2_range.number[:upper_index])
y = numpy.log10(number_of_close_pairs[:upper_index])
fit_coefficients = numpy.polyfit(x, y, 1)
return fit_coefficients[0]
def box_counting_dimension(particles):
"""
Computes the box-counting dimension, a measure of the fractal dimension of a
set of points. The measure is based on counting the number of boxes required
to cover the set, within a regular grid of cubic, equal-size boxes, for
varying box sizes.
"""
moved_positions = particles.position - particles.position.min(axis=0)
scaled_positions = moved_positions * (0.9999 / moved_positions.max())
boxes_per_dimension_range = (2**numpy.arange(1.0, 7.0, 0.05)).round()
number_of_boxes_filled = []
for boxes_per_dimension in boxes_per_dimension_range:
number_of_boxes_filled.append(len(set(
[(r[0], r[1], r[2]) for r in (scaled_positions * boxes_per_dimension).astype(int)]
)))
number_of_boxes_filled = numpy.array(number_of_boxes_filled, dtype=numpy.float64)
# When #filled-boxes ~ #particles, the dimension goes to 0. Exclude those values:
upper_index = numpy.searchsorted(number_of_boxes_filled, 0.2 * len(particles))
x = numpy.log(boxes_per_dimension_range[:upper_index])
y = numpy.log(number_of_boxes_filled[:upper_index])
fit_coefficients = numpy.polyfit(x, y, 1)
return fit_coefficients[0]
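# A sketch (illustrative only) contrasting the two fractal-dimension
# estimators above. For a roughly homogeneous 3D distribution both should
# approach 3; clumpy, fractal initial conditions give lower values. The
# estimates are noisy for small particle numbers.
def _example_fractal_dimensions():
    from amuse.ic.plummer import new_plummer_model
    parts = new_plummer_model(1000)
    return parts.correlation_dimension(), parts.box_counting_dimension()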
def dynamical_timescale(particles, mass_fraction=None, G=constants.G):
"""
Compute the dynamical (i.e. free-fall) timescale of the particles set. This is
the time it would take for a pressureless homogeneous sphere of this size and
average density to collapse. If 'mass_fraction' is supplied, only the inner
particles are considered in the computation of the size of the sphere. For
example, 'mass_fraction=0.95' ignores the positions of the outer particles
comprising 5% by mass (useful for density profiles with long tails).
"""
if mass_fraction is None:
total_radius = particles.total_radius()
else:
total_radius = particles.LagrangianRadii(mf=[mass_fraction], cm=particles.center_of_mass())[0][0]
return numpy.pi * (total_radius**3 / (8.0 * G * particles.total_mass())).sqrt()
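# A usage sketch for dynamical_timescale (illustrative only). For particles
# in generic (nbody) units pass G=nbody_system.G explicitly, since the
# default is the SI value constants.G.
def _example_dynamical_timescale():
    from amuse.ic.plummer import new_plummer_model
    from amuse.units import nbody_system
    parts = new_plummer_model(100)
    return parts.dynamical_timescale(mass_fraction=0.95, G=nbody_system.G)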
AbstractParticleSet.add_global_function_attribute("center_of_mass", center_of_mass)
AbstractParticleSet.add_global_function_attribute("center_of_mass_velocity", center_of_mass_velocity)
AbstractParticleSet.add_global_function_attribute("kinetic_energy", kinetic_energy)
AbstractParticleSet.add_global_function_attribute("potential_energy", potential_energy)
AbstractParticleSet.add_global_function_attribute("thermal_energy", thermal_energy)
AbstractParticleSet.add_global_function_attribute("virial_radius", virial_radius)
AbstractParticleSet.add_global_function_attribute("total_mass", total_mass)
AbstractParticleSet.add_global_function_attribute("total_radius", total_radius)
AbstractParticleSet.add_global_function_attribute("total_momentum", total_momentum)
AbstractParticleSet.add_global_function_attribute("total_angular_momentum", total_angular_momentum)
AbstractParticleSet.add_global_function_attribute("moment_of_inertia", moment_of_inertia)
AbstractParticleSet.add_global_function_attribute("dynamical_timescale", dynamical_timescale)
AbstractParticleSet.add_global_function_attribute("potential_energy_in_field", potential_energy_in_field)
AbstractParticleSet.add_global_vector_attribute("position", ["x","y","z"])
AbstractParticleSet.add_global_vector_attribute("velocity", ["vx","vy","vz"])
AbstractParticleSet.add_global_vector_attribute("acceleration", ["ax","ay","az"])
AbstractParticleSet.add_global_vector_attribute("angular_momentum", ["lx","ly","lz"])
AbstractParticleSet.add_global_vector_attribute("oblateness", ["j2","j4","j6"])
AbstractParticleSet.add_global_function_attribute("specific_kinetic_energy", specific_kinetic_energy, particle_specific_kinetic_energy)
AbstractParticleSet.add_global_function_attribute("potential", particleset_potential, particle_potential)
AbstractParticleSet.add_global_function_attribute("move_to_center", move_to_center)
AbstractParticleSet.add_global_function_attribute("scale_to_standard", scale_to_standard)
AbstractParticleSet.add_global_function_attribute("rotate", rotation.rotate)
AbstractParticleSet.add_global_function_attribute("add_spin", rotation.add_spin)
AbstractParticleSet.add_global_function_attribute("binaries", get_binaries)
AbstractParticleSet.add_global_function_attribute("get_binaries", get_binaries)
AbstractParticleSet.add_global_function_attribute("densitycentre_coreradius_coredens", densitycentre_coreradius_coredens)
AbstractParticleSet.add_global_function_attribute("new_particle_from_cluster_core", new_particle_from_cluster_core)
AbstractParticleSet.add_global_function_attribute("cluster_core", new_particle_from_cluster_core)
AbstractParticleSet.add_global_function_attribute("bound_subset", bound_subset)
AbstractParticleSet.add_global_function_attribute("mass_segregation_Gini_coefficient", mass_segregation_Gini_coefficient)
AbstractParticleSet.add_global_function_attribute("LagrangianRadii", LagrangianRadii)
AbstractParticleSet.add_global_function_attribute("find_closest_particle_to", find_closest_particle_to)
AbstractParticleSet.add_global_function_attribute("distances_squared", distances_squared)
AbstractParticleSet.add_global_function_attribute("nearest_neighbour", nearest_neighbour)
AbstractParticleSet.add_global_function_attribute("Qparameter", Qparameter)
AbstractParticleSet.add_global_function_attribute("connected_components", connected_components)
AbstractParticleSet.add_global_function_attribute("minimum_spanning_tree_length", minimum_spanning_tree_length)
AbstractParticleSet.add_global_function_attribute("mass_segregation_ratio", mass_segregation_ratio)
AbstractParticleSet.add_global_function_attribute("mass_segregation_from_nearest_neighbour", mass_segregation_from_nearest_neighbour)
AbstractParticleSet.add_global_function_attribute("correlation_dimension", correlation_dimension)
AbstractParticleSet.add_global_function_attribute("box_counting_dimension", box_counting_dimension)
AbstractParticleSet.add_global_vector_attribute("natal_kick_velocity", ["natal_kick_x","natal_kick_y","natal_kick_z"])
| 47,337
| 39.668385
| 138
|
py
|
amuse
|
amuse-main/src/amuse/datamodel/grids.py
|
from amuse.support.core import CompositeDictionary, late
from amuse.units import constants
from amuse.units import units
from amuse.units import generic_unit_system
from amuse.units import quantities
from amuse.units.quantities import Quantity
from amuse.units.quantities import VectorQuantity
from amuse.units.quantities import new_quantity
from amuse.units.quantities import zero
from amuse.units.quantities import stack
from amuse.support import exceptions
from amuse.datamodel.base import *
from amuse.datamodel.memory_storage import *
import numpy
from amuse.datamodel import indexing
class AbstractGrid(AbstractSet):
GLOBAL_DERIVED_ATTRIBUTES = {}
def _get_value_of_attribute(self, particle, index, attribute):
if attribute in self._derived_attributes:
return self._derived_attributes[attribute].get_value_for_entity(self, particle, index)
else:
return self._convert_to_entities_or_quantities(self.get_values_in_store(index, [attribute])[0])
def _set_value_of_attribute(self, key, attribute, value):
if attribute in self._derived_attributes:
return self._derived_attributes[attribute].set_value_for_entity(self, key, value)
else:
return self.set_values_in_store(key, [attribute], [value])
def _get_values_for_entity(self, key, attributes):
return self.get_values_in_store(key, attributes)
def _set_values_for_entity(self, key, attributes, values):
return self.set_values_in_store(key, attributes, values)
def _get_particle(self, index):
return GridPoint(index, self._original_set())
def previous_state(self):
return self._private.previous
def savepoint(self, timestamp=None, **attributes):
try:
instance = type(self)()
instance._private.attribute_storage = self._private.attribute_storage.copy()
except:
instance=self.copy() # for the case of subgrid, maybe always ok
instance.collection_attributes.timestamp = timestamp
for name, value in attributes.items():
setattr(instance.collection_attributes, name, value)
instance._private.previous = self._private.previous
self._private.previous = instance
return instance
def get_timestamp(self):
return self.collection_attributes.timestamp
def new_channel_to(self, other, attributes=None, target_names=None):
return GridInformationChannel(self, other, attributes, target_names)
def new_remapping_channel_to(self, other, remapper):
return GridRemappingChannel(self, other, remapper)
def copy(self, memento = None, keep_structure = False, filter_attributes = lambda particle_set, x : True):
attributes = self.get_attribute_names_defined_in_store()
attributes = [x for x in attributes if filter_attributes(self, x)]
values = self.get_values_in_store(Ellipsis, attributes)
result = self._factory_for_new_collection()(*self.shape)
if memento is None:
memento = {}
memento[id(self._original_set())] = result
converted = []
for x in values:
if isinstance(x, LinkedArray):
converted.append(x.copy(memento, keep_structure))
else:
converted.append(x)
result.set_values_in_store(Ellipsis, attributes, converted)
object.__setattr__(result, "_derived_attributes", CompositeDictionary(self._derived_attributes))
result._private.collection_attributes = self._private.collection_attributes._copy_for_collection(result)
return result
def _factory_for_new_collection(self):
return self.__class__
def empty_copy(self):
result = self._factory_for_new_collection()(*self.shape)
result.set_values_in_store(None, [],[])
object.__setattr__(result, "_derived_attributes", CompositeDictionary(self._derived_attributes))
result._private.collection_attributes = self._private.collection_attributes._copy_for_collection(result)
return result
def samplePoint(self, position=None, method="nearest", **kwargs):
if method in ["nearest"]:
return SamplePointOnCellCenter(self, position=position, **kwargs)
elif method in ["interpolation", "linear"]:
return SamplePointWithInterpolation(self, position=position, **kwargs)
else:
raise Exception("unknown sample method")
def samplePoints(self, positions=None, method="nearest", **kwargs):
if method in ["nearest"]:
return SamplePointsOnGrid(self, positions, SamplePointOnCellCenter, **kwargs)
elif method in ["interpolation", "linear"]:
return SamplePointsOnGrid(self, positions, SamplePointWithInterpolation, **kwargs)
else:
raise Exception("unknown sample method")
def __len__(self):
return self.shape[0]
def __iter__(self):
for i in range(self.shape[0]):
yield self[i]
def get_all_indices_in_store(self):
return self.get_all_keys_in_store()
def can_extend_attributes(self):
return self._original_set().can_extend_attributes()
def __str__(self):
dimensionstr = ' x '.join(([str(x) for x in self.shape]))
attributes=self.get_attribute_names_defined_in_store()
settable=self.get_defined_settable_attribute_names()
strings=[a if a in settable else a+" (ro)" for a in attributes]
attrstr= ', '.join(strings)
return "{0}({1}) ( {2} )".format(
self.__class__.__name__,
dimensionstr,
attrstr
)
def iter_history(self):
raise Exception("not implemented")
@property
def history(self):
return reversed(list(self.iter_history()))
def get_timeline_of_attribute(self, attribute):
timeline = []
for x in self.history:
timeline.append((x.collection_attributes.timestamp, getattr(x,attribute)))
return timeline
def get_timeline_of_attribute_as_vector(self, attribute):
timestamps = AdaptingVectorQuantity()
timeline = AdaptingVectorQuantity()
for x in self.history:
timestamps.append(x.collection_attributes.timestamp)
timeline.append(getattr(x,attribute))
return timestamps,timeline
class BaseGrid(AbstractGrid):
def __init__(self, *args, **kwargs):
AbstractGrid.__init__(self)
if "storage" in kwargs:
self._private.attribute_storage = kwargs['storage']
else:
self._private.attribute_storage = InMemoryGridAttributeStorage(*args)
self._private.previous = None
self.collection_attributes.timestamp = None
def can_extend_attributes(self):
return self._private.attribute_storage.can_extend_attributes()
def get_values_in_store(self, indices, attributes, by_key = True):
result = self._private.attribute_storage.get_values_in_store(indices, attributes)
return result
def get_values_in_store_async(self, indices, attributes, by_key = True):
result = self._private.attribute_storage.get_values_in_store_async(indices, attributes)
return result
def set_values_in_store(self, indices, attributes, values, by_key = True):
self._private.attribute_storage.set_values_in_store(indices, attributes, values)
def set_values_in_store_async(self, indices, attributes, values, by_key = True):
return self._private.attribute_storage.set_values_in_store_async(indices, attributes, values)
def get_attribute_names_defined_in_store(self):
return self._private.attribute_storage.get_defined_attribute_names()
def get_defined_settable_attribute_names(self):
return self._private.attribute_storage.get_defined_settable_attribute_names()
def _original_set(self):
return self
def get_all_keys_in_store(self):
return self._private.attribute_storage.get_all_keys_in_store()
def __getitem__(self, index):
return new_subgrid_from_index(self, index)
def iter_cells(self):
shape = numpy.asarray(self.shape)
index = 0 * shape
while index[0] < shape[0]:
yield self._get_gridpoint(tuple(index))
index[-1] += 1
for i in range(len(self.shape) - 1, 0, -1):
if index[i] >= shape[i]:
index[i] = 0
index[i-1] += 1
def _get_gridpoint(self, index):
return GridPoint(index, self)
def number_of_dimensions(self):
return len(self.shape)
@property
def shape(self):
return self._private.attribute_storage.storage_shape()
@property
def size(self):
return numpy.prod(self.shape)
def indices(self):
return numpy.indices(self.shape)
def iter_history(self):
current = self._private.previous
while not current is None:
yield current
current = current._private.previous
@classmethod
def create(cls,*args,**kwargs):
print ("Grid.create deprecated, use new_regular_grid instead")
return new_regular_grid(*args,**kwargs)
def get_axes_names(self):
if hasattr(self.collection_attributes, "axes_names"):
return self.collection_attributes.axes_names
if hasattr(self, "_axes_names"):
return self._axes_names
if "position" in self._derived_attributes:
return self._derived_attributes["position"].attribute_names
if "position" in self.GLOBAL_DERIVED_ATTRIBUTES:
return self.GLOBAL_DERIVED_ATTRIBUTES["position"].attribute_names
raise Exception("do not know how to find axes_names")
def set_axes_names(self, value):
self.add_vector_attribute('position', value)
class UnstructuredGrid(BaseGrid):
GLOBAL_DERIVED_ATTRIBUTES=CompositeDictionary(BaseGrid.GLOBAL_DERIVED_ATTRIBUTES)
class StructuredBaseGrid(BaseGrid):
GLOBAL_DERIVED_ATTRIBUTES=CompositeDictionary(BaseGrid.GLOBAL_DERIVED_ATTRIBUTES)
class StructuredGrid(StructuredBaseGrid):
GLOBAL_DERIVED_ATTRIBUTES=CompositeDictionary(StructuredBaseGrid.GLOBAL_DERIVED_ATTRIBUTES)
class RectilinearBaseGrid(StructuredBaseGrid):
GLOBAL_DERIVED_ATTRIBUTES=CompositeDictionary(StructuredBaseGrid.GLOBAL_DERIVED_ATTRIBUTES)
class RectilinearGrid(RectilinearBaseGrid):
GLOBAL_DERIVED_ATTRIBUTES=CompositeDictionary(RectilinearBaseGrid.GLOBAL_DERIVED_ATTRIBUTES)
class RegularBaseGrid(RectilinearBaseGrid):
GLOBAL_DERIVED_ATTRIBUTES=CompositeDictionary(RectilinearBaseGrid.GLOBAL_DERIVED_ATTRIBUTES)
class RegularGrid(RegularBaseGrid):
GLOBAL_DERIVED_ATTRIBUTES=CompositeDictionary(RegularBaseGrid.GLOBAL_DERIVED_ATTRIBUTES)
class CartesianBaseGrid(RegularBaseGrid):
GLOBAL_DERIVED_ATTRIBUTES=CompositeDictionary(RegularBaseGrid.GLOBAL_DERIVED_ATTRIBUTES)
class CartesianGrid(CartesianBaseGrid):
GLOBAL_DERIVED_ATTRIBUTES=CompositeDictionary(CartesianBaseGrid.GLOBAL_DERIVED_ATTRIBUTES)
# maintains compatibility with previous def.
class Grid(RegularGrid):
GLOBAL_DERIVED_ATTRIBUTES=CompositeDictionary(RegularBaseGrid.GLOBAL_DERIVED_ATTRIBUTES)
def new_cartesian_grid(shape, cellsize, axes_names = "xyz",offset=None):
"""Returns a cartesian grid with cells of size cellsize.
"""
if len(axes_names)<len(shape):
raise Exception("provide enough axes names")
result = CartesianGrid(*shape)
all_indices = numpy.indices(shape)+0.5
if offset is None:
offset=[0.*cellsize]*len(shape)
def positions(indices):
return cellsize * indices
for indices, n, axis_name, of in zip(all_indices, shape, axes_names,offset):
setattr(result, axis_name, positions(indices)+of)
result.add_vector_attribute("position", axes_names[0:len(shape)])
object.__setattr__(result,"_grid_type","cartesian") # for now for convenience, eventually to be converted in seperate classes
object.__setattr__(result,"_cellsize",cellsize)
return result
def new_regular_grid(shape, lengths, axes_names = "xyz",offset=None):
"""Returns a regular grid with cells between 0 and lengths.
"""
if len(axes_names)<len(shape):
raise Exception("provide enough axes names")
if len(lengths)!=len(shape):
raise Exception("shape and lengths do not conform")
result = RegularGrid(*shape)
all_indices = numpy.indices(shape)+0.5
if offset is None:
offset=[0.*l for l in lengths]
def positions(indices, length, n):
return length * (indices/n)
for indices, length, n, axis_name, of in zip(all_indices, lengths, shape, axes_names,offset):
setattr(result, axis_name, positions(indices, length, n)+of)
result.add_vector_attribute("position", axes_names[0:len(shape)])
object.__setattr__(result,"_grid_type","regular")
object.__setattr__(result,"_lengths",lengths)
return result
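# A sketch (illustrative only) contrasting the two constructors above: a
# cartesian grid is specified by its cell size, a regular grid by the total
# extent per axis. Both yield equally spaced, cell-centered positions.
def _example_cartesian_vs_regular():
    from amuse.units import units
    cart = new_cartesian_grid((4, 4), 1. | units.m)      # 4x4 cells of 1 m
    reg = new_regular_grid((4, 4), [4., 4.] | units.m)   # 4x4 cells spanning 4 m
    return (cart.position - reg.position)                # zero by construction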
def new_rectilinear_grid(shape, axes_cell_boundaries=None, cell_centers=None, axes_names = "xyz",offset=None):
"""Returns a rectilinear grid with cells at positions midway given cell boundaries.
"""
if len(axes_names)<len(shape):
raise Exception("provide enough axes names")
if not (axes_cell_boundaries or cell_centers):
raise Exception("provide cell boundaries or cell_centers")
if axes_cell_boundaries and len(axes_cell_boundaries)!=len(shape):
raise Exception("length of shape and axes positions do not conform")
if axes_cell_boundaries:
for s,b in zip(shape,axes_cell_boundaries):
if len(b)!=s+1:
raise Exception("number of cell boundary arrays error (must be {0} instead of {1})".format(s+1,len(b)))
if cell_centers and len(cell_centers)!=len(shape):
raise Exception("length of shape and axes positions do not conform")
if cell_centers:
        for s,b in zip(shape,cell_centers):
            if len(b)!=s:
                raise Exception("number of cell_center arrays error (must be {0} instead of {1})".format(s,len(b)))
result = RectilinearGrid(*shape)
all_indices = numpy.indices(shape)
#~ axes_cell_boundaries=[numpy.sort(b) for b in axes_cell_boundaries]
if axes_cell_boundaries:
positions=[(b[1:]+b[:-1])/2 for b in axes_cell_boundaries]
if cell_centers:
positions=cell_centers
if offset is None:
offset=[0.*l[0] for l in positions]
for indices, axis_pos, axis_name, of in zip(all_indices, positions, axes_names, offset):
setattr(result, axis_name, axis_pos[indices]+of)
result.add_vector_attribute("position", axes_names[0:len(shape)])
object.__setattr__(result,"_grid_type","rectilinear")
object.__setattr__(result,"_axes_cell_boundaries",axes_cell_boundaries)
object.__setattr__(result,"_cell_centers",cell_centers)
return result
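# A sketch (illustrative only) of new_rectilinear_grid with non-uniform
# spacing: five boundaries per axis define a four-cell axis, with cell
# centers midway between consecutive boundaries.
def _example_rectilinear_grid():
    boundaries = numpy.array([0., 1., 2., 4., 8.])
    grid = new_rectilinear_grid((4, 4), [boundaries, boundaries], axes_names="xy")
    return grid.x[:, 0]  # cell centers: 0.5, 1.5, 3.0, 6.0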
def new_structured_grid(shape, cell_corners, cell_positions=None, axes_names = "xyz", offset=None):
"""Returns a structured grid with cells with given corners and cell_positions.
    If not present, cell positions default to the average of the corner positions.
"""
if len(axes_names)<len(shape):
raise Exception("provide enough axes names")
if len(cell_corners)!=len(shape):
raise Exception("dimensions of shape and cell_boundaries do not conform")
for c in cell_corners:
if not numpy.all([s1==s2+1 for s1,s2 in zip(c.shape,shape)]):
shape1=[s+1 for s in shape]
raise Exception("size of cell_corner arrays must be {0} instead of {1}".format(shape1.__str__(),c.shape.__str__()))
if cell_positions is None:
cell_positions=[]
for cc in cell_corners:
cp=numpy.zeros(shape) * cc.flat[0]
for i in range(2**len(shape)):
slicing=()
for j in range(len(shape)):
if i & 2**j:
slicing+=(slice(1,None),)
else:
slicing+=(slice(None,-1),)
cp=cp+cc[slicing]
cell_positions.append(cp/2**len(shape))
if len(cell_positions)!=len(shape):
raise Exception("dimensions of shape and cell_positions do not conform")
    for c in cell_positions:
        if not numpy.all([s1==s2 for s1,s2 in zip(c.shape,shape)]):
            raise Exception("size of cell_position arrays must be {0} instead of {1}".format(shape.__str__(),c.shape.__str__()))
if offset is None:
offset=[0.*l.flat[0] for l in cell_positions]
result = StructuredGrid(*shape)
for axis_name, pos, of in zip(axes_names, cell_positions, offset):
setattr(result, axis_name, pos + of)
result.add_vector_attribute("position", axes_names[0:len(shape)])
object.__setattr__(result,"_grid_type","structured")
object.__setattr__(result,"_cell_corners", cell_corners)
return result
def new_unstructured_grid(size, num_corners, cell_corners, cell_positions=None, axes_names="xyz", offset=None):
"""Returns an unstructured grid with cells with given corners and cell_positions.
    If not present, cell positions default to the average of the corner positions.
"""
    dimensions = cell_corners.size // (num_corners * size)
if len(axes_names)<dimensions:
raise Exception("provide enough axes names")
    if len(cell_corners.shape) != 2:
        raise Exception("incorrect shape for cell_corners, the number of dimensions of the array should be exactly two (dimensions, size * corners)")
if cell_corners.shape[0] != dimensions:
raise Exception("incorrect shape for cell_corners, first dimension should equal the number of dimensions of the space in which the grid is defined")
if cell_corners.shape[1] != size * num_corners:
raise Exception("incorrect shape for cell_corners, second dimension should equal the grid size times the number of corners per cell")
if cell_positions is None:
cell_positions=[]
for cc in cell_corners:
c = cc.reshape(size, num_corners)
cp=numpy.zeros(size)
for i in range(size):
cp[i] = c[i].sum() / num_corners
cell_positions.append(cp)
cell_positions = numpy.array(cell_positions)
if len(cell_positions.shape) != 2:
raise Exception("incorrect shape for cell_positions, the number of dimensions of the array should be exactly two (dimensions, size)")
if cell_positions.shape[0] != dimensions:
raise Exception("dimensions of cell_positions and cell_corners do not conform")
if cell_positions.shape[1] != size:
raise Exception("size of cell_positions and size do not conform")
if offset is None:
offset=[0.*l.flat[0] for l in cell_positions]
result = UnstructuredGrid(size)
for axis_name, pos, of in zip(axes_names, cell_positions, offset):
setattr(result, axis_name, pos + of)
result.add_vector_attribute("position", axes_names[0:dimensions])
object.__setattr__(result,"_grid_type","unstructured")
object.__setattr__(result,"_num_corners", num_corners)
object.__setattr__(result,"_cell_corners", cell_corners)
return result
class SubGrid(AbstractGrid):
def __init__(self, grid, indices):
AbstractGrid.__init__(self, grid)
self._private.previous=None
self._private.grid = grid
self._private.indices = indexing.normalize_slices(grid.shape,indices)
self._private.collection_attributes=grid.collection_attributes
def _original_set(self):
return self._private.grid
def previous_state(self):
previous=self._private.previous
if previous:
return previous
previous=self._private.grid.previous_state()
if previous:
return previous[self._private.indices]
return previous
def get_values_in_store(self, indices, attributes, by_key = True):
normalized_indices = indexing.normalize_slices(self.shape,indices)
combined_index = indexing.combine_indices(self._private.indices, normalized_indices)
result = self._private.grid.get_values_in_store(combined_index, attributes)
return result
def get_values_in_store_async(self, indices, attributes, by_key = True):
normalized_indices = indexing.normalize_slices(self.shape,indices)
combined_index = indexing.combine_indices(self._private.indices, normalized_indices)
result = self._private.grid.get_values_in_store_async(combined_index, attributes)
return result
def set_values_in_store(self, indices, attributes, values, by_key = True):
normalized_indices = indexing.normalize_slices(self.shape,indices)
combined_index = indexing.combine_indices(self._private.indices, normalized_indices)
self._private.grid.set_values_in_store(combined_index, attributes, values)
def set_values_in_store_async(self, indices, attributes, values, by_key = True):
normalized_indices = indexing.normalize_slices(self.shape,indices)
combined_index = indexing.combine_indices(self._private.indices, normalized_indices)
return self._private.grid.set_values_in_store_async(combined_index, attributes, values)
def get_all_keys_in_store(self):
return Ellipsis
def number_of_dimensions(self):
return indexing.number_of_dimensions_after_index(self._original_set().number_of_dimensions(), self._private.indices)
def __getitem__(self, index):
normalized_index= indexing.normalize_slices(self.shape,index)
combined_index = indexing.combine_indices(self._private.indices, normalized_index)
return new_subgrid_from_index(self._original_set(), combined_index)
def get_attribute_names_defined_in_store(self):
return self._private.grid.get_attribute_names_defined_in_store()
def get_defined_settable_attribute_names(self):
return self._private.grid.get_defined_settable_attribute_names()
@property
def shape(self):
return indexing.shape_after_index(self._private.grid.shape, self._private.indices )
def indices(self):
return [x[self._private.indices] for x in self._original_set().indices()]
def __eq__(self, other):
if self._private.grid != other._private.grid:
return False
elif self.shape != other.shape:
return False
else:
if numpy.all(numpy.array(self.indices())==numpy.array(other.indices())):
return True
else:
return False
def __ne__(self,other):
return not(self==other)
def _factory_for_new_collection(self):
return Grid
def iter_history(self):
if self._private.previous:
current = self._private.previous
while not current is None:
yield current
current = current._private.previous
return
current = self._original_set().previous_state()
while not current is None:
yield current[self._private.indices]
current = current.previous_state()
class GridPoint(object):
def __init__(self, index, grid):
object.__setattr__(self,"index",index)
object.__setattr__(self,"grid",grid)
    def __setattr__(self, name_of_the_attribute, new_value_for_the_attribute):
        try:
            self.grid._set_value_of_attribute(self.index, name_of_the_attribute, new_value_for_the_attribute)
        except Exception as ex:
            # re-raise the original exception; it is more informative than a
            # generic AttributeError would be
            raise
def __getattr__(self, name_of_the_attribute):
return self.grid._get_value_of_attribute(self, self.index, name_of_the_attribute)
def __eq__(self, other):
return isinstance(other, type(self)) and other.index == self.index and other.grid == self.grid
def __ne__(self, other):
return not(isinstance(other, type(self)) and other.index == self.index and other.grid == self.grid)
def get_containing_set(self):
return self.grid
def iter_history(self):
current = self.get_containing_set().previous_state()
while not current is None:
yield current[self.index]
current = current.previous_state()
@property
def history(self):
return reversed(list(self.iter_history()))
def get_timeline_of_attribute(self, attribute):
timeline = []
for x in self.history:
timeline.append((x.grid.collection_attributes.timestamp, getattr(x,attribute)))
return timeline
def get_timeline_of_attribute_as_vector(self, attribute):
timestamps = AdaptingVectorQuantity()
timeline = AdaptingVectorQuantity()
for x in self.history:
timestamps.append(x.grid.collection_attributes.timestamp)
timeline.append(getattr(x,attribute))
return timestamps,timeline
def new_subgrid_from_index(grid, index):
if indexing.number_of_dimensions_after_index(grid.number_of_dimensions(), index) == 0:
return GridPoint(index, grid)
else:
return SubGrid(grid, index)
class GridRemappingChannel(object):
"""
A channel to remap attributes from one grid to another.
"""
def __init__(self, source, target, remapper):
self.source = source
self.target = target
if callable(remapper):
self.remapper = remapper( source, target)
else:
self.remapper = remapper
def get_overlapping_attributes(self):
from_names = self.source.get_attribute_names_defined_in_store()
to_names = self.target.get_defined_settable_attribute_names()
names_to_copy = set(from_names).intersection(set(to_names))
return list(names_to_copy)
def copy_attributes(self, attributes, target_names=None):
self.remapper.forward_mapping(attributes, target_names)
def copy(self):
if not self.target.can_extend_attributes():
self.copy_overlapping_attributes()
else:
self.copy_all_attributes()
def copy_all_attributes(self):
names_to_copy = self.source.get_attribute_names_defined_in_store()
self.copy_attributes(list(names_to_copy))
def copy_overlapping_attributes(self):
names_to_copy = self.get_overlapping_attributes()
self.copy_attributes(names_to_copy)
class GridInformationChannel(object):
"""
A channel to copy attributes from one grid to another.
For each dimension copies cells from 0 - min(grid0.size, grid1.size).
"""
def __init__(self, source, target, attributes=None, target_names=None):
self.source = source
self.target = target
self.attributes = attributes
self.target_names = target_names
self._reindex()
def _reindex(self):
source_shape = self.source.shape
target_shape = self.target.shape
if len(source_shape) != len(target_shape):
raise exceptions.AmuseException("The source and target grids do not have the same dimensions, cannot use this channel")
index = [numpy.s_[0:min(x,y)] for x,y in zip(source_shape, target_shape)]
index = tuple(index)
self.index = index
def reverse(self):
if self.target_names is None:
attributes = self.attributes
target_names = self.target_names
else:
attributes = self.target_names
target_names = self.attributes
return GridInformationChannel(
self.target,
self.source,
attributes,
target_names
)
def get_values(self, attributes):
values = self.source.get_values_in_store(self.index, attributes)
converted = []
for x in values:
if isinstance(x, LinkedArray):
converted.append(x.copy_with_link_transfer(self.source, self.target))
else:
converted.append(x)
return converted
def get_overlapping_attributes(self):
from_names = self.source.get_attribute_names_defined_in_store()
to_names = self.target.get_defined_settable_attribute_names()
names_to_copy = set(from_names).intersection(set(to_names))
return list(names_to_copy)
def copy_attributes(self, attributes, target_names = None):
if target_names is None:
target_names = attributes
converted=self.get_values(attributes)
self.target.set_values_in_store(self.index, target_names, converted)
def copy(self):
if not self.attributes is None:
self.copy_attributes(self.attributes, self.target_names)
elif not self.target.can_extend_attributes():
self.copy_overlapping_attributes()
else:
self.copy_all_attributes()
def copy_all_attributes(self):
names_to_copy = self.source.get_attribute_names_defined_in_store()
self.copy_attributes(list(names_to_copy))
def copy_overlapping_attributes(self):
names_to_copy = self.get_overlapping_attributes()
self.copy_attributes(names_to_copy)
def transform_values(self, attributes, f):
values = self.source.get_values_in_store(self.index, attributes)
return f(*values)
def transform(self, target, function, source):
""" Copy and transform values of one attribute from the source set to the target set.
:argument target: name of the attributes in the target set
:argument function: function used for transform, should return tuple
:argument source: name of the attribute in the source set
>>> from amuse.datamodel import Grid
>>> grid1 = Grid(2)
>>> grid2 = Grid(2)
>>> grid1.attribute1 = 1
>>> grid1.attribute2 = 2
>>> channel = grid1.new_channel_to(grid2)
>>> channel.transform(["attribute3","attribute4"], lambda x,y: (y+x,y-x), ["attribute1","attribute2"])
        >>> print(grid2.attribute3)
        [3 3]
        >>> print(grid2.attribute4)
        [1 1]
"""
if function is None:
function=lambda *x : x
if not self.target.can_extend_attributes():
target_attributes = self.target.get_defined_settable_attribute_names()
if not set(target).issubset(set(target_attributes)):
raise Exception("trying to set unsettable attributes {0}".format(
list(set(target)-set(target_attributes))) )
converted=self.transform_values(source, function)
if len(converted) != len(target):
raise Exception("function {0} returns {1} values while target attributes are {2} of length {3}".format(
function.__name__, len(converted), target, len(target)))
self.target.set_values_in_store(self.index, target, converted)
class SamplePointOnCellCenter(object):
def __init__(self, grid, point=None, **kwargs):
self.grid = grid
self.point = self.grid._get_array_of_positions_from_arguments(pos=point, **kwargs)
@late
def position(self):
return self.cell.position
@late
def index(self):
return self.grid.get_index(self.point)
@late
def isvalid(self):
return numpy.logical_and(
numpy.all(self.index >= self.grid.get_minimum_index()[:len(self.index)]),
numpy.all(self.index <= self.grid.get_maximum_index()[:len(self.index)])
)
@late
def cell(self):
return self.grid[tuple(self.index)]
def get_value_of_attribute(self, name_of_the_attribute):
return getattr(self.cell, name_of_the_attribute)
def __getattr__(self, name_of_the_attribute):
return self.get_value_of_attribute(name_of_the_attribute)
class SamplePointWithInterpolation(object):
"""
Vxyz =
V000 (1 - x) (1 - y) (1 - z) +
V100 x (1 - y) (1 - z) +
V010 (1 - x) y (1 - z) +
V001 (1 - x) (1 - y) z +
V101 x (1 - y) z +
V011 (1 - x) y z +
V110 x y (1 - z) +
V111 x y z
"""
def __init__(self, grid, point=None, **kwargs):
self.grid = grid
self.point = self.grid._get_array_of_positions_from_arguments(pos=point, **kwargs)
@late
def position(self):
return self.point
@late
def index(self):
return self.grid.get_index(self.point)
@late
def index_for_000_cell(self):
offset = self.point - self.grid[0,0,0].position
indices = (offset / self.grid.cellsize())
return numpy.floor(indices).astype(numpy.int32)
@late
def index_for_111_cell(self):
return self.index_for_000_cell + [1,1,1]
@late
def surrounding_cell_indices(self):
cell000 = self.index_for_000_cell
translations = [
[0, 0, 0],
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[1, 0, 1],
[0, 1, 1],
[1, 1, 0],
[1, 1, 1],
]
return cell000 + translations
@late
def weighing_factors(self):
x0,y0,z0 = self.grid[tuple(self.index_for_000_cell)].position
x1,y1,z1 = self.grid[tuple(self.index_for_000_cell + [1,1,1])].position
x,y,z = self.point
dx1 = (x1 - x) / (x1 - x0)
dy1 = (y1 - y) / (y1 - y0)
dz1 = (z1 - z) / (z1 - z0)
dx0 = (x - x0) / (x1 - x0)
dy0 = (y - y0) / (y1 - y0)
dz0 = (z - z0) / (z1 - z0)
result = numpy.asarray([
dx1 * dy1 * dz1,
dx0 * dy1 * dz1,
dx1 * dy0 * dz1,
dx1 * dy1 * dz0,
dx0 * dy1 * dz0,
dx1 * dy0 * dz0,
dx0 * dy0 * dz1,
dx0 * dy0 * dz0
] )
return result
@late
def surrounding_cells(self):
return [self.grid[tuple(x)] for x in self.surrounding_cell_indices]
@late
def isvalid(self):
return numpy.logical_and(
numpy.all(self.index_for_000_cell >= self.grid.get_minimum_index()[:len(self.index)]),
numpy.all(self.index_for_111_cell <= self.grid.get_maximum_index()[:len(self.index)])
)
def get_values_of_attribute(self, name_of_the_attribute):
result = quantities.AdaptingVectorQuantity()
for x in self.surrounding_cells:
result.append(getattr(x, name_of_the_attribute))
return result
def __getattr__(self, name_of_the_attribute):
values = self.get_values_of_attribute(name_of_the_attribute)
return (values * self.weighing_factors).sum()
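# A sampling sketch (illustrative only). It assumes the standard grid helper
# attributes (cellsize, get_index, get_minimum_index, ...) registered
# elsewhere in amuse.datamodel are available, as in a normal AMUSE
# installation.
def _example_sample_point():
    from amuse.units import units
    grid = new_regular_grid((4, 4, 4), [1., 1., 1.] | units.m)
    grid.rho = numpy.ones((4, 4, 4)) | units.kg / units.m**3
    point = grid.samplePoint([0.4, 0.5, 0.6] | units.m, method="linear")
    return point.rho  # trilinear interpolation over the 8 surrounding cells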
class SamplePointsOnGrid(object):
def __init__(self, grid, points=None, samples_factory = SamplePointWithInterpolation, **kwargs):
self.grid = grid
points=self.grid._get_array_of_positions_from_arguments(pos=points,**kwargs)
self.samples = [samples_factory(grid, x) for x in points]
self.samples = [x for x in self.samples if x.isvalid ]
@late
def indices(self):
for x in self.samples:
yield x.index
@late
def positions(self):
for x in self.samples:
yield x.position
def __getattr__(self, name_of_the_attribute):
result = quantities.AdaptingVectorQuantity()
for x in self.samples:
result.append(getattr(x, name_of_the_attribute))
return result
    def __iter__(self):
        for x in range(len(self)):
            yield self[x]
def __getitem__(self, index):
return self.samples[index]
def __len__(self):
return len(self.samples)
class SamplePointsOnMultipleGrids(object):
def __init__(self, grids, points, samples_factory = SamplePointWithInterpolation, index_factory = None):
self.grids = grids
self.points = points
self.samples_factory = samples_factory
if index_factory is None:
self.index = None
else:
self.index = index_factory(self.grids)
def _grid_for_point(self, point):
if self.index is None:
for grid in self.grids:
if (numpy.all(point >= grid.get_minimum_position()) and
numpy.all(point < grid.get_maximum_position())):
return grid
return None
else:
return self.index.grid_for_point(point)
def filterout_duplicate_indices(self):
previous_grid = None
previous_index = None
filteredout = []
for x in self.samples:
if x.grid is previous_grid and numpy.all(x.index == previous_index):
pass
else:
previous_grid= x.grid
previous_index = x.index
filteredout.append(x)
self.samples = filteredout
def get_samples(self):
result = []
for x in self.points:
grid = self._grid_for_point(x)
if grid is None:
continue
sample = self.samples_factory(grid, x)
if not sample.isvalid:
continue
result.append(sample)
return result
@late
def samples(self):
result = []
for x in self.points:
grid = self._grid_for_point(x)
if grid is None:
continue
sample = self.samples_factory(grid, x)
if not sample.isvalid:
continue
result.append(sample)
return result
@late
def indices(self):
for x in self.samples:
yield x.index
@late
def positions(self):
for x in self.samples:
yield x.position
def __getattr__(self, name_of_the_attribute):
self.get_samples()
result = quantities.AdaptingVectorQuantity()
for x in self.samples:
result.append(getattr(x, name_of_the_attribute))
return result
    def __iter__(self):
        for x in range(len(self)):
            yield self[x]
def __getitem__(self, index):
return self.samples[index]
def __len__(self):
return len(self.samples)
class NonOverlappingGridsIndexer(object):
def __init__(self, grids):
self.grids = grids
self.setup_index()
@late
def minimum_position(self):
result = self.grids[0].get_minimum_position()
for x in self.grids[1:]:
minimum = x.get_minimum_position()
result = result.minimum(minimum)
return result
def setup_index(self):
smallest_boxsize = None
for x in self.grids:
boxsize = x.get_maximum_position() - x.get_minimum_position()
if smallest_boxsize is None:
smallest_boxsize = boxsize
else:
smallest_boxsize = boxsize.minimum(smallest_boxsize)
self.smallest_boxsize = smallest_boxsize
max_index = [0,0,0]
for x in self.grids:
index = (x.get_maximum_position() / smallest_boxsize)
index = numpy.floor(index).astype(numpy.int32)
max_index = numpy.where(index > max_index, index, max_index)
self.grids_on_index = numpy.zeros(max_index, 'int')
for index,x in enumerate(self.grids):
bottom_left = x.get_minimum_position()
index_of_grid = (bottom_left / smallest_boxsize)
size = ((x.get_maximum_position() - x.get_minimum_position()) / smallest_boxsize)
i,j,k = numpy.floor(index_of_grid).astype(numpy.int32)
ni,nj,nk = numpy.floor(size).astype(numpy.int32)
self.grids_on_index[i:i+ni,j:j+nj,k:k+nk] = index
def grid_for_point(self, position):
index = ((position - self.minimum_position) / self.smallest_boxsize)
index = numpy.floor(index).astype(numpy.int32)
index_of_grid = self.grids_on_index[tuple(index)]
return self.grids[index_of_grid]
def grids_for_points(self, points):
index = ((points - self.minimum_position) / self.smallest_boxsize)
index = numpy.floor(index).astype(numpy.int32)
index_of_grid = self.grids_on_index[tuple(index)]
return self.grids[index_of_grid]
# convenience function to convert input arguments to positions (or vector of "points")
def _get_array_of_positions_from_arguments(axes_names, **kwargs):
    if kwargs.get('pos', None) is not None:
        return kwargs['pos']
    if kwargs.get('position', None) is not None:
        return kwargs['position']
coordinates=[kwargs[x] for x in axes_names]
ndim=numpy.ndim(coordinates[0])
if ndim==0:
return VectorQuantity.new_from_scalar_quantities(*coordinates)
result=stack(coordinates)
order=tuple(range(1,ndim+1))+(0,)
return result.transpose(order)
| 42,731
| 37.187668
| 160
|
py
|
amuse
|
amuse-main/src/amuse/datamodel/parameters.py
|
import weakref
import numpy
from amuse.units import nbody_system
from amuse.units import generic_unit_system
from amuse.units import quantities
from amuse.units.core import IncompatibleUnitsException
from amuse.units.quantities import is_quantity
from amuse.support import exceptions
from amuse.support.core import OrderedDictionary
class Parameters(object):
__name__ = 'Parameters'
def __init__(self, definitions, instance):
object.__setattr__(self, '_instance', weakref.ref(instance))
object.__setattr__(self, '_definitions', definitions)
object.__setattr__(self, '_mapping_from_name_to_definition', OrderedDictionary())
object.__setattr__(self, '_mapping_from_name_to_parameter', OrderedDictionary())
self.update()
def update(self):
for x in self._definitions:
self._mapping_from_name_to_definition[x.name] = x
if len(self._definitions)!=len(self._mapping_from_name_to_definition):
raise Exception("Duplicate parameters detected")
def __getattr__(self, name):
#if name.startswith('__'):
# return object.__getattribute__(self, name)
if not name in self._mapping_from_name_to_definition:
raise exceptions.CoreException(
f"tried to get unknown parameter '{name}' "
f"for a '{type(self._instance()).__name__}' object"
)
self._instance().before_get_parameter()
return self.get_parameter(name).get_value()
def __setattr__(self, name, value):
if not name in self._mapping_from_name_to_definition:
#~ print "Did you mean to set one of these parameters?\n", \
#~ "\n ".join(self._mapping_from_name_to_definition.keys())
raise exceptions.CoreException(
f"tried to set unknown parameter '{name}' for a "
f"'{type(self._instance()).__name__}' object"
)
self._instance().before_set_parameter()
return self.get_parameter(name).set_value(value)
def __getitem__(self, name):
return self.get_parameter(name)
def names(self):
return list(self._mapping_from_name_to_definition.keys())
def set_defaults(self):
self._instance().before_set_parameter()
for name in self.names():
parameter = self.get_parameter(name)
parameter.set_default_value()
def __dir__(self):
result = []
result.extend(dir(type(self)))
result.extend(self.names())
return result
def get_default_value_for(self, name):
if not name in self._mapping_from_name_to_definition:
raise exceptions.CoreException(
f"tried to get default value of unknown parameter '{name}'"
f"for a '{type(self._instance()).__name__}' object"
)
definition = self._mapping_from_name_to_definition[name]
return definition.get_default_value(self)
def __str__(self):
output = ""
for name in sorted(self.names()):
output += name + ": "
output += str(getattr(self, name))
if self.get_parameter(name).is_readonly():
output += " (read only)"
output += "\n"
return output
def get_parameter(self, name):
if not name in self._mapping_from_name_to_definition:
raise exceptions.AmuseException("{0!r} not defined as parameter".format(name))
if not name in self._mapping_from_name_to_parameter:
definition = self._mapping_from_name_to_definition[name]
self._mapping_from_name_to_parameter[name] = Parameter(definition, self)
return self._mapping_from_name_to_parameter[name]
def iter_parameters(self):
for name in self.names():
yield self.get_parameter(name)
__iter__=iter_parameters
def send_cached_parameters_to_code(self):
cached_parameters = [x for x in self.iter_parameters() if x.definition.is_cached()]
for x in cached_parameters:
if not x.is_set:
x.set_default_value()
functions = OrderedDictionary()
for x in cached_parameters:
definition = x.definition
if not definition.functionname in functions:
functions[definition.functionname] = []
functions[definition.functionname].append(x)
for functionname, parameters in functions.items():
object = self._instance()
method = getattr(object, functionname)
keyword_arguments = {}
for parameter in parameters:
keyword_arguments[parameter.definition.parameter_name] = parameter.get_cached_value()
errorcode = method(**keyword_arguments)
def send_not_set_parameters_to_code(self):
parameters = [x for x in self.iter_parameters() if x.must_set_to_default()]
for x in parameters:
x.set_default_value()
def check_defaults(self):
for x in self.iter_parameters():
default_value = self.get_default_value_for(x.definition.name)
try:
value = x.get_value()
except:
print("could not get value for:", x.definition.name, default_value)
continue
print(x.definition.name, value, default_value)
if not value == default_value:
print("!default value is not equal to value in code: {0}".format(x.definition.name))
def copy(self):
mapping_from_name_to_value = {}
for name in self.names():
mapping_from_name_to_value[name] = getattr(self, name)
return ParametersMemento(mapping_from_name_to_value)
def reset_from_memento(self, memento):
for name in memento.names():
if not name in self._mapping_from_name_to_definition:
raise exceptions.CoreException(
f"tried to set unknown parameter '{name}' for a "
f"'{type(self._instance()).__name__}' object"
)
if self.get_parameter(name).is_readonly():
if not getattr(memento, name) == getattr(self, name):
raise exceptions.CoreException(
f"tried to change read-only parameter '{name}' for a "
f"'{type(self._instance()).__name__}' object"
)
else:
setattr(self, name, getattr(memento, name))
def has_writable_parameter(self, name):
if not name in self._mapping_from_name_to_definition:
return False
return not self.get_parameter(name).is_readonly()
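# An illustrative sketch of how a Parameters object is normally used through
# a community-code instance. 'SomeCode' is a placeholder, not a real class;
# any AMUSE code exposing a 'parameters' attribute behaves the same way.
#
#     code = SomeCode()
#     print(code.parameters)              # one "name: value" line per parameter
#     code.parameters.epsilon_squared = 0.01 | nbody_system.length**2
#     snapshot = code.parameters.copy()   # a ParametersMemento
#     code.parameters.reset_from_memento(snapshot)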
class ParametersMemento(object):
__name__ = 'Parameters'
def __init__(self, mapping_from_name_to_value = None):
if mapping_from_name_to_value is None:
mapping_from_name_to_value = {}
object.__setattr__(self, '_mapping_from_name_to_value', mapping_from_name_to_value)
def __getstate__(self):
return self.__dict__
def __setstate__(self,state):
object.__setattr__(self, '__dict__', state)
def __getattr__(self, name):
if not name in self._mapping_from_name_to_value:
raise exceptions.CoreException(
f"tried to get unknown parameter '{name}'"
)
return self._mapping_from_name_to_value[name]
def __setattr__(self, name, value):
if not name in self._mapping_from_name_to_value:
raise exceptions.CoreException(
f"tried to set unknown parameter '{name}'"
)
self._mapping_from_name_to_value[name] = value
def names(self):
return list(self._mapping_from_name_to_value.keys())
def set_defaults(self):
pass
def __dir__(self):
result = []
result.extend(dir(type(self)))
result.extend(self.names())
return result
def get_default_value_for(self, name):
if not name in self._mapping_from_name_to_value:
raise exceptions.CoreException(
f"tried to get default value of unknown parameter '{name}'"
)
raise exceptions.CoreException(
"tried to get default value, for a parameter in a parameters memento"
)
def __str__(self):
output = ""
for name in sorted(self.names()):
output += name + ": "
output += str(getattr(self, name))+"\n"
return output
def new_parameters_instance_with_docs(definitions, instance):
class _ParametersMetaclass(type):
def _get_doc(self):
output = "Parameters: \n"
for parameter_definition in definitions:
output += parameter_definition.name + "\n\n"
output += " " + parameter_definition.description
output += " (default value:" + str(parameter_definition.default_value) + ")\n\n"
return output
__doc__ = property(_get_doc)
class ParametersWithDocs(Parameters, metaclass=_ParametersMetaclass):
def _get_doc(self):
output = "Parameters: \n"
for parameter_definition in definitions:
output += parameter_definition.name + "\n\n"
output += " " + parameter_definition.description
output += " (default value:" + str(self.get_default_value_for(parameter_definition.name)) + ")\n\n"
return output
__doc__ = property(_get_doc)
return ParametersWithDocs(definitions, instance)
def new_parameters_with_units_converted_instance_with_docs(original, converter):
class _ParametersMetaclass(type):
def _convert_from_target_to_source_if_needed(value):
if isinstance(value, bool) or isinstance(value, quantities.NonNumericQuantity):
return value
else:
return converter.from_target_to_source(value)
def _get_doc(self):
output = "Parameters: \n"
for parameter_definition in original._definitions:
value = parameter_definition.default_value
if not isinstance(value, bool) and not isinstance(value, quantities.NonNumericQuantity):
value = converter.from_target_to_source(value)
output += parameter_definition.name + "\n\n"
output += " " + parameter_definition.description
output += " (default value:" + str(value) + ")\n\n"
return output
__doc__ = property(_get_doc)
class ParametersWithDocs(ParametersWithUnitsConverted, metaclass=_ParametersMetaclass):
def _get_doc(self):
output = "Parameters: \n"
for parameter_definition in original._definitions:
output += parameter_definition.name + "\n\n"
output += " " + parameter_definition.description
output += " (default value:" + str(self.get_default_value_for(parameter_definition.name)) + ")\n\n"
return output
__doc__ = property(_get_doc)
return ParametersWithDocs(original, converter)
class ParametersWithUnitsConverted(object):
def __init__(self, original, converter):
object.__setattr__(self, '_original', original)
object.__setattr__(self, '_converter', converter)
def __getattr__(self, name):
return self.convert_from_target_to_source_if_needed(getattr(self._original, name))
def __setattr__(self, name, value):
if not name in self._original._mapping_from_name_to_definition:
raise exceptions.CoreException(
f"Could not set unknown parameter '{name}' for a "
f"'{type(self._original()).__name__}' object"
)
try:
setattr(self._original, name, self._converter.from_source_to_target(value))
        except IncompatibleUnitsException:
setattr(self._original, name, value)
def names(self):
return self._original.names()
def set_defaults(self):
self._original.set_defaults()
def __dir__(self):
return dir(self._original)
def convert_from_target_to_source_if_needed(self, value):
if isinstance(value, bool) or isinstance(value, quantities.NonNumericQuantity):
return value
else:
return self._converter.from_target_to_source(value)
def get_default_value_for(self, name):
return self.convert_from_target_to_source_if_needed(self._original.get_default_value_for(name))
def __str__(self):
output = ""
for name in sorted(self.names()):
output += name + ": "
output += str(getattr(self, name))
output += " default: " + str(self.get_default_value_for(name))
output +="\n"
return output
def check_defaults(self):
for x in self.iter_parameters():
default_value = self.get_default_value_for(x.definition.name)
try:
value = x.get_value()
except:
print("could not get value for:", x.definition.name, default_value)
continue
print(x.definition.name, value, default_value)
            if value != default_value:
print("default value is not equal to value in code: {0}".format(x.definition.name))
class AbstractParameterDefinition(object):
def __init__(self, name, description):
self.name = name
self.description = description
def get_default_value(self, parameterset):
return self.default_value
def get_value(self, parameter, object):
raise exceptions.AmuseException("not implemented")
def set_value(self, parameter, object, quantity):
raise exceptions.AmuseException("not implemented")
def set_default_value(self, parameter, object):
pass
def is_readonly(self):
return False
def is_cached(self):
return False
def must_set_to_default_if_not_set(self):
return True
class AliasParameterDefinition(AbstractParameterDefinition):
def __init__(self, name, aliased_name, description, alias_set=None):
AbstractParameterDefinition.__init__(self, name, description)
self.aliased_name = aliased_name
self.alias_set = alias_set
self.default_value = None
def get_default_value(self, parameter_set):
return parameter_set.get_parameter(self.aliased_name).definition.get_default_value(parameter_set)
def get_value(self, parameter, object):
if self.alias_set:
parameter_set=getattr(object, self.alias_set)
else:
parameter_set=parameter.parameter_set
return getattr(parameter_set, self.aliased_name)
def set_value(self, parameter, object, quantity):
if self.alias_set:
parameter_set=getattr(object, self.alias_set)
else:
parameter_set=parameter.parameter_set
return setattr(parameter_set, self.aliased_name, quantity)
def set_default_value(self, parameter, object):
pass
def is_readonly(self):
return False
def is_cached(self):
return False
def must_set_to_default_if_not_set(self):
return False
class ParameterDefinition(AbstractParameterDefinition):
def __init__(self, name, description, default_value, must_set_before_get = False):
AbstractParameterDefinition.__init__(self, name, description)
self.default_value = default_value
self.must_set_before_get = must_set_before_get
def get_value(self, parameter, object):
raise NotImplementedError()
def set_value(self, parameter, object, quantity):
raise NotImplementedError()
def set_default_value(self, parameter, object):
if self.default_value is None :
return None
if self.is_readonly():
return None
self.set_value(parameter, object, self.default_value)
def is_readonly(self):
return False
def is_cached(self):
return False
class InterfaceParameterDefinition(ParameterDefinition):
def __init__(self, name, description, default_value,state_guard=None):
AbstractParameterDefinition.__init__(self, name, description)
self.default_value = default_value
self.must_set_before_get = False
self.value=default_value
self.state_guard=state_guard
def get_value(self, parameter, object):
try:
x=self.value.copy()
except:
x=self.value
return x
def set_value(self, parameter, object, quantity):
try:
self.value=quantity.copy()
except:
self.value=quantity
if self.state_guard:
getattr(object, self.state_guard)()
def must_set_to_default_if_not_set(self):
return False
class ParameterException(AttributeError):
template = ("Could not {0} value for parameter '{1}' of a '{2}' object, got errorcode <{3}>")
def __init__(self, object, parameter_name, errorcode, is_get):
AttributeError.__init__(self, self.template.format(
"get" if is_get else "set",
parameter_name,
type(object).__name__,
errorcode
))
self.errorcode = errorcode
self.parameter_name = parameter_name
class ModuleMethodParameterDefinition(ParameterDefinition):
def __init__(self, get_method, set_method, name, description, default_value = None, must_set_before_get = False):
ParameterDefinition.__init__(self, name, description, default_value, must_set_before_get)
self.get_method = get_method
self.set_method = set_method
self.stored_value = None
def get_value(self, parameter, object):
if self.must_set_before_get and not parameter.is_set:
self.set_default_value(parameter, object)
if self.get_method is None:
return self.stored_value
else:
return getattr(object, self.get_method)()
def set_value(self, parameter, object, quantity):
#if self.unit.is_non_numeric() or len(self.unit.base) == 0:
# if not isinstance(quantity, quantities.Quantity):
# quantity = quantity | self.unit
if self.set_method is None:
raise exceptions.CoreException(
f"Could not set value for parameter '{self.name}' of a "
f"'{type(object).__name__}' object, parameter is read-only"
)
getattr(object, self.set_method)(quantity)
if self.get_method is None:
self.stored_value = quantity
parameter.is_set = True
def is_readonly(self):
return self.set_method is None
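# Sketch (hypothetical, not part of the original module): wiring a
# ModuleMethodParameterDefinition to plain get_/set_ methods on a code
# object. _FakeCode and the SimpleNamespace stand in for a real code
# interface and Parameter object; only the behaviour visible above is used.
def _example_method_parameter():
    import types
    class _FakeCode(object):
        def __init__(self):
            self._eps = 0.0
        def get_eps(self):
            return self._eps
        def set_eps(self, value):
            self._eps = value
    definition = ModuleMethodParameterDefinition(
        "get_eps", "set_eps", "epsilon",
        "gravitational softening length", default_value=0.0)
    code = _FakeCode()
    state = types.SimpleNamespace(is_set=False)  # stands in for a Parameter
    definition.set_value(state, code, 0.01)      # calls code.set_eps(0.01)
    return definition.get_value(state, code)     # calls code.get_eps() -> 0.01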
class ModuleBooleanParameterDefinition(ModuleMethodParameterDefinition):
def __init__(self, *args, **kwargs):
ModuleMethodParameterDefinition.__init__(self, *args, **kwargs)
def get_value(self, parameter, object):
return True if ModuleMethodParameterDefinition.get_value(self, parameter, object) else False
def set_value(self, parameter, object, bool):
return ModuleMethodParameterDefinition.set_value(self, parameter, object, 1 if bool else 0)
class ModuleCachingParameterDefinition(ParameterDefinition):
def __init__(self, functionname, parameter_name, name, description, default_value = None):
ParameterDefinition.__init__(self, name, description, default_value, must_set_before_get = True)
self.stored_value = None
self.parameter_name = parameter_name
self.functionname = functionname
def get_value(self, parameter, object):
if self.must_set_before_get and not parameter.is_set:
self.set_default_value(parameter, object)
return parameter.cached_value
def set_value(self, parameter, object, quantity):
if is_quantity(self.default_value):
unit = self.default_value.unit
if unit.is_non_numeric() or len(unit.base) == 0:
if not is_quantity(quantity):
quantity = quantity | unit
parameter.cached_value = quantity
parameter.is_set = True
def is_cached(self):
return True
class Parameter(object):
def __init__(self, definition, parameter_set):
self.parameter_set = parameter_set
self.definition = definition
self.is_set = False
def get_value(self):
return self.definition.get_value(self, self.parameter_set._instance())
def set_value(self, quantity):
return self.definition.set_value(self, self.parameter_set._instance(), quantity)
def set_default_value(self):
self.definition.set_default_value(self, self.parameter_set._instance())
def is_readonly(self):
return self.definition.is_readonly()
def must_set_to_default(self):
if self.definition.is_cached():
return False
return (not self.is_set) and self.definition.must_set_to_default_if_not_set()
def get_cached_value(self):
return self.definition.get_value(self, self.parameter_set._instance())
@property
def name(self):
return self.definition.name
@property
def description(self):
return self.definition.description
@property
def value(self):
return self.get_value()
def __str__(self):
return self.name
class ModuleVectorMethodParameterDefinition(ModuleMethodParameterDefinition):
def get_value(self, parameter, object):
if self.must_set_before_get and not parameter.is_set:
self.set_default_value(parameter, object)
if self.get_method is None:
return self.stored_value
else:
list_of_scalars = getattr(object, self.get_method)()
result = quantities.AdaptingVectorQuantity()
result.extend(list_of_scalars)
return result.copy()
def set_value(self, parameter, object, vector_quantity):
if self.set_method is None:
raise exceptions.CoreException(
f"Could not set value for parameter '{self.name}' of a "
f"'{type(object).__name__}' object, parameter is read-only"
)
getattr(object, self.set_method)(*vector_quantity)
if self.get_method is None:
self.stored_value = vector_quantity
parameter.is_set = True
def is_readonly(self):
return self.set_method is None
class VectorParameterDefinition(AbstractParameterDefinition):
def __init__(self, name, description, names_of_parameters, default_value):
AbstractParameterDefinition.__init__(self, name, description)
self.names_of_parameters = names_of_parameters
self.default_value = default_value
def get_value(self, parameter, object):
all_parameters = parameter.parameter_set
result = []
unit = None
for name in self.names_of_parameters:
parameter = all_parameters.get_parameter(name)
element = parameter.get_value()
if unit is None:
if is_quantity(element):
unit = element.unit
if not unit is None:
result.append(element.value_in(unit))
else:
result.append(element)
if not unit is None:
return unit.new_quantity(result)
else:
return numpy.asarray(result)
def set_value(self, parameter, object, quantity):
all_parameters = parameter.parameter_set
for index,name in enumerate(self.names_of_parameters):
parameter = all_parameters.get_parameter(name)
parameter.set_value(quantity[index])
def must_set_to_default_if_not_set(self):
return False
def get_unit(self, parameter):
result = None
all_parameters = parameter.parameter_set
for index,name in enumerate(self.names_of_parameters):
parameter = all_parameters.get_parameter(name)
if hasattr(parameter.definition, "unit"):
result = parameter.definition.unit
return result
# to do: higher dimensional array parameters
class ModuleArrayParameterDefinition(ParameterDefinition):
def __init__(self, get_method, set_method, range_method, name, description):
ParameterDefinition.__init__(self, name, description, None, False)
self.get_method = get_method
self.set_method = set_method
self.range_method = range_method
self.stored_value = None
def get_value(self, parameter, object):
if self.get_method is None:
return self.stored_value
else:
irange=getattr(object, self.range_method)()
index=numpy.arange(irange[0],irange[1]+1)
return getattr(object, self.get_method)(index)
def set_value(self, parameter, object, quantity):
if self.set_method is None:
raise exceptions.CoreException(
"Could not set value for parameter '{self.name}' of a "
"'{type(object).__name__}' object, parameter is read-only"
)
irange=getattr(object, self.range_method)()
index=numpy.arange(irange[0],irange[1]+1)
getattr(object, self.set_method)(index,quantity)
if self.get_method is None:
self.stored_value = quantity
parameter.is_set = True
def is_readonly(self):
return self.set_method is None
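# Sketch (hypothetical, not part of the original module):
# ModuleArrayParameterDefinition drives whole-array getters/setters through
# an index range reported by the code. _FakeGridCode is a stand-in exposing
# only the three methods named in the definition.
def _example_array_parameter():
    import types
    class _FakeGridCode(object):
        def __init__(self, n):
            self._data = numpy.zeros(n)
        def get_range(self):
            return (0, len(self._data) - 1)
        def get_profile(self, index):
            return self._data[index]
        def set_profile(self, index, values):
            self._data[index] = values
    definition = ModuleArrayParameterDefinition(
        "get_profile", "set_profile", "get_range",
        "profile", "per-cell profile values")
    code = _FakeGridCode(4)
    state = types.SimpleNamespace(is_set=False)  # stands in for a Parameter
    definition.set_value(state, code, numpy.arange(4.0))
    return definition.get_value(state, code)  # -> array([0., 1., 2., 3.])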
| 26,325
| 33.593955
| 117
|
py
|
amuse
|
amuse-main/src/amuse/datamodel/rotation.py
|
import numpy
from amuse.units.trigo import cos, sin
def new_rotation_matrix(phi, theta, psi):
"""
Return the rotation matrix, to rotate positions, around the x-axis (phi), y-axis (theta) and z-axis (psi).
See wikipedia for reference
"""
return numpy.array( (
(cos(theta)*cos(psi), -cos(phi)*sin(psi) + sin(phi)*sin(theta)*cos(psi), sin(phi)*sin(psi) + cos(phi)*sin(theta)*cos(psi)) ,
(cos(theta)*sin(psi), cos(phi)*cos(psi) + sin(phi)*sin(theta)*sin(psi), -sin(phi)*cos(psi) + cos(phi)*sin(theta)*sin(psi)) ,
(-sin(theta) , sin(phi)*cos(theta) , cos(phi)*cos(theta))
) )
def rotated(positions, phi, theta, psi):
"""
Return the positions, rotated by phi, theta and psi around the x, y and z axes
"""
rotation_matrix = new_rotation_matrix(phi, theta, psi)
# print "RT=", rotation_matrix
return positions.dot(rotation_matrix.transpose())
def rotate(particles, phi, theta, psi):
"""
Rotate the positions and the velocities around 0,0,0.
"""
particles.position = rotated(particles.position, phi, theta, psi)
particles.velocity = rotated(particles.velocity, phi, theta, psi)
def add_spin(particles, omega):
"""
Add solid-body rotation to the velocity of the particles, relative to the
center-of-mass position.
"""
if not omega.is_vector():
omega = omega * [0.0, 0.0, 1.0]
particles.velocity += omega.cross(particles.position - particles.center_of_mass())
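# Usage sketch (hypothetical helper, not part of the original module);
# assumes the AMUSE Particles set and units, with angles given in radians.
def _example_rotate_cluster():
    from amuse.datamodel import Particles
    from amuse.units import units
    particles = Particles(2)
    particles.position = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]] | units.AU
    particles.velocity = [[0.0, 1.0, 0.0], [-1.0, 0.0, 0.0]] | units.kms
    rotate(particles, 0.0, 0.0, numpy.pi / 2)  # quarter turn around the z-axis
    return particles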
| 1,551
| 36.853659
| 133
|
py
|
amuse
|
amuse-main/src/amuse/datamodel/grid_attributes.py
|
from amuse.units.quantities import zero, as_vector_quantity, column_stack
import numpy
from amuse.datamodel import base
from amuse.datamodel import grids
# maintain for backwards compatibility, these should go..
grids.Grid.add_global_vector_attribute("position", ["x","y","z"])
grids.Grid.add_global_vector_attribute("momentum", ["rhovx","rhovy","rhovz"])
grids.Grid.add_global_vector_attribute("magnetic_field", ["B1i","B2i","B3i"])
@grids.BaseGrid.caching_function_for_set
def cellsize(grid):
raise Exception("a {0} does not have a constant cellsize, use the cellsizes method instead".format(grid.__class__.__name__))
@grids.RegularBaseGrid.caching_function_for_set
def cellsize(grid):
"""Returns the lenght of each direction in the grid.
Works for regular and cartesian grids.
"""
result = []
Ndim= len(grid.shape)
cell1 = grid[(0,)*Ndim]
for i in range(len( grid[grid.get_minimum_index()].position )): # shape of position not necessarily same as shape of the grid?
if grid.shape[i] > 1:
cell2=grid[(0,)*i+(1,)+(0,)*(Ndim-1-i)]
result.append((cell2.position-cell1.position)[i])
return as_vector_quantity(result)
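# Usage sketch (hypothetical): a 2x2x2 regular grid spanning 1 m per axis
# should report a cellsize of 0.5 m in each direction. Assumes the
# new_regular_grid factory exported from amuse.datamodel.
def _example_cellsize():
    from amuse.datamodel import new_regular_grid
    from amuse.units import units
    grid = new_regular_grid((2, 2, 2), [1.0, 1.0, 1.0] | units.m)
    return grid.cellsize()  # -> quantity close to [0.5, 0.5, 0.5] m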
@grids.BaseGrid.caching_function_for_set
def get_minimum_index(grid):
raise Exception("not implemented")
@grids.StructuredBaseGrid.caching_function_for_set
def get_minimum_index(grid):
return tuple(numpy.zeros_like(grid.shape))
@grids.BaseGrid.caching_function_for_set
def get_maximum_index(grid):
raise Exception("not implemented")
@grids.StructuredBaseGrid.caching_function_for_set
def get_maximum_index(grid):
return tuple(grid.shape - numpy.ones_like(grid.shape))
@grids.BaseGrid.caching_function_for_set
def get_minimum_position(grid):
raise Exception("not implemented")
@grids.RectilinearBaseGrid.caching_function_for_set
def get_minimum_position(grid):
return grid[grid.get_minimum_index()].position - 0.5 * grid.cellsize()
@grids.BaseGrid.caching_function_for_set
def get_maximum_position(grid):
raise Exception("not implemented")
@grids.RectilinearBaseGrid.caching_function_for_set
def get_maximum_position(grid):
return grid[grid.get_maximum_index()].position + 0.5 * grid.cellsize()
@grids.BaseGrid.caching_function_for_set
def get_volume(grid):
raise Exception("not implemented")
@grids.RectilinearBaseGrid.caching_function_for_set
def get_volume(grid):
maximum_position = grid.get_maximum_position()
minimum_position = grid.get_minimum_position()
delta = maximum_position - minimum_position
return delta.prod()
@grids.BaseGrid.function_for_set
def contains(grid, points):
raise Exception("not implemented")
@grids.RectilinearBaseGrid.function_for_set
def contains(grid, points):
return numpy.logical_and(
numpy.all(points >= grid.get_minimum_position(), axis=len(points.shape)-1),
numpy.all(points < grid.get_maximum_position(), axis=len(points.shape)-1)
)
@grids.BaseGrid.function_for_set
def points(grid):
raise Exception("not implemented")
@grids.RegularBaseGrid.function_for_set
def points(grid):
shape=grid.shape
dx = grid.cellsize()/2
cell_centers=grid.position
shape_with_boundary = numpy.asarray(cell_centers.shape) + 1
shape_with_boundary[-1] -= 1
result = numpy.zeros(shape_with_boundary)* cell_centers.flat[0]
for i in range(2**len(shape)):
slicing=()
offset=[]
for j in range(len(shape)):
if i & 2**j:
slicing+=(slice(1,None),)
offset.append(1)
else:
slicing+=(slice(None,-1),)
offset.append(-1)
result[slicing]=cell_centers+dx*numpy.asarray(offset)
return result
@grids.BaseGrid.function_for_set
def connectivity(grid):
raise Exception("not implemented")
@grids.RegularBaseGrid.function_for_set
def connectivity(grid):
cellcenters = grid.position
shape = numpy.asarray(cellcenters.shape)
dim=len(shape)-1
shape[-1] = 2**dim
shape_with_boundary = numpy.asarray(cellcenters.shape) + 1
shape_with_boundary = shape_with_boundary[:dim]
indices = numpy.arange(0, numpy.prod(shape_with_boundary), dtype=numpy.int32).reshape(shape_with_boundary)
result = numpy.zeros(shape, dtype=numpy.int32)
for i in range(2**dim):
slicing1=()
slicing2=()
for j in range(dim):
if i & 2**j:
slicing2+=(slice(1,None),)
if len(slicing1) == 0 or slicing1[-1] is not Ellipsis:
slicing1+=(Ellipsis,)
else:
slicing2+=(slice(None,-1),)
if len(slicing1) == 0 or slicing1[-1] is not Ellipsis:
slicing1+=(Ellipsis,)
slicing1+=(i,)
result[slicing1]=indices[slicing2]
return result
@grids.BaseGrid.function_for_set
def overlaps(grid, grid1,eps=None):
raise Exception("not implemented")
@grids.RectilinearBaseGrid.function_for_set
def overlaps(grid, grid1,eps=None):
"""simple test for overlap
optional keyword parameter:
[eps]: size of buffer (to ignore just touching regions)
"""
minp=grid.get_minimum_position()
maxp=grid.get_maximum_position()
minp1=grid1.get_minimum_position()
maxp1=grid1.get_maximum_position()
if eps is not None:
minp+=eps
maxp-=eps
minp1+=eps
maxp1-=eps
if (maxp<=minp1).sum()>0 or (minp>=maxp1).sum()>0:
return False
return True
@grids.BaseGrid.function_for_set
def get_overlap_with(grid, grid1,eps=None):
raise Exception("not implemented")
@grids.RegularBaseGrid.function_for_set
def get_overlap_with(grid, grid1,eps=None):
"""return overlapping subgrid"""
if not grid.overlaps(grid1,eps):
return None
minindex=grid.get_minimum_index()
maxindex=grid.get_maximum_index()
cellsize=grid.cellsize()
minp=grid.get_minimum_position()
maxp=grid.get_maximum_position()
minp1=grid1.get_minimum_position()
maxp1=grid1.get_maximum_position()
if eps is not None:
minp1+=eps
maxp1-=eps
index_of_minp1=numpy.maximum( numpy.array((minp1-minp)/cellsize,'int'), minindex[:len(cellsize)])
index_of_maxp1=numpy.minimum( numpy.array((maxp1-minp)/cellsize,'int'), maxindex[:len(cellsize)])
slices=()
for i,j in zip(index_of_minp1,index_of_maxp1):
slices+=(slice(i,j+1),)
if len(slices)!= len(minindex): slices+=(Ellipsis,)
return grid[slices]
#@grids.AbstractGrid.function_for_set
#def select_fully_inside(grid, cellsizes=(), coordinates=()):
# """returns boolean array with cells with cellsizes centered on
# coordinates fully inside the grid """
# gridminx,gridminy=sys.grid.get_minimum_position()
# gridmaxx,gridmaxy=sys.grid.get_maximum_position()
@grids.BaseGrid.function_for_set
def get_index(grid, pos=None, **kwargs):
raise Exception("not implemented for a {0} grid".format(grid.__class__.__name__))
@grids.RegularBaseGrid.function_for_set
def get_index(grid, pos=None, **kwargs):
pos=grid._get_array_of_positions_from_arguments(pos=pos,**kwargs)
offset = pos - grid.get_minimum_position()
indices = (offset / grid.cellsize())
return numpy.floor(indices).astype(numpy.int32)
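# Usage sketch (hypothetical, same new_regular_grid assumption as above):
# positions map to cell indices by the offset from the grid minimum divided
# by the cellsize, floored.
def _example_get_index():
    from amuse.datamodel import new_regular_grid
    from amuse.units import units
    grid = new_regular_grid((4, 4), [1.0, 1.0] | units.m)
    return grid.get_index([0.6, 0.1] | units.m)  # -> indices [2, 0]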
@grids.BaseGrid.function_for_set
def _get_array_of_positions_from_arguments(grid, **kwargs):
return grids._get_array_of_positions_from_arguments(grid.get_axes_names(), **kwargs)
| 7,549
| 32.259912
| 130
|
py
|
amuse
|
amuse-main/src/amuse/datamodel/binding.py
| 1
| 0
| 0
|
py
|
|
amuse
|
amuse-main/src/amuse/datamodel/__init__.py
|
"""
This module provides access to all set handling
in AMUSE. The actual implementation is in the
base, storage and particle modules.
"""
from amuse.datamodel.base import *
from amuse.datamodel.memory_storage import *
from amuse.datamodel.particles import *
from amuse.datamodel.grids import *
from amuse.datamodel import particle_attributes
from amuse.datamodel import grid_attributes
| 388
| 28.923077
| 48
|
py
|
amuse
|
amuse-main/src/amuse/datamodel/console.py
|
import warnings
from amuse.support.console import *
warnings.warn("amuse.datamodel.console has moved to amuse.support.console", DeprecationWarning)
| 150
| 24.166667
| 95
|
py
|
amuse
|
amuse-main/src/amuse/datamodel/trees.py
|
class BinaryTreesOnAParticleSet(object):
def __init__(self, particles_set, name_of_firstchild_attribute, name_of_secondchild_attribute):
self.particles_set = particles_set
self.name_of_firstchild_attribute = name_of_firstchild_attribute
self.name_of_secondchild_attribute = name_of_secondchild_attribute
def iter_roots(self):
return self.iter_binary_trees()
def iter_binary_trees(self):
binaries = self._binaries()
if len(binaries) == 0:
return
binaries_children1 = self._get_inner_nodes(binaries, self.name_of_firstchild_attribute)
binaries_children2 = self._get_inner_nodes(binaries, self.name_of_secondchild_attribute)
roots = (binaries - (binaries_children1 + binaries_children2))
for particle in roots:
yield BinaryTreeOnParticle(particle, self.name_of_firstchild_attribute, self.name_of_secondchild_attribute)
def particles_not_in_a_multiple(self):
binaries = self._binaries()
binaries_children1 = self._get_descendant_nodes(self.particles_set, self.name_of_firstchild_attribute)
binaries_children2 = self._get_descendant_nodes(self.particles_set, self.name_of_secondchild_attribute)
if len(binaries) == 0:
return self.particles_set
else:
singles = (self.particles_set - (self.roots() + binaries_children1 + binaries_children2))
return singles
def roots(self):
binaries = self._binaries()
if len(binaries) == 0:
return binaries
binaries_children1 = self._get_inner_nodes(binaries, self.name_of_firstchild_attribute)
binaries_children2 = self._get_inner_nodes(binaries, self.name_of_secondchild_attribute)
return (binaries - (binaries_children1 + binaries_children2))
def _binaries(self):
return self.particles_set.select_array(lambda x : x != [None], [self.name_of_firstchild_attribute,])
def _get_inner_nodes(self, set, name_of_attribute):
descendants = self._get_descendant_nodes(set, name_of_attribute)
return descendants.select_array(lambda x : x != [None], [name_of_attribute,])
def _get_descendant_nodes(self, set, name_of_attribute):
return getattr(set, name_of_attribute).as_set().compressed()
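# Usage sketch (hypothetical helper): a three-particle hierarchy with
# child1/child2 links; iter_binary_trees() should yield exactly one tree,
# rooted at the particle that is nobody's child. Assumes link attributes can
# be initialized to None on the whole set, as in the AMUSE tree tests.
def _example_iter_trees():
    from amuse.datamodel import Particles
    particles = Particles(3)
    particles.child1 = None
    particles.child2 = None
    particles[0].child1 = particles[1]
    particles[0].child2 = particles[2]
    trees = BinaryTreesOnAParticleSet(particles, "child1", "child2")
    return list(trees.iter_binary_trees())  # one BinaryTreeOnParticle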
class BinaryTreeOnParticle(object):
def __init__(self, particle, name_of_firstchild_attribute = "child1" , name_of_secondchild_attribute = "child2"):
self.particle = particle
self.name_of_firstchild_attribute = name_of_firstchild_attribute
self.name_of_secondchild_attribute = name_of_secondchild_attribute
def iter_descendants(self):
stack = [self.particle]
while len(stack) > 0:
current = stack.pop()
children = []
child1 = getattr(current, self.name_of_firstchild_attribute)
if not child1 is None:
yield child1
children.append(child1)
child2 = getattr(current, self.name_of_secondchild_attribute)
if not child2 is None:
yield child2
children.append(child2)
stack.extend(reversed(children))
def iter_leafs(self):
stack = [self.particle]
while len(stack) > 0:
current = stack.pop()
children = []
child1 = getattr(current, self.name_of_firstchild_attribute)
if not child1 is None:
children.append(child1)
child2 = getattr(current, self.name_of_secondchild_attribute)
if not child2 is None:
children.append(child2)
stack.extend(reversed(children))
if len(children) == 0:
yield current
def iter_inner_nodes(self):
stack = [self.particle]
while len(stack) > 0:
current = stack.pop()
children = []
child1 = getattr(current, self.name_of_firstchild_attribute)
if not child1 is None:
children.append(child1)
child2 = getattr(current, self.name_of_secondchild_attribute)
if not child2 is None:
children.append(child2)
stack.extend(reversed(children))
if len(children) > 0:
yield current
def get_inner_nodes_subset(self):
keys = [x.key for x in self.iter_inner_nodes()]
return self.particle.particles_set._subset(keys)
def get_descendants_subset(self):
keys = [x.key for x in self.iter_descendants()]
return self.particle.particles_set._subset(keys)
def get_leafs_subset(self):
keys = [x.key for x in self.iter_leafs()]
return self.particle.particles_set._subset(keys)
def copy(self):
copy_of_set = self.get_tree_subset().copy()
root = copy_of_set[0]
return BinaryTreeOnParticle(
root,
name_of_firstchild_attribute = self.name_of_firstchild_attribute,
name_of_secondchild_attribute = self.name_of_secondchild_attribute
)
def get_tree_subset(self):
keys = [x.key for x in iter(self)]
return self.particle.particles_set._subset(keys)
def iter_events(self):
stack = [('start', self.particle)]
while len(stack) > 0:
event, current = stack.pop()
yield event,current
if event == 'end':
continue
stack.append( ('end', current, ) )
children = []
child1 = getattr(current, self.name_of_firstchild_attribute)
if not child1 is None:
children.append( ('start', child1, ) )
child2 = getattr(current, self.name_of_secondchild_attribute)
if not child2 is None:
children.append( ('start', child2, ) )
stack.extend(reversed(children))
def iter_levels(self):
level = -1
for event, particle in self.iter_events():
if event == 'start':
level += 1
yield level, particle
else:
level -= 1
def __iter__(self):
stack = [self.particle]
while len(stack) > 0:
current = stack.pop()
yield current
children = []
child1 = getattr(current, self.name_of_firstchild_attribute)
if not child1 is None:
children.append(child1)
child2 = getattr(current, self.name_of_secondchild_attribute)
if not child2 is None:
children.append(child2)
stack.extend(reversed(children))
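# Sketch (hypothetical helper): iter_levels() pairs each particle with its
# depth, which makes indented pretty-printing of a tree straightforward.
def _example_render_tree(tree):
    # tree: a BinaryTreeOnParticle, e.g. one yielded by iter_binary_trees()
    lines = []
    for level, particle in tree.iter_levels():
        lines.append("  " * level + str(particle.key))
    return "\n".join(lines)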
class ChildTreeOnParticleSet(object):
def __init__(self, particles_set, names_of_child_attributes = ["child1" , "child2"]):
self.particles_set = particles_set
self.names_of_child_attributes = names_of_child_attributes
@property
def particle(self):
        return self.particles_set  # the associated particle set; maybe upgrade to some aggregate particle
def is_leaf(self):
return False
def iter_leafs(self):
for node in self.iter_children():
if node.is_leaf():
yield node
def iter_branches(self):
for node in self.iter_children():
if not node.is_leaf():
yield node
def iter_descendants(self):
stack = list(reversed(list(self.iter_children())))
while len(stack) > 0:
current = stack.pop()
yield current
children = list(current.iter_children())
stack.extend(reversed(children))
def iter_events(self):
stack = [('start', x) for x in (reversed(list(self.iter_children())))]
while len(stack) > 0:
event, current = stack.pop()
yield event,current
if event == 'end':
continue
stack.append( ('end', current, ) )
children = list(current.iter_children())
            stack.extend([('start', x) for x in reversed(children)])
def iter_descendant_leafs(self):
stack = list(reversed(list(self.iter_children())))
while len(stack) > 0:
current = stack.pop()
if current.is_leaf():
yield current
else:
children = list(current.iter_children())
stack.extend(reversed(children))
def iter_descendant_branches(self):
stack = list(reversed(list(self.iter_children())))
while len(stack) > 0:
current = stack.pop()
if not current.is_leaf():
yield current
children = list(current.iter_children())
stack.extend(reversed(children))
def iter_children(self):
for particle in self._children():
yield ChildTreeOnParticle(particle, self.names_of_child_attributes)
def iter_levels(self):
level = -1
for event, particle in self.iter_events():
if event == 'start':
level += 1
yield level, particle
else:
level -= 1
def get_children(self):
return list(self.iter_children())
def __iter__(self):
return self.iter_children()
def _branches(self):
binaries = self._binaries()
result = binaries
for name in self.names_of_child_attributes:
result -= self._get_inner_nodes(binaries, name)
return result
def _inner_particles(self):
result = None
for name in self.names_of_child_attributes:
descendants = self._get_descendant_nodes(self.particles_set, name)
if result is None:
result = descendants
else:
result += descendants
return result
def _children(self):
return self.particles_set - self._inner_particles()
def _get_descendant_nodes(self, set, name_of_attribute):
return getattr(set, name_of_attribute).as_set().compressed()
def get_children_subset(self):
return self._children()
class ChildTreeOnParticle(object):
def __init__(self, particle, names_of_child_attributes = ["child1" , "child2"]):
self.particle = particle
self.names_of_child_attributes = names_of_child_attributes
def iter_descendants(self):
stack = [self.particle]
while len(stack) > 0:
current = stack.pop()
children = []
for name in self.names_of_child_attributes:
child = getattr(current, name)
if not child is None:
yield ChildTreeOnParticle(child, self.names_of_child_attributes)
children.append(child)
stack.extend(reversed(children))
def is_leaf(self):
for name in self.names_of_child_attributes:
child = getattr(self.particle, name)
if not child is None:
return False
return True
def is_binary(self):
if not len(self.names_of_child_attributes) == 2:
return False
for child in self.iter_children():
if not child.is_leaf():
return False
return True
def iter_leafs(self):
for node in self.iter_children():
if node.is_leaf():
yield node
def iter_branches(self):
for node in self.iter_children():
if not node.is_leaf():
yield node
def iter_descendant_leafs(self):
stack = list(reversed(list(self.iter_children())))
while len(stack) > 0:
current = stack.pop()
if current.is_leaf():
yield current
else:
children = list(current.iter_children())
stack.extend(reversed(children))
def iter_descendant_branches(self):
stack = list(reversed(list(self.iter_children())))
while len(stack) > 0:
current = stack.pop()
if not current.is_leaf():
yield current
children = list(current.iter_children())
stack.extend(reversed(children))
def get_children_subset(self):
keys = [x.particle.key for x in self.iter_children()]
return self.particle.particles_set._subset(keys)
def iter_children(self):
current = self.particle
for name in self.names_of_child_attributes:
child = getattr(current, name)
if not child is None:
yield ChildTreeOnParticle(child, self.names_of_child_attributes)
def get_children(self):
return list(self.iter_children())
def get_children_particles(self):
return [x.particle for x in self.iter_children()]
def __iter__(self):
return self.iter_children()
def iter_levels(self):
level = -1
for event, particle in self.iter_events():
if event == 'start':
level += 1
yield level, particle
else:
level -= 1
def iter_events(self):
stack = [('start', self,), ]
while len(stack) > 0:
event, current = stack.pop()
yield event,current
if event == 'end':
continue
stack.append( ('end', current, ) )
children = list(current.iter_children())
stack.extend([('start', x) for x in reversed(children)])
| 14,471
| 32.422633
| 119
|
py
|
amuse
|
amuse-main/src/amuse/datamodel/simple_hash.py
|
import os
import ctypes
import numpy
from amuse.support import exceptions
import threading
# run a little numpy snippet here, so that an import error for numpy
# (e.g. on pypy) surfaces at module import time
keys=numpy.ascontiguousarray([1,2,3], dtype="uintp")
keys.ctypes
class cell(ctypes.Structure):
_fields_=[("key", ctypes.c_size_t),
("value", ctypes.c_size_t)]
cell_pointer=ctypes.POINTER(cell)
c_size_t_pointer=ctypes.POINTER(ctypes.c_size_t)
c_int_pointer=ctypes.POINTER(ctypes.c_int)
class simple_hash(ctypes.Structure):
_fields_=[("m_cells", cell_pointer),
("m_arraySize", ctypes.c_size_t),
("m_population", ctypes.c_size_t),
("m_zeroUsed",ctypes.c_bool),
("m_zeroCell",cell)]
from amuse.support import get_amuse_root_dir
librarypath=os.path.join(get_amuse_root_dir(),"lib","simple_hash","libsimplehash.so")
lib_simple_hash=ctypes.CDLL(librarypath)
class SimpleHash(object):
def __init__(self):
self._map=simple_hash()
self._dummy=ctypes.c_size_t()
self._lib=lib_simple_hash
self._map_ref = ctypes.byref(self._map)
if self._lib.init_hash(ctypes.byref(self._map),128)!=0:
raise MemoryError("allocation of SimpleHash")
self.lock=threading.Lock()
def __del__(self):
self._lib.end_hash(self._map_ref)
def lookup(self,inkeys):
N=len(inkeys)
keys=numpy.ascontiguousarray(inkeys, dtype="uintp")
values=numpy.ascontiguousarray(numpy.zeros(N),dtype="uintp")
errors=numpy.ascontiguousarray(numpy.zeros(N),dtype="int32")
ckeys=keys.ctypes.data_as(c_size_t_pointer)
cvalues=values.ctypes.data_as(c_size_t_pointer)
cerrors=errors.ctypes.data_as(c_int_pointer)
with self.lock:
err=self._lib.hash_lookups(self._map_ref,N,ckeys,cvalues, cerrors)
if err != 0:
has_errors = errors!=0
missing_keys = keys[has_errors]
no_errors = ~has_errors
raise exceptions.KeysNotInStorageException(keys[no_errors], values[no_errors], missing_keys)
return values
def insert(self,keys,values=None):
N=len(keys)
keys=numpy.ascontiguousarray(keys, dtype="uintp")
if values is None:
values=numpy.arange(N,dtype="uintp")
else:
assert len(keys)==len(values)
values=numpy.ascontiguousarray(values,dtype="uintp")
ckeys=keys.ctypes.data_as(c_size_t_pointer)
cvalues=values.ctypes.data_as(c_size_t_pointer)
with self.lock:
err=self._lib.hash_inserts(self._map_ref,N,ckeys,cvalues)
if err!=0:
raise Exception("simple hash insert error")
def reindex(self, keys, values=None):
with self.lock:
self._lib.end_hash(self._map_ref)
if self._lib.init_hash(self._map_ref,len(keys))!=0:
raise MemoryError("allocation of SimpleHash")
self.insert(keys, values)
def key_present(self,key):
with self.lock:
return self._lib.hash_lookup(self._map_ref,ctypes.c_size_t(key),ctypes.byref(self._dummy))==0
def keys_present(self,keys):
pass
def match(self,keys):
N=len(keys)
keys=numpy.ascontiguousarray(keys, dtype="uintp")
values=numpy.ascontiguousarray(numpy.zeros(N),dtype="uintp")
errors=numpy.ascontiguousarray(numpy.zeros(N),dtype="int32")
ckeys=keys.ctypes.data_as(c_size_t_pointer)
cvalues=values.ctypes.data_as(c_size_t_pointer)
cerrors=errors.ctypes.data_as(c_int_pointer)
with self.lock:
err=self._lib.hash_lookups(self._map_ref,N,ckeys,cvalues, cerrors)
        state = errors == 0  # (matched keys, their values, unmatched keys)
return keys[state], values[state], keys[~state]
if __name__=="__main__":
sm=SimpleHash()
keys=[1,3,5,6,32,2]
sm.reindex(keys)
print(sm.lookup([5,32]))
del sm
| 3,904
| 31.815126
| 105
|
py
|
amuse
|
amuse-main/src/amuse/datamodel/set.py
|
"""
A set is a collection of values with the same units and precision
"""
| 74
| 17.75
| 65
|
py
|
amuse
|
amuse-main/src/amuse/datamodel/values.py
|
import warnings
from amuse.units.quantities import *
warnings.warn("amuse.datamodel.values has moved to amuse.units.quantities, use amuse.units.quantities instead", DeprecationWarning)
| 188
| 26
| 131
|
py
|
amuse
|
amuse-main/src/amuse/datamodel/incode_storage.py
|
"""This module defines the classes needed to map
functions defined by the codes into particle sets
and grids.
The attribute values of Particles or Gridpoints are
stored in Particle Sets or Grids. These sets or grids
manage:
1. The storage allocation (deletion and removal of particles)
2. The attribute access (getting or setting the value(s) of attribute(s))
3. Queries or selections of particles (selection of subsets of particles)
All 3 functions can be provided by a code. The classes in this
module provide a mapping from the functions in a code to
the datamodel used in AMUSE.
When a code manages a particular set all the data of that set is
stored in the memory space of that code. The code needs to provide
functions to access the data in the set.
.. note::
Most codes already implement a particle set or a grid.
The only extra requirement for AMUSE is to provide functions to access
    this set. When a code does not have any knowledge of sets or grids, the
management will take place in AMUSE and only some data transfer code
is needed
All incode storage is built on mapping attributes to functions. These
mappings are provided by a number of helper classes:
**setter/getter**
:py:class:`ParticleGetAttributesMethod`
Given particle indices or gridpoints (i,j,k) return a vector quantity
for each attribute
:py:class:`ParticleSetAttributesMethod`
Send values to the code given particle indices or gridpoints (i,j,k)
and a vector quantities for each attribute.
**new/delete**
:py:class:`NewParticleMethod`
Given vector quantities for attributes return the indices
of newly allocated particles
**function**
:py:class:`ParticleMethod`
Given particle indices or gridpoints (i,j,k) and optional arguments
return one or more vector quantities
**selection**
:py:class:`ParticleSpecificSelectMethod`
Given a particle return a subset of particles. For links between
particles (nearest neighbor, subparticle)
:py:class:`ParticleQueryMethod`
Retrieve indices from the code and return a subset of particles.
For selection of a limited number of particles by the code (get
the escaper)
:py:class:`ParticleSetSelectSubsetMethod`
Like ParticleQueryMethod but can handle larger subsets of
particles, the code can provide a special function
the return the number of particles in the set.
The InCode storage system is based on a number of classes:
:py:class:`AbstractInCodeAttributeStorage`
Handle attribute set/get functionality but no particle or
grid management
:py:class:`InCodeAttributeStorage`
Subclass of AbstractInCodeAttributeStorage, manages particles
:py:class:`InCodeGridAttributeStorage`
Subclass of AbstractInCodeAttributeStorage, manages grids
"""
from amuse.support.methods import AbstractCodeMethodWrapper
from amuse.units import nbody_system
from amuse.units import units
from amuse.units import quantities
from amuse.units.quantities import is_quantity
from amuse.support.core import late
from amuse.support import exceptions
import numpy
import inspect
from amuse.datamodel import parameters
from amuse.datamodel import base
from amuse.datamodel import Particles, ParticlesSuperset
from amuse.datamodel import ParticleInformationChannel
from amuse.datamodel import Particle
from amuse.datamodel import Grid
from amuse.datamodel import AttributeStorage
from amuse.rfi.async_request import ASyncRequestSequence, PoolDependentASyncRequest
try:
from types import EllipsisType
except:
EllipsisType = type(Ellipsis)
class ParticleMappingMethod(AbstractCodeMethodWrapper):
def __init__(self, method, attribute_names = None):
AbstractCodeMethodWrapper.__init__(self, method)
if attribute_names is None:
self._attribute_names = []
else:
self._attribute_names = attribute_names
@late
def name_of_the_indexing_parameter(self):
return 'index_of_the_particle'
class ParticleGetAttributesMethod(ParticleMappingMethod):
"""
Instances wrap other methods and provide mappings
from attribute names to results.
Simple attribute getter methods take an array of indices
and return a tuple with arrays of result values.
.. code-block:: python
x, y, z = instance.get_xyz(indices)
Instances of this class make it possible to access the
return values by their attribute names.
For this it employs two strategies:
1. It uses the provided array of names and
maps each name to the positional output.
2. If no array of names is provided it asks the wrapped
method for all the names of the output parameters
(this scheme only works for legacy
functions or for wrapped legacy functions)
"""
def __init__(self, method, attribute_names = None):
ParticleMappingMethod.__init__(self, method, attribute_names)
@late
def attribute_names(self):
if self._attribute_names:
return self._attribute_names
else:
result = []
for x in self.method_output_argument_names:
if x == self.name_of_the_indexing_parameter:
continue
else:
result.append(x)
return result
def check_arguments(self, storage, attributes_to_return, *indices):
if len(indices[0]) > 1:
if self.method_is_legacy and not (self.method.specification.can_handle_array or self.method.specification.must_handle_array):
raise Exception(
"getter method {0} cannot handle arrays".format(self.method)
)
elif self.method_is_code:
if not self.method.legacy_specification is None:
if not (self.method.legacy_specification.can_handle_array or self.method.legacy_specification.must_handle_array):
raise exceptions.AmuseException(
"getter method {0} cannot handle arrays".format(self.method)
)
def convert_return_value(self, return_value, storage, attributes_to_return):
if len(self.attribute_names) == 1:
return_value = (return_value,)
set_of_attributes_to_return = set(attributes_to_return)
result = {}
if self.index_output_attributes:
index_output_attributes = self.index_output_attributes
else:
index_output_attributes = [False] * len(return_value)
for value, attribute, isindex in zip(return_value, self.attribute_names, index_output_attributes):
if attribute in set_of_attributes_to_return:
if isindex:
result[attribute] = quantities.new_quantity(storage._get_keys_for_indices_in_the_code(value), units.object_key)
else:
result[attribute] = value
return result
def get_attribute_values(self, storage, attributes_to_return, *indices):
        self.check_arguments(storage, attributes_to_return, *indices)
try:
return_value = self.method(*indices, **storage.extra_keyword_arguments_for_getters_and_setters)
except:
print((self.method))
raise
return self.convert_return_value(return_value, storage, attributes_to_return)
def get_attribute_values_async(self, storage, attributes_to_return, *indices):
        self.check_arguments(storage, attributes_to_return, *indices)
def result_handler(inner):
return self.convert_return_value(inner(), storage, attributes_to_return)
async_request = self.method.asynchronous(*indices, **storage.extra_keyword_arguments_for_getters_and_setters)
async_request.add_result_handler(result_handler)
return async_request
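# Sketch (hypothetical, not part of the original module): only construction
# and the attribute-name mapping are exercised here; a real call additionally
# needs a storage object providing
# extra_keyword_arguments_for_getters_and_setters. Assumes
# AbstractCodeMethodWrapper merely records the wrapped callable on
# construction.
def _example_getter_mapping():
    def _get_xyz(indices):
        zeros = [0.0] * len(indices)
        return zeros, zeros, zeros
    getter = ParticleGetAttributesMethod(_get_xyz, ("x", "y", "z"))
    return getter.attribute_names  # -> ("x", "y", "z")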
class ParticleGetGriddedAttributesMethod(ParticleGetAttributesMethod):
def __init__(self, method, get_range_method, attribute_names = None):
ParticleGetAttributesMethod.__init__(self, method, attribute_names)
self.get_range_method = get_range_method
def get_attribute_values(self, storage, attributes_to_return, *indices):
        self.check_arguments(storage, attributes_to_return, *indices)
minmax_per_dimension = self.get_range_method( **storage.extra_keyword_arguments_for_getters_and_setters)
result = [slice(0, len(indices[0]))]
gridshape=[len(indices[0])]
for ind in indices[1:]:
result.append(slice(0, 1))
for i in range(0, len(minmax_per_dimension), 2):
minval = minmax_per_dimension[i]
maxval = minmax_per_dimension[i+1]
result.append(slice(minval, maxval+1))
gridshape.append(maxval+1-minval)
grid_indices = numpy.mgrid[tuple(result)]
u=grid_indices[0].copy()
for i, ind in enumerate(indices):
grid_indices[i] = numpy.asarray(ind)[u]
one_dimensional_arrays_of_indices = [x.reshape(-1) for x in grid_indices]
try:
return_value = self.method(*one_dimensional_arrays_of_indices, **storage.extra_keyword_arguments_for_getters_and_setters)
except:
print((self.method))
raise
mapping_from_name_to_value = self.convert_return_value(return_value, storage, attributes_to_return)
for key, value in mapping_from_name_to_value.items():
mapping_from_name_to_value[key] = value.reshape(gridshape)
return mapping_from_name_to_value
class ParticleSetAttributesMethod(ParticleMappingMethod):
"""
Instances wrap other methods and provide mappings
from attribute names to input parameters.
Simple attribute setter methods take an array of indices
and one or more arrays of new values.
.. code-block:: python
instance.set_xyz(indices, x, y, z)
Instances of this class make it possible to access the
    positional parameters with attribute names.
.. Note::
the index argument is assumed to always come first!
For this it employs two strategies:
1. It uses the provided array of names and
maps each name to the positional output.
2. If no array of names is provided it asks the wrapped
method for all the names of the input parameters
(this scheme works for legacy
functions and sometimes for python native functions (if
they have named arguments))
"""
def __init__(self, method, attribute_names = None):
ParticleMappingMethod.__init__(self, method, attribute_names)
@late
def attribute_names(self):
if self._attribute_names:
return self._attribute_names
else:
result = []
for x in self.method_input_argument_names:
if x == self.name_of_the_indexing_parameter:
continue
else:
result.append(x)
return result
@late
def optional_attribute_names(self):
if hasattr(self.method, 'optional_method_input_argument_names'):
return self.method.optional_method_input_argument_names
else:
return []
@late
def names_to_index(self):
result = {}
for index, name in enumerate(self.attribute_names):
result[name] = index
return result
def set_attribute_values(self, storage, attributes, values, *indices):
list_arguments = list(indices)
list_args, keyword_args = self.convert_attributes_and_values_to_list_and_keyword_arguments(attributes, values)
list_arguments.extend(list_args)
keyword_args.update(storage.extra_keyword_arguments_for_getters_and_setters)
self.method(*list_arguments, **keyword_args)
def set_attribute_values_async(self, storage, attributes, values, *indices, **extra_keyword_arguments_for_getters_and_setters):
keyword_args = {}
keyword_args.update(storage.extra_keyword_arguments_for_getters_and_setters)
keyword_args.update(extra_keyword_arguments_for_getters_and_setters)
list_arguments = list(indices)
list_args, keyword_args2 = self.convert_attributes_and_values_to_list_and_keyword_arguments(attributes, values)
keyword_args.update(keyword_args2)
list_arguments.extend(list_args)
async_request = self.method.asynchronous(*list_arguments, **keyword_args)
return async_request
def convert_attributes_and_values_to_list_and_keyword_arguments(self, attributes, values):
not_set_marker = object()
list_arguments = [not_set_marker] * (len(self.attribute_names))
names_to_index = self.names_to_index
for attribute, quantity in zip(attributes, values):
if attribute in names_to_index:
index = names_to_index[attribute]
list_arguments[index] = quantity
default_argument_found = False
missing_attributes = []
dict_arguments = {}
for index, x in enumerate(list_arguments):
if x is not_set_marker:
name_of_attribute = self.attribute_names[index]
if not name_of_attribute in self.optional_attribute_names:
missing_attributes.append(name_of_attribute)
else:
default_argument_found = True
elif default_argument_found:
name_of_attribute = self.attribute_names[index]
if not name_of_attribute in self.optional_attribute_names:
raise exceptions.AmuseException("Optional before required arguments")
dict_arguments[name_of_attribute] = x
list_arguments[index] = not_set_marker
if len(missing_attributes) > 0:
if len(missing_attributes) == 1:
missing_attributes_string = "{0!r} attribute".format(missing_attributes[0])
else:
missing_attributes_string = "{0!r} and {1!r} attributes".format(", ".join(missing_attributes[:-1]), missing_attributes[-1])
raise exceptions.MissingAttributesAmuseException(
missing_attributes,
"To add particles to this code you need to specify the {0}".format(missing_attributes_string))
list_arguments = [x for x in list_arguments if not x is not_set_marker]
return list_arguments, dict_arguments
class ParticleSetGriddedAttributesMethod(ParticleSetAttributesMethod):
def __init__(self, method, get_range_method, attribute_names = None):
ParticleSetAttributesMethod.__init__(self, method, attribute_names)
self.get_range_method = get_range_method
def set_attribute_values(self, storage, attributes, values, *indices):
list_args, keyword_args = self.convert_attributes_and_values_to_list_and_keyword_arguments(attributes, values)
minmax_per_dimension = self.get_range_method( **storage.extra_keyword_arguments_for_getters_and_setters)
result = [slice(0, len(indices[0]))]
gridshape=[len(indices[0])]
for ind in indices[1:]:
result.append(slice(0, 1))
for i in range(0, len(minmax_per_dimension), 2):
minval = minmax_per_dimension[i]
maxval = minmax_per_dimension[i+1]
result.append(slice(minval, maxval+1))
gridshape.append(maxval+1-minval)
grid_indices = numpy.mgrid[tuple(result)]
u=grid_indices[0].copy()
for i, ind in enumerate(indices):
grid_indices[i] = numpy.asarray(ind)[u]
one_dimensional_arrays_of_indices = [x.reshape(-1) for x in grid_indices]
list_arguments = list(grid_indices)
list_arguments.extend(list_args)
one_dimensional_arrays_of_args = [x.reshape(-1) for x in list_arguments]
for key, value in keyword_args.items():
keyword_args[key] = value.reshape(-1)
keyword_args.update(storage.extra_keyword_arguments_for_getters_and_setters)
self.method(*one_dimensional_arrays_of_args, **keyword_args)
def set_attribute_values_async(self, storage, attributes, values, *indices):
keyword_args = {}
keyword_args.update(storage.extra_keyword_arguments_for_getters_and_setters)
list_args, keyword_args2 = self.convert_attributes_and_values_to_list_and_keyword_arguments(attributes, values)
keyword_args.update(keyword_args2)
minmax_per_dimension = self.get_range_method( **storage.extra_keyword_arguments_for_getters_and_setters)
result = [slice(0, len(indices[0]))]
gridshape=[len(indices[0])]
for ind in indices[1:]:
result.append(slice(0, 1))
for i in range(0, len(minmax_per_dimension), 2):
minval = minmax_per_dimension[i]
maxval = minmax_per_dimension[i+1]
result.append(slice(minval, maxval+1))
gridshape.append(maxval+1-minval)
grid_indices = numpy.mgrid[tuple(result)]
u=grid_indices[0].copy()
for i, ind in enumerate(indices):
grid_indices[i] = numpy.asarray(ind)[u]
one_dimensional_arrays_of_indices = [x.reshape(-1) for x in grid_indices]
list_arguments = list(grid_indices)
list_arguments.extend(list_args)
one_dimensional_arrays_of_args = [x.reshape(-1) for x in list_arguments]
for key, value in keyword_args.items():
keyword_args[key] = value.reshape(-1)
async_request = self.method.asynchronous(*one_dimensional_arrays_of_args, **keyword_args)
return async_request
class NewParticleMethod(ParticleSetAttributesMethod):
"""
Instances wrap a method to create particles. The method may
take attributes values to set initial values on
the created particles.
The new particle functions work a lot like
the set attribute methods, only the new particle
function is supposed to return an array
of the indices of the created particles.
.. code-block:: python
indices = instance.new_particle(x, y, z)
"""
def __init__(self, method, attribute_names = None):
ParticleSetAttributesMethod.__init__(self, method, attribute_names)
def add_entities(self, attributes, values):
list_arguments,keyword_arguments = self.convert_attributes_and_values_to_list_and_keyword_arguments(attributes, values)
indices = self.method(*list_arguments, **keyword_arguments)
return indices
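# Sketch (hypothetical, not part of the original module): NewParticleMethod
# lines the attribute arrays up with the positional arguments of a plain
# new-particle function and hands back whatever indices that function
# returns.
def _example_new_particle_method():
    def _new_particle(mass, x):
        return list(range(len(mass)))
    new = NewParticleMethod(_new_particle, ("mass", "x"))
    return new.add_entities(("mass", "x"), ([1.0, 2.0], [0.0, 1.0]))  # -> [0, 1]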
class ParticleQueryMethod(object):
"""
Instances wrap a function that can take one or more arguments
and returns an index (or a list of indices, if the arguments are
    lists). This method is most useful to select one particle from
all particles in the set
.. code-block:: python
index = instance.get_escaper()
The index or indices are converted to a particle subset.
"""
def __init__(self, method, names = (), public_name = None, query_superset=False):
self.method = method
self.name_of_the_out_parameters = names
self.public_name = public_name
if query_superset:
self.apply = self.apply_for_superset
else:
self.apply = self.apply_normal
def apply_normal(self, particles, *args, **kwargs):
indices = self.method(*args, **kwargs)
keys = particles._private.attribute_storage._get_keys_for_indices_in_the_code(indices)
return particles._subset(keys)
def apply_for_superset(self, particles, *args, **kwargs):
indices = self.method(*args, **kwargs)
subset_results = []
for subset in particles._private.particle_sets:
keys = []
for index in indices:
if index in subset._private.attribute_storage.mapping_from_index_in_the_code_to_particle_key:
keys.append(subset._private.attribute_storage.mapping_from_index_in_the_code_to_particle_key[index])
subset_results.append(subset._subset(keys))
return ParticlesSuperset(subset_results)
class ParticleSpecificSelectMethod(object):
"""
Instances wrap a function that can take a particle index
and returns one or more indices
(but a limited and fixed number of indices). This method is most
useful to return links between particles (subparticles or
nearest neighbors)
.. code-block:: python
        output_index = instance.get_nearest_neighbor(input_index)
    The index or indices are converted to a particle subset.
"""
def __init__(self, method, names = (), public_name = None):
self.method = method
self.name_of_the_out_parameters = names
self.public_name = public_name
def apply_on_all(self, particles):
all_indices = list(particles._private.attribute_storage.mapping_from_index_in_the_code_to_particle_key.keys())
lists_of_indices = self.method(list(all_indices))
lists_of_keys = []
for indices in lists_of_indices:
keys = particles._private.attribute_storage._get_keys_for_indices_in_the_code(indices)
lists_of_keys.append(keys)
result = []
        for keys in lists_of_keys:
            result.append(particles._subset(keys))
return result
def apply_on_one(self, set, particle):
index = set._private.attribute_storage.get_indices_of(particle.key)
result = self.method(index)
keys = set._private.attribute_storage._get_keys_for_indices_in_the_code(result)
return particle.as_set()._subset(keys)
class ParticleMethod(AbstractCodeMethodWrapper):
"""
Instances wrap a function that returns quanties given particle
indices and optional arguments. Instances have a lot in common
with attribute getters, but can take extra arguments.
.. code-block:: python
pressure = instance.get_pressure(index, gamma)
"""
def __init__(self, method, public_name = None):
AbstractCodeMethodWrapper.__init__(self, method)
self.public_name = public_name
def apply_on_all(self, particles, *list_arguments, **keyword_arguments):
storage = particles._private.attribute_storage
all_indices = list(storage.mapping_from_index_in_the_code_to_particle_key.keys())
return self.method(all_indices, *list_arguments, **keyword_arguments)
def apply_on_one(self, set, particle, *list_arguments, **keyword_arguments):
storage = particle.particles_set._private.attribute_storage
index = storage.get_indices_of([particle.key])
return self.method(index[0], *list_arguments, **keyword_arguments)
class ParticleSetSelectSubsetMethod(object):
"""
Generic method to query and retrieve particles from the
    set. This selection can have up to three stages:
1. start the query given a number of optional arguments
2. get the number of selected particles
3. get the index of each particle
The pseudo-code for this selection is:
.. code-block:: python
set_selection_criteria(r = 10.0 | units.m)
n = get_number_of_selected_particles()
for i in range(n):
particle_index = get_index_of_selected_particle(i)
The first and second step are optional. If no number of
particles method is provided the class assumes the selection
only returns 1 particle.
Generalisation of ParticleQueryMethod
"""
def __init__(self, method, set_query_arguments_method = None, get_number_of_particles_in_set_method = None, public_name = None):
self.method = method
self.set_query_arguments_method = set_query_arguments_method
self.get_number_of_particles_in_set_method = get_number_of_particles_in_set_method
self.public_name = public_name
def apply_on_all(self, particles, *list_arguments, **keyword_arguments):
query_identifiers = None
if not self.set_query_arguments_method is None:
query_identifiers = self.set_query_arguments_method(*list_arguments, **keyword_arguments)
if query_identifiers is None:
query_identifiers = ()
elif not hasattr(query_identifiers, '__iter__'):
query_identifiers = (query_identifiers,)
if not self.get_number_of_particles_in_set_method is None:
number_of_particles_in_set = self.get_number_of_particles_in_set_method(*query_identifiers)
indices = self.method(list(range(number_of_particles_in_set)))
else:
index = self.method(*query_identifiers)
indices = [index]
query_identifiers = [ [x]*len(indices) for x in query_identifiers ]
keys = particles._private.attribute_storage._get_keys_for_indices_in_the_code(indices, *query_identifiers)
return particles._subset(keys)
class ParticlesAddedUpdateMethod(object):
def __init__(self, get_number_of_particles_added_method = None, get_id_of_added_particles_method = None):
self.get_number_of_particles_added_method = get_number_of_particles_added_method
self.get_id_of_added_particles_method = get_id_of_added_particles_method
    def apply_on_all(self, particles, *list_arguments, **keyword_arguments):
        # assumes the two wrapped methods follow the usual count + per-index
        # id pattern: first ask the code how many particles were added, then
        # retrieve the code index of each added particle and map those
        # indices to keys
        number_of_particles_added = self.get_number_of_particles_added_method()
        indices = self.get_id_of_added_particles_method(list(range(number_of_particles_added)))
        keys = particles._private.attribute_storage._get_keys_for_indices_in_the_code(indices)
        return particles._subset(keys)
class ParticleGetIndexMethod(object):
"""
Instances return the index of a particle in the code
"""
ATTRIBUTE_NAME = "index_in_code"
def __init__(self):
pass
@late
def attribute_names(self):
return [self.ATTRIBUTE_NAME]
def get_attribute_values(self, storage, attributes_to_return, *indices):
return {self.ATTRIBUTE_NAME : indices[0]}
class AbstractInCodeAttributeStorage(base.AttributeStorage):
"""
Abstract base storage for incode attribute storage.
It provides functions to handle getters and setters of
attributes but not for creating or deleting of particles as
this differs between grids and particle sets.
"""
def __init__(self,
code_interface,
setters,
getters,
extra_keyword_arguments_for_getters_and_setters = {},
):
self.code_interface = code_interface
self.getters = list(getters)
self.setters = setters
self.attributes = set([])
for x in self.getters:
self.attributes |= set(x.attribute_names)
for x in self.setters:
self.attributes |= set(x.attribute_names)
self.writable_attributes = set([])
for x in self.setters:
self.writable_attributes |= set(x.attribute_names)
self.extra_keyword_arguments_for_getters_and_setters = extra_keyword_arguments_for_getters_and_setters
def select_getters_for(self, attributes):
set_of_attributes = set(attributes)
# first check for an exact match
result = [getter for getter in self.getters if set(getter.attribute_names) == set_of_attributes]
if result:
return result
# sort methods on attribute lengths, longest first
sorted_getters = sorted(self.getters, key=lambda x : len(x.attribute_names), reverse = True)
# next, select the longest fitting method(s), to minimize the number of calls
for access_method in sorted_getters:
if set_of_attributes >= set(access_method.attribute_names):
result.append(access_method)
set_of_attributes -= set(access_method.attribute_names)
# next, select the shortest method(s), to minimize the extra parameters
if set_of_attributes:
for access_method in reversed(sorted_getters):
if set_of_attributes & set(access_method.attribute_names):
result.append(access_method)
set_of_attributes -= set(access_method.attribute_names)
if set_of_attributes:
raise exceptions.AmuseException("Do not have attributes {0}".format(sorted(set_of_attributes)))
return result
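    # Worked example of the selection above (sketch): with getters for
    # ("x", "y", "z"), ("mass",) and ("x", "vx"), requesting
    # ["x", "y", "z", "mass"] picks the ("x", "y", "z") getter first
    # (longest fit), then ("mass",). Requesting only ["vx"] has no
    # subset match, so the fallback loop settles for ("x", "vx"), which
    # also fetches the extra "x" attribute.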
def select_setters_for(self, attributes):
set_of_attributes = set(attributes)
result = []
for access_method in self.setters:
if set_of_attributes >= set(access_method.attribute_names):
result.append(access_method)
set_of_attributes -= set(access_method.attribute_names)
if set_of_attributes:
raise exceptions.AmuseException("Cannot set attributes {0}".format(sorted(set_of_attributes)))
return result
def get_defined_attribute_names(self):
return sorted(self.attributes)
def get_defined_settable_attribute_names(self):
return sorted(self.writable_attributes)
class InCodeAttributeStorage(AbstractInCodeAttributeStorage):
"""
Manages sets of particles stored in codes.
Maps indices returned by the code to keys defined in AMUSE.
"""
def __init__(self,
code_interface,
new_particle_method,
delete_particle_method,
number_of_particles_method,
setters,
getters,
name_of_the_index):
for x in getters:
x.name_of_the_indexing_parameter = name_of_the_index
for x in setters:
x.name_of_the_indexing_parameter = name_of_the_index
getters = list(getters)
AbstractInCodeAttributeStorage.__init__(self, code_interface, setters, getters)
self.mapping_from_particle_key_to_index_in_the_code = {}
self.mapping_from_index_in_the_code_to_particle_key = {}
self.particle_keys = numpy.zeros(0)
self.code_indices = numpy.zeros(0)
self._get_number_of_particles = number_of_particles_method
self.delete_particle_method = delete_particle_method
self.new_particle_method = new_particle_method
self.getters.append(ParticleGetIndexMethod())
def __len__(self):
return len(self.mapping_from_particle_key_to_index_in_the_code)
def can_extend_attributes(self):
return False
def add_particles_to_store(self, keys, attributes = [], values = []):
indices = self.new_particle_method.add_entities(attributes, values)
if len(self.particle_keys) > 0:
previous_length = len(self.particle_keys)
self.particle_keys = numpy.concatenate((self.particle_keys, numpy.array(list(keys))))
self.code_indices = numpy.concatenate((self.code_indices, numpy.array(indices)))
result = self.code_indices[previous_length:]
else:
self.particle_keys = numpy.array(keys)
self.code_indices = numpy.array(indices)
result = self.code_indices
index = 0
for key in keys:
if key in self.mapping_from_particle_key_to_index_in_the_code:
raise Exception("particle with same key added twice: {0}".format(key))
self.mapping_from_particle_key_to_index_in_the_code[key] = indices[index]
self.mapping_from_index_in_the_code_to_particle_key[indices[index]] = key
index = index + 1
return result
def get_indices_of(self, keys):
indices_in_the_code = []
if keys is None:
keys = self.particle_keys
notfoundkeys = []
foundkeys = []
for particle_key in keys:
try:
indices_in_the_code.append(self.mapping_from_particle_key_to_index_in_the_code[particle_key])
foundkeys.append(particle_key)
except KeyError:
notfoundkeys.append(particle_key)
if not len(notfoundkeys) == 0:
raise exceptions.KeysNotInStorageException(
numpy.asarray(foundkeys),
numpy.asarray(indices_in_the_code),
numpy.asarray(notfoundkeys)
)
return numpy.asarray(indices_in_the_code)
def get_key_indices_of(self, keys):
result = []
if keys is None:
keys = self.particle_keys
keys_set = set(keys)
for index in range(len(self.particle_keys)):
key = self.particle_keys[index]
if key in keys_set:
result.append(index)
return result
def get_positions_of_indices(self, indices):
result = []
if indices is None:
indices = self.code_indices
indices_set = set(indices)
for index in range(len(self.code_indices)):
index_in_code = self.code_indices[index]
if index_in_code in indices_set:
result.append(index)
return result
def get_value_of(self, index, attribute):
return self.get_value_in_store(index, attribute)
def get_values_in_store(self, indices_in_the_code, attributes):
if indices_in_the_code is None or isinstance(indices_in_the_code, EllipsisType):
indices_in_the_code = self.code_indices
if len(indices_in_the_code) == 0:
return [[] for attribute in attributes]
mapping_from_attribute_to_result = {}
for getter in self.select_getters_for(attributes):
result = getter.get_attribute_values(self, attributes, indices_in_the_code)
mapping_from_attribute_to_result.update(result)
results = []
for attribute in attributes:
results.append(mapping_from_attribute_to_result[attribute])
return results
def get_values_in_store_async(self, indices_in_the_code, attributes):
if indices_in_the_code is None:
indices_in_the_code = self.code_indices
if len(indices_in_the_code) == 0:
return [[] for attribute in attributes]
mapping_from_attribute_to_result = {}
getters = self.select_getters_for(attributes)
if len(getters) > 1:
def new_request(index, getters, attributes, indices_in_the_code):
if index >= len(getters):
return None
getter = getters[index]
request = getter.get_attribute_values_async(self, attributes, indices_in_the_code)
def result_handler(inner, mapping):
mapping.update(inner())
request.add_result_handler(result_handler, (mapping_from_attribute_to_result,))
return request
request = ASyncRequestSequence(new_request, args=(getters,attributes, indices_in_the_code))
else:
for getter in getters:
request = getter.get_attribute_values_async(self, attributes, indices_in_the_code)
def result_handler(inner, mapping):
mapping.update(inner())
request.add_result_handler(result_handler, (mapping_from_attribute_to_result,))
def all_handler(inner, mapping):
inner()
results = []
for attribute in attributes:
results.append(mapping[attribute])
return results
request.add_result_handler(all_handler, (mapping_from_attribute_to_result,))
return request
def set_values_in_store(self, indices_in_the_code, attributes, values):
if indices_in_the_code is None:
indices_in_the_code = self.code_indices
if len(indices_in_the_code) == 0:
return
for setter in self.select_setters_for(attributes):
setter.set_attribute_values(self, attributes, values, indices_in_the_code)
def set_values_in_store_async(self, indices_in_the_code, attributes, values):
if indices_in_the_code is None:
indices_in_the_code = self.code_indices
if len(indices_in_the_code) == 0:
return
setters = self.select_setters_for(attributes)
if len(setters) > 1:
def new_request(index, setters, attributes, values, indices_in_the_code):
if index >= len(setters):
return None
setter = setters[index]
request = setter.set_attribute_values_async(self, attributes, values, indices_in_the_code)
return request
request = ASyncRequestSequence(new_request, args=(setters,attributes, values, indices_in_the_code))
else:
for setter in setters:
request = setter.set_attribute_values(self, attributes, values, indices_in_the_code)
return request
def remove_particles_from_store(self, indices_in_the_code):
if indices_in_the_code is None:
return
self.delete_particle_method(indices_in_the_code)
mapping_key = self.mapping_from_particle_key_to_index_in_the_code
mapping_index = self.mapping_from_index_in_the_code_to_particle_key
for i in indices_in_the_code:
key = mapping_index[i]
del mapping_index[i]
del mapping_key[key]
indices_to_delete = self.get_positions_of_indices(indices_in_the_code)
self.particle_keys = numpy.delete(self.particle_keys, indices_to_delete)
self.code_indices = numpy.delete(self.code_indices, indices_to_delete)
def get_all_keys_in_store(self):
return self.particle_keys
def get_all_indices_in_store(self):
return self.code_indices
def has_key_in_store(self, key):
return key in self.mapping_from_particle_key_to_index_in_the_code
def _get_keys_for_indices_in_the_code(self, indices):
result = []
for i in indices:
result.append(self.mapping_from_index_in_the_code_to_particle_key.get(i, 0))
return result
def _remove_indices(self, indices):
keys = []
for i in indices:
if i in self.mapping_from_index_in_the_code_to_particle_key:
key = self.mapping_from_index_in_the_code_to_particle_key[i]
del self.mapping_from_index_in_the_code_to_particle_key[i]
del self.mapping_from_particle_key_to_index_in_the_code[key]
keys.append(key)
indices_to_delete = self.get_key_indices_of(keys)
self.particle_keys = numpy.delete(self.particle_keys, indices_to_delete)
self.code_indices = numpy.delete(self.code_indices, indices_to_delete)
def _add_indices(self, indices):
keys = []
for i in indices:
if i in self.mapping_from_index_in_the_code_to_particle_key:
raise exceptions.AmuseException("adding an index '{0}' that is already managed, bookkeeping is broken".format(i))
newkey = next(base.UniqueKeyGenerator)
self.mapping_from_index_in_the_code_to_particle_key[i] = newkey
self.mapping_from_particle_key_to_index_in_the_code[newkey] = i
keys.append(newkey)
if len(self.particle_keys) > 0:
self.particle_keys = numpy.concatenate((self.particle_keys,
numpy.asarray(list(keys), dtype=self.particle_keys.dtype)))
self.code_indices = numpy.concatenate((self.code_indices,
numpy.asarray(list(indices), dtype=self.code_indices.dtype)))
else:
self.particle_keys = numpy.array(keys)
self.code_indices = numpy.array(indices)
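# Sketch (illustrative, not part of the original module) of the
# key <-> index bookkeeping kept by InCodeAttributeStorage: indices the
# code reports for self-created particles are assigned fresh AMUSE keys,
# after which both translation directions are available.
def _example_incode_bookkeeping(storage):
    # the indices 5, 6 and 7 are hypothetical values reported by a code
    storage._add_indices([5, 6, 7])
    keys = storage._get_keys_for_indices_in_the_code([6])
    assert storage.get_indices_of(keys)[0] == 6
    return keys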
class InCodeGridAttributeStorage(AbstractInCodeAttributeStorage):
"""
Manages grids stored in codes.
"""
def __init__(self,
code_interface,
get_range_method,
setters,
getters,
extra_keyword_arguments_for_getters_and_setters = {},
):
AbstractInCodeAttributeStorage.__init__(self, code_interface, setters, getters, extra_keyword_arguments_for_getters_and_setters)
self.get_range_method = get_range_method
self._indices_grid = None
def can_extend_attributes(self):
return False
def storage_shape(self):
try:
minmax_per_dimension = self.get_range_method(**self.extra_keyword_arguments_for_getters_and_setters)
result = []
for i in range(0, len(minmax_per_dimension), 2):
minval = minmax_per_dimension[i]
maxval = minmax_per_dimension[i+1]
result.append(maxval - minval + 1)
return tuple(result)
except:
import traceback
traceback.print_exc()
raise
def add_particles_to_store(self, keys, attributes = [], quantities = []):
raise exceptions.AmuseException("adding points to the grid is not implemented")
def remove_particles_from_store(self, keys):
raise exceptions.AmuseException("removing points from the grid is not implemented")
def _to_arrays_of_indices(self, index):
#imin, imax, jmin, jmax, kmin, kmax = self.get_range_method(**self.extra_keyword_arguments_for_getters_and_setters)
if self._indices_grid is None:
minmax_per_dimension = self.get_range_method(**self.extra_keyword_arguments_for_getters_and_setters)
result = []
for i in range(0, len(minmax_per_dimension), 2):
minval = minmax_per_dimension[i]
maxval = minmax_per_dimension[i+1]
result.append(slice(minval, maxval+1))
self._indices_grid = numpy.mgrid[tuple(result)]
indices = self._indices_grid
if index is None:
return indices
else:
return [x[index] for x in indices]
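    # Sketch of the cached index grid built above: for ranges 0..1 and
    # 0..2, numpy.mgrid yields one integer array per dimension with
    # shape (2, 3); a request for a sub-block simply slices each of them:
    #
    #     numpy.mgrid[slice(0, 2), slice(0, 3)][0]  ->  [[0 0 0], [1 1 1]]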
def get_values_in_store(self, indices, attributes):
array_of_indices = self._to_arrays_of_indices(indices)
mapping_from_attribute_to_result = {}
one_dimensional_array_of_indices = [x.reshape(-1) for x in array_of_indices]
for getter in self.select_getters_for(attributes):
result = getter.get_attribute_values(self, attributes, *one_dimensional_array_of_indices)
mapping_from_attribute_to_result.update(result)
results = []
for attribute in attributes:
returned_value = mapping_from_attribute_to_result[attribute]
if len(array_of_indices)==0:
value=returned_value
elif len(array_of_indices[0].shape) == 0:
value = returned_value[0]
else:
if len(returned_value)!=numpy.prod(array_of_indices[0].shape):
raise Exception("unexpected mismatch of array shapes")
if isinstance(returned_value,list):
returned_value=numpy.asarray(returned_value)
value = returned_value.reshape(array_of_indices[0].shape+returned_value.shape[1:])
results.append(value)
return results
def get_values_in_store_async(self, indices, attributes):
array_of_indices = self._to_arrays_of_indices(indices)
mapping_from_attribute_to_result = {}
one_dimensional_array_of_indices = [x.reshape(-1) for x in array_of_indices]
pool=None
for getter in self.select_getters_for(attributes):
request = getter.get_attribute_values_async(self, attributes, *one_dimensional_array_of_indices)
def result_handler(inner, mapping):
mapping.update(inner())
request.add_result_handler(result_handler, (mapping_from_attribute_to_result,))
pool=request.join(pool) # requests can join with None!
def all_handler(inner, mapping):
inner()
results = []
for attribute in attributes:
results.append(mapping[attribute])
return results
request=PoolDependentASyncRequest(pool)
request.add_result_handler(all_handler, (mapping_from_attribute_to_result,))
return request
def set_values_in_store(self, indices, attributes, quantities):
array_of_indices = self._to_arrays_of_indices(indices)
one_dimensional_array_of_indices = [x.reshape(-1) for x in array_of_indices]
if len(one_dimensional_array_of_indices)==0:
one_dimensional_values = [x for x in quantities]
else:
one_dimensional_values = [(x.reshape(-1) if is_quantity(x) else numpy.asanyarray(x).reshape(-1)) for x in quantities]
for setter in self.select_setters_for(attributes):
setter.set_attribute_values(self, attributes, one_dimensional_values, *one_dimensional_array_of_indices)
def set_values_in_store_async(self, indices, attributes, quantities):
array_of_indices = self._to_arrays_of_indices(indices)
one_dimensional_array_of_indices = [x.reshape(-1) for x in array_of_indices]
if len(one_dimensional_array_of_indices)==0:
one_dimensional_values = [x for x in quantities]
else:
one_dimensional_values = [(x.reshape(-1) if is_quantity(x) else numpy.asanyarray(x).reshape(-1)) for x in quantities]
selected_setters = list([setter for setter in self.select_setters_for(attributes)])
def next_request(index, setters):
if index < len(setters):
setter = setters[index]
return setter.set_attribute_values_async(self, attributes, one_dimensional_values, *one_dimensional_array_of_indices)
else:
return None
request = ASyncRequestSequence(next_request, args = (selected_setters,))
return request
def has_key_in_store(self, key):
return key in self.mapping_from_particle_to_index
def get_all_keys_in_store(self):
return Ellipsis
def __len__(self):
shape = self.storage_shape()
return shape[0] * shape[1] * shape[2]
def copy(self):
from .memory_storage import InMemoryGridAttributeStorage
copy = InMemoryGridAttributeStorage()
for attribute, attribute_values in self.mapping_from_attribute_to_quantities.items():
copy.mapping_from_attribute_to_quantities[attribute] = attribute_values.copy()
return copy
def get_defined_attribute_names(self):
return sorted(self.attributes)
def _get_writeable_attribute_names(self):
return self.writable_attributes
def get_defined_settable_attribute_names(self):
return sorted(self.writable_attributes)
class ParticleSpecificSelectSubsetMethod(object):
"""
Instances wrap a function that takes a particle index plus a list
offset and returns one index. This method is most
useful for returning links between particles (subparticles or
nearest neighbours). Instances also need a function to get
the number of links.
.. code-block:: python
output_index = instance.get_nearest_neighbors(index_of_the_particle, input_index)
The index or indices are converted to a particle subset.
"""
def __init__(self, method, get_number_of_particles_in_set_method = None, public_name = None):
self.method = method
self.public_name = public_name
self.get_number_of_particles_in_set_method = get_number_of_particles_in_set_method
def apply_on_all(self, particles):
raise Exception("Getting all links to other particles from all particles in a set is not implemented yet")
def apply_on_one(self, set, particle):
from_indices = set._private.attribute_storage.get_indices_of([particle.key,])
if not self.get_number_of_particles_in_set_method is None:
number_of_particles_in_set = self.get_number_of_particles_in_set_method(from_indices)[0]
indices = self.method([from_indices[0]] * number_of_particles_in_set, list(range(number_of_particles_in_set)))
else:
index = self.method()
indices = [index]
keys = set._private.attribute_storage._get_keys_for_indices_in_the_code(indices)
return particle.as_set()._subset(keys)
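# Sketch (illustrative) of how this wrapper is typically bound; the
# interface methods get_nearest_neighbor and get_number_of_neighbors are
# hypothetical names, not a verified AMUSE code API.
def _example_neighbour_links(code_interface, particle):
    select = ParticleSpecificSelectSubsetMethod(
        method=code_interface.get_nearest_neighbor,
        get_number_of_particles_in_set_method=code_interface.get_number_of_neighbors,
        public_name="nearest_neighbours",
    )
    # returns a subset with the neighbours of `particle`
    return select.apply_on_one(particle.particles_set, particle)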
# file: amuse-main/src/amuse/datamodel/memory_storage.py
import numpy
from numpy import ma
from amuse.units import quantities
from amuse.units.quantities import VectorQuantity
from amuse.units.quantities import is_quantity
from amuse.datamodel.base import *
from amuse.support import exceptions
from amuse.rfi.async_request import FakeASyncRequest
try:
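    # the compiled SimpleHash is only usable on 64-bit builds
    # (pointer itemsize >= 8 bytes); otherwise fall back below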
if numpy.uintp().itemsize<8: raise Exception()
from amuse.datamodel.simple_hash import SimpleHash
_SIMPLE_HASH_PRESENT_=True
except BaseException as ex:
_SIMPLE_HASH_PRESENT_=False
_PREFER_SORTED_KEYS_=True
class InMemoryAttributeStorage(AttributeStorage):
def __init__(self):
self.mapping_from_attribute_to_quantities = {}
self.particle_keys = numpy.zeros(0)
self.__version__ = 0
self.sorted_keys = numpy.zeros(0, dtype=numpy.int32)
self.sorted_indices = numpy.zeros(0)
self.index_array = numpy.zeros(0, dtype=numpy.int32)
self.keys_set = set([])
def can_extend_attributes(self):
return True
def remove_attribute_from_store(self, name):
del self.mapping_from_attribute_to_quantities[name]
def add_particles_to_store(self, keys, attributes = [], quantities = []):
if len(quantities) != len(attributes):
raise exceptions.AmuseException(
"you need to provide the same number of quantities as attributes, found {0} attributes and {1} list of values".format(
len(attributes), len(quantities)
)
)
if len(quantities) > 0 and len(keys) != len(quantities[0]):
raise exceptions.AmuseException(
"you need to provide the same number of values as particles, found {0} values and {1} particles".format(
len(quantities[0]), len(keys)
)
)
self.__version__ = self.__version__ + 1
self.index_array = numpy.arange(len(self.particle_keys) + len(keys))
if len(self.particle_keys) > 0:
previous_length = len(self.particle_keys)
self.append_to_storage(keys, attributes, quantities)
return self.index_array[previous_length:]
else:
self.setup_storage(keys, attributes, quantities)
return self.index_array
def setup_storage(self, keys, attributes, quantities):
self.mapping_from_attribute_to_quantities = {}
for attribute, quantity in zip(attributes, quantities):
storage = InMemoryAttribute.new_attribute(attribute, len(keys), quantity)
self.mapping_from_attribute_to_quantities[attribute] = storage
storage.set_values(None, quantity)
self.particle_keys = numpy.array(keys, dtype='uint64')
self.reindex()
def append_to_storage(self, keys, attributes, values):
for attribute, values_to_set in zip(attributes, values):
if attribute in self.mapping_from_attribute_to_quantities:
storage = self.mapping_from_attribute_to_quantities[attribute]
else:
storage = InMemoryAttribute.new_attribute(attribute, len(self.particle_keys), values_to_set)
self.mapping_from_attribute_to_quantities[attribute] = storage
storage.increase_to_length(len(self.particle_keys) + len(keys))
try:
storage.set_values(slice(len(self.particle_keys), len(self.particle_keys) + len(keys)), values_to_set)
except Exception as ex:
raise AttributeError("exception in setting attribute '{0}', error was '{1}'".format(attribute, ex))
old_length = len(self.particle_keys)
for attribute, attribute_values in list(self.mapping_from_attribute_to_quantities.items()):
attribute_values.increase_to_length(len(self.particle_keys) + len(keys))
self.particle_keys = numpy.concatenate((self.particle_keys, numpy.array(list(keys), dtype='uint64')))
self.reindex()
def get_values_in_store(self, indices, attributes):
results = []
for attribute in attributes:
if not attribute in self.mapping_from_attribute_to_quantities:
raise AttributeError('"{0}" not defined for grid'.format(attribute))
storage = self.mapping_from_attribute_to_quantities[attribute]
selected_values = storage.get_values(indices)
results.append(selected_values)
return results
def get_values_in_store_async(self, indices, attributes):
result = self.get_values_in_store(indices, attributes)
return FakeASyncRequest(result)
def set_values_in_store(self, indices, attributes, list_of_values_to_set):
for attribute, values_to_set in zip(attributes, list_of_values_to_set):
if attribute in self.mapping_from_attribute_to_quantities:
storage = self.mapping_from_attribute_to_quantities[attribute]
else:
storage = InMemoryAttribute.new_attribute(attribute, len(self.particle_keys), values_to_set)
self.mapping_from_attribute_to_quantities[attribute] = storage
try:
storage.set_values(indices, values_to_set)
except ValueError as ex:
# hack to allow setting values when a quantity with unit units.none
# is assigned to an attribute that stores plain values without units.
# note: an alternative would be to always store with units.none and
# have __getitem__ strip the unit; caveat: maintaining compatibility
# in the file format
if is_quantity(values_to_set) and not storage.has_units():
if not values_to_set.unit.base:
storage.set_values(indices, values_to_set.value_in(units.none))
else:
raise AttributeError("exception in setting attribute '{0}', error was '{1}'".format(attribute, ex))
else:
raise AttributeError("exception in setting attribute '{0}', error was '{1}'".format(attribute, ex))
def set_values_in_store_async(self, indices, attributes, list_of_values_to_set):
result = self.set_values_in_store(indices, attributes, list_of_values_to_set)
return FakeASyncRequest(result)
def has_key_in_store(self, key):
return key in self.keys_set
def get_all_keys_in_store(self):
return self.particle_keys
def get_all_indices_in_store(self):
return self.index_array
def __len__(self):
return len(self.particle_keys)
def copy(self):
copy = get_in_memory_attribute_storage_factory()()
copy.sorted_keys = self.sorted_keys.copy()
copy.sorted_indices = self.sorted_indices.copy()
copy.keys_set = self.keys_set.copy()
copy.particle_keys = self.particle_keys.copy()
copy.index_array = self.index_array.copy()
for attribute, attribute_values in self.mapping_from_attribute_to_quantities.items():
copy.mapping_from_attribute_to_quantities[attribute] = attribute_values.copy()
return copy
def get_value_of(self, index, attribute):
return self.get_value_in_store(index, attribute)
def get_indices_of(self, keys):
raise NotImplementedError()
def remove_particles_from_store(self, indices):
for attribute, attribute_values in list(self.mapping_from_attribute_to_quantities.items()):
attribute_values.remove_indices(indices)
self.particle_keys = numpy.delete(self.particle_keys,indices)
self.reindex()
self.__version__ = self.__version__ + 1
self.index_array = numpy.arange(len(self))
def reindex(self):
self.sorted_indices = numpy.argsort(self.particle_keys, kind='mergesort')
self.sorted_keys = self.particle_keys[self.sorted_indices]
self.keys_set = set(self.particle_keys)
self.index_array = numpy.arange(len(self))
def get_defined_attribute_names(self):
return sorted(self.mapping_from_attribute_to_quantities.keys())
def get_defined_settable_attribute_names(self):
return self.get_defined_attribute_names()
def _get_values_for_indices(self, indices, attributes):
results = []
for attribute in attributes:
attribute_values = self.mapping_from_attribute_to_quantities[attribute]
if indices is None:
selected_values = attribute_values
else:
selected_values = attribute_values.take(indices)
results.append(selected_values)
return results
def get_value_in_store(self, index, attribute):
attribute_values = self.mapping_from_attribute_to_quantities[attribute]
return attribute_values.get_value(index)
class InMemoryGridAttributeStorage(object):
def __init__(self, *number_of_points_in_each_direction):
self.mapping_from_attribute_to_quantities = {}
self.number_of_points_in_each_direction = number_of_points_in_each_direction
def can_extend_attributes(self):
return True
def storage_shape(self):
return self.number_of_points_in_each_direction
def add_particles_to_store(self, keys, attributes = [], quantities = []):
raise exceptions.AmuseException("adding points to the grid is not implemented")
def remove_particles_from_store(self, keys):
raise exceptions.AmuseException("removing points from the grid is not implemented")
def remove_attribute_from_store(self, name):
del self.mapping_from_attribute_to_quantities[name]
def get_values_in_store(self, indices, attributes):
results = []
for attribute in attributes:
if not attribute in self.mapping_from_attribute_to_quantities:
raise AttributeError('"{0}" not defined for grid'.format(attribute))
attribute_values = self.mapping_from_attribute_to_quantities[attribute]
#if indices is None:
# selected_values = attribute_values
#else:
# selected_values = attribute_values[indices]
results.append(attribute_values.get_values(indices))
return results
def set_values_in_store(self, indices, attributes, list_of_values_to_set):
for attribute, values_to_set in zip(attributes, list_of_values_to_set):
if attribute in self.mapping_from_attribute_to_quantities:
storage = self.mapping_from_attribute_to_quantities[attribute]
else:
storage = InMemoryAttribute.new_attribute(attribute, self.storage_shape(), values_to_set)
self.mapping_from_attribute_to_quantities[attribute] = storage
#storage.set_values(None, quantity)
#if is_quantity(values_to_set):
# attribute_values = VectorQuantity.zeros(
# (self.storage_shape()),
# values_to_set.unit,
# )
#else:
# dtype = numpy.asanyarray(values_to_set).dtype
# attribute_values = numpy.zeros((self.storage_shape()), dtype = dtype)
try:
storage.set_values(indices, values_to_set)
except ValueError as ex:
# hack to allow setting values when a quantity with unit units.none
# is assigned to an attribute that stores plain values without units.
# note: an alternative would be to always store with units.none and
# have __getitem__ strip the unit; caveat: maintaining compatibility
# in the file format
if is_quantity(values_to_set) and not storage.has_units():
if not values_to_set.unit.base:
storage.set_values(indices, values_to_set.value_in(units.none))
else:
raise AttributeError("exception in setting attribute '{0}', error was '{1}'".format(attribute, ex))
else:
raise AttributeError("exception in setting attribute '{0}', error was '{1}'".format(attribute, ex))
def has_key_in_store(self, key):
return key in self.mapping_from_particle_to_index
def get_all_keys_in_store(self):
return Ellipsis #numpy.s_[0:self.number_of_i], numpy.s_[0:self.number_of_j], numpy.s_[0:self.number_of_k]
def __len__(self):
return self.storage_shape()[0]
def copy(self):
copy = InMemoryGridAttributeStorage(*self.number_of_points_in_each_direction)
for attribute, attribute_values in self.mapping_from_attribute_to_quantities.items():
copy.mapping_from_attribute_to_quantities[attribute] = attribute_values.copy()
return copy
def get_defined_attribute_names(self):
return sorted(self.mapping_from_attribute_to_quantities.keys())
def _get_writeable_attribute_names(self):
return self.get_defined_attribute_names()
def get_defined_settable_attribute_names(self):
return self.get_defined_attribute_names()
class InMemoryAttributeStorageUseDictionaryForKeySet(InMemoryAttributeStorage):
def __init__(self):
InMemoryAttributeStorage.__init__(self)
self.mapping_from_particle_to_index = {}
def has_key_in_store(self, key):
return key in self.mapping_from_particle_to_index
def copy(self):
copy = type(self)()
copy.mapping_from_particle_to_index = self.mapping_from_particle_to_index.copy()
copy.particle_keys = self.particle_keys.copy()
copy.index_array = self.index_array.copy()
for attribute, attribute_values in self.mapping_from_attribute_to_quantities.items():
copy.mapping_from_attribute_to_quantities[attribute] = attribute_values.copy()
return copy
def get_indices_of(self, particles):
if particles is None:
return numpy.arange(0,len(self.particle_keys))
mapping_from_particle_to_index = self.mapping_from_particle_to_index
result = []
notfoundkeys = []
foundkeys = []
for index, particle_key in enumerate(particles):
try:
result.append(mapping_from_particle_to_index[particle_key])
foundkeys.append(particle_key)
except KeyError:
notfoundkeys.append(particle_key)
if not len(notfoundkeys) == 0:
raise exceptions.KeysNotInStorageException(
numpy.asarray(foundkeys),
numpy.asarray(result),
numpy.asarray(notfoundkeys)
)
return numpy.asarray(result)
def reindex(self):
new_index=dict(zip(self.particle_keys,range(len(self.particle_keys))))
self.mapping_from_particle_to_index = new_index
class InMemoryAttributeStorageUseSortedKeys(InMemoryAttributeStorage):
def __init__(self):
InMemoryAttributeStorage.__init__(self)
self.sorted_keys = []
self.sorted_indices = []
def has_key_in_store(self, key):
i=numpy.searchsorted(self.sorted_keys, key)
return i<len(self.sorted_keys) and self.sorted_keys[i]==key
def copy(self):
copy = type(self)()
copy.sorted_keys = self.sorted_keys.copy()
copy.sorted_indices = self.sorted_indices.copy()
copy.keys_set = self.keys_set.copy()
copy.particle_keys = self.particle_keys.copy()
copy.index_array = self.index_array.copy()
for attribute, attribute_values in self.mapping_from_attribute_to_quantities.items():
copy.mapping_from_attribute_to_quantities[attribute] = attribute_values.copy()
return copy
def get_indices_of(self, keys):
if keys is None:
return self.index_array
if len(self.particle_keys) == 0:
return ()
indices = numpy.searchsorted(self.sorted_keys, keys)
indices = numpy.where(indices >= len(self.sorted_keys), 0, indices)
foundkeys = self.sorted_keys[indices]
are_found = foundkeys == keys
are_all_found = numpy.all(are_found)
if not are_all_found:
arrayedkeys = numpy.asarray(keys)
notfoundkeys = arrayedkeys[numpy.logical_not(are_found)]
raise exceptions.KeysNotInStorageException(
arrayedkeys[are_found],
self.sorted_indices[indices[are_found]],
notfoundkeys
)
return self.sorted_indices[indices]
def reindex(self):
self.sorted_indices = numpy.argsort(self.particle_keys, kind='mergesort')
self.sorted_keys = self.particle_keys[self.sorted_indices]
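# Minimal sketch of the sorted-keys lookup used above: keep a sorted copy
# of the keys plus the argsort that produced it, so a whole batch of
# lookups is one numpy.searchsorted call plus an equality check to spot
# misses. Both arguments are assumed to be numpy arrays.
def _example_sorted_lookup(keys, wanted):
    sorted_indices = numpy.argsort(keys, kind='mergesort')
    sorted_keys = keys[sorted_indices]
    positions = numpy.searchsorted(sorted_keys, wanted)
    positions = numpy.where(positions >= len(sorted_keys), 0, positions)
    found = sorted_keys[positions] == wanted
    return sorted_indices[positions], found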
class InMemoryAttributeStorageUseSimpleHash(InMemoryAttributeStorage):
def __init__(self):
InMemoryAttributeStorage.__init__(self)
self._hash=SimpleHash()
def has_key_in_store(self, key):
return self._hash.key_present(key)
def copy(self):
copy = type(self)()
copy.particle_keys = self.particle_keys.copy()
copy.index_array = self.index_array.copy()
copy._hash.reindex(copy.particle_keys)
for attribute, attribute_values in self.mapping_from_attribute_to_quantities.items():
copy.mapping_from_attribute_to_quantities[attribute] = attribute_values.copy()
return copy
def get_indices_of(self, keys):
if keys is None:
return self.index_array
if len(self.particle_keys) == 0:
return ()
return self._hash.lookup(keys)
def reindex(self):
self._hash.reindex(self.particle_keys)
def __getstate__(self):
state=self.__dict__.copy()
state.pop("_hash")
return state
def __setstate__(self,state):
self.__dict__ = state
self._hash=SimpleHash()
if len(self.particle_keys):
self._hash.reindex(self.particle_keys)
def get_in_memory_attribute_storage_factory():
if _SIMPLE_HASH_PRESENT_:
return InMemoryAttributeStorageUseSimpleHash
elif _PREFER_SORTED_KEYS_:
return InMemoryAttributeStorageUseSortedKeys
else:
return InMemoryAttributeStorageUseDictionaryForKeySet
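# Example (sketch) of the factory above: the storage backend is picked at
# runtime; on 64-bit builds with the compiled SimpleHash present the
# hash-based store is used, otherwise the sorted-keys variant.
def _example_storage_factory():
    storage = get_in_memory_attribute_storage_factory()()
    storage.add_particles_to_store(numpy.array([10, 11, 12], dtype='uint64'))
    return storage.get_indices_of(numpy.array([11], dtype='uint64'))  # -> [1]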
class InMemoryAttribute(object):
def __init__(self, name):
self.name = name
def get_values(self, indices):
pass
def set_values(self, indices, values):
pass
def get_length(self):
return 0
def get_shape(self):
return 0
def increase_to_length(self, newlength):
pass
def copy(self):
pass
@classmethod
def _determine_shape(cls, length, values_to_set):
if isinstance(length, tuple):
vector_shape = values_to_set.shape
if len(vector_shape) > len(length):
return vector_shape
else:
return length
vector_shape = values_to_set.shape
if len(vector_shape) > 1 and len(values_to_set) == length:
return vector_shape
else:
return length
@classmethod
def new_attribute(cls, name, shape, values_to_set):
if is_quantity(values_to_set):
if values_to_set.is_vector() :
shape = cls._determine_shape(shape, values_to_set)
return InMemoryVectorQuantityAttribute(name, shape, values_to_set.unit)
elif values_to_set is None:
return InMemoryLinkedAttribute(name, shape)
else:
array = numpy.asanyarray(values_to_set)
dtype = array.dtype
shape = cls._determine_shape(shape, array)
if dtype.kind == 'S' or dtype.kind == 'U':
return InMemoryStringAttribute(name, shape, dtype)
elif dtype == object:
return InMemoryLinkedAttribute(name, shape)
else:
return InMemoryUnitlessAttribute(name, shape, dtype)
def get_value(self, index):
pass
def remove_indices(self, indices):
pass
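# Sketch of the dispatch in new_attribute above: quantities get a vector
# quantity store, string dtypes a string store, object dtypes a linked
# store, anything else a plain unitless numpy store. The late import of
# units keeps this sketch self-contained.
def _example_new_attribute_dispatch(length=2):
    from amuse.units import units
    mass = InMemoryAttribute.new_attribute("mass", length, [1.0, 2.0] | units.kg)
    name = InMemoryAttribute.new_attribute("name", length, numpy.array(["a", "b"]))
    flag = InMemoryAttribute.new_attribute("flag", length, numpy.array([True, False]))
    return (isinstance(mass, InMemoryVectorQuantityAttribute),
            isinstance(name, InMemoryStringAttribute),
            isinstance(flag, InMemoryUnitlessAttribute))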
class InMemoryVectorQuantityAttribute(InMemoryAttribute):
def __init__(self, name, shape, unit):
InMemoryAttribute.__init__(self, name)
self.quantity = VectorQuantity.zeros(
shape,
unit,
)
def get_values(self, indices):
if indices is None:
return self.quantity
else:
return self.quantity[indices]
def set_values(self, indices, values):
try:
if indices is None:
indices = slice(None)
self.quantity[indices] = values
except AttributeError:
if not is_quantity(values):
raise ValueError("Tried to set a non quantity value for an attribute ({0}) with a unit".format(self.name))
else:
raise
def get_shape(self):
return self.quantity.shape
def increase_to_length(self, newlength):
delta = newlength - len(self.quantity)
if delta == 0:
return
deltashape = list(self.quantity.shape)
deltashape[0] = delta
zeros_for_concatenation = VectorQuantity.zeros(deltashape, self.quantity.unit)
self.quantity.extend(zeros_for_concatenation)
def get_length(self):
return len(self.quantity)
def copy(self):
result = type(self)(self.name, self.get_shape(), self.quantity.unit)
result.set_values(None, self.get_values(None))
return result
def get_value(self, index):
return self.quantity[index]
def remove_indices(self, indices):
self.quantity._number = numpy.delete(self.quantity.number, indices)
def has_units(self):
return True
class InMemoryUnitlessAttribute(InMemoryAttribute):
def __init__(self, name, shape, dtype = 'float64'):
InMemoryAttribute.__init__(self, name)
self.values = numpy.zeros(
shape,
dtype = dtype
)
def get_values(self, indices):
if indices is None:
return self.values
else:
return self.values[indices]
def set_values(self, indices, values):
if indices is None:
indices = slice(None)
self.values[indices] = values
def get_length(self):
        return len(self.values)
def increase_to_length(self, newlength):
delta = newlength - len(self.values)
if delta == 0:
return
deltashape = list(self.values.shape)
deltashape[0] = delta
zeros_for_concatenation = numpy.zeros(deltashape, dtype = self.values.dtype)
self.values = numpy.concatenate([self.values, zeros_for_concatenation])
def get_shape(self):
return self.values.shape
def copy(self):
result = type(self)(self.name, self.get_shape(), self.values.dtype)
result.set_values(None, self.get_values(None))
return result
def get_value(self, index):
return self.values[index]
def remove_indices(self, indices):
self.values = numpy.delete(self.values, indices)
def has_units(self):
return False
class InMemoryStringAttribute(InMemoryUnitlessAttribute):
def set_values(self, indices, values):
if indices is None:
indices = slice(None)
if isinstance(values, str):
dtype=numpy.dtype(self.values.dtype.kind+str(max(1,len(values))))
else:
values_as_array = numpy.asarray(values, dtype=numpy.dtype(self.values.dtype.kind))
dtype = values_as_array.dtype
if dtype.itemsize > self.values.dtype.itemsize:
self.values = numpy.asarray(self.values, dtype=dtype)
self.dtype = dtype
self.values[indices] = values
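# Sketch of the dtype widening above: assigning a string longer than the
# current itemsize first re-allocates the backing array with a wider
# dtype, so previously stored values survive the assignment.
def _example_string_widening():
    attribute = InMemoryStringAttribute("name", 3, numpy.dtype("S1"))
    attribute.set_values(None, "x")
    attribute.set_values(slice(0, 1), "longer")  # widens dtype from S1 to S6
    return attribute.values.dtype.itemsize      # -> 6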
class InMemoryLinkedAttribute(InMemoryAttribute):
def __init__(self, name, shape):
InMemoryAttribute.__init__(self, name)
self.values = LinkedArray(numpy.empty(
shape,
dtype = object
))
def get_values(self, indices):
if indices is None:
objects = self.values
else:
objects = self.values[indices]
return objects
def set_values(self, indices, values):
if indices is None:
indices = slice(None)
self.values[indices] = values
def get_length(self):
        return len(self.values)
def increase_to_length(self, newlength):
delta = newlength - len(self.values)
if delta == 0:
return
deltashape = list(self.values.shape)
deltashape[0] = delta
zeros_for_concatenation = numpy.empty(deltashape, dtype = self.values.dtype)
self.values = LinkedArray(numpy.concatenate([self.values, zeros_for_concatenation]))
def get_shape(self):
return self.values.shape
def copy(self):
result = type(self)(self.name, self.get_shape())
result.set_values(None, self.get_values(None))
return result
def get_value(self, index):
value = self.values[index]
return value
def remove_indices(self, indices):
self.values = numpy.delete(self.values, indices)
def has_units(self):
return False
# file: amuse-main/src/amuse/datamodel/particles.py
from amuse.support.core import CompositeDictionary
from amuse.support.core import compare_version_strings
from amuse.support import exceptions
from amuse.datamodel.base import *
from amuse.datamodel import base
from amuse.datamodel.memory_storage import *
from amuse.datamodel import trees
from amuse.units import constants
from amuse.units import units
from amuse.units import quantities
from amuse.units import trigo
from amuse.units.quantities import Quantity
from amuse.units.quantities import new_quantity
from amuse.units.quantities import is_quantity
from amuse.units.quantities import as_vector_quantity
from amuse.units.quantities import zero
from amuse.units.quantities import AdaptingVectorQuantity
import random
import numpy
from numpy import ma
try:
from types import EllipsisType
except:
EllipsisType = type(Ellipsis)
class AbstractParticleSet(AbstractSet):
"""
Abstract superclass of all sets of particles.
This class defines common code for all particle sets.
Particle sets define dynamic attributes. Attributes
can be set and retrieved on the particles using common python
syntax. These attributes can only have values with units.
>>> particles = Particles(2)
>>> particles.mass = [10.0, 20.0] | units.kg
>>> particles.mass
quantity<[10.0, 20.0] kg>
>>> particles.mass = 1.0 | units.kg
>>> particles.mass
quantity<[1.0, 1.0] kg>
Particle sets can be iterated over.
>>> particles = Particles(2)
>>> particles.mass = [10.0, 20.0] | units.kg
>>> for particle in particles:
...     print(particle.mass)
...
10.0 kg
20.0 kg
Particle sets can be indexed.
>>> particles = Particles(3)
>>> particles.x = [10.0, 20.0, 30.0] | units.m
>>> particles[1].x
quantity<20.0 m>
Particle sets can be sliced.
>>> particles = Particles(3)
>>> particles.x = [10.0, 20.0, 30.0] | units.m
>>> particles[1:].x
quantity<[20.0, 30.0] m>
Particle sets can be copied.
>>> particles = Particles(3)
>>> particles.x = [10.0, 20.0, 30.0] | units.m
>>> copy = particles.copy()
>>> particles.x = 2.0 | units.m
>>> particles.x
quantity<[2.0, 2.0, 2.0] m>
>>> copy.x
quantity<[10.0, 20.0, 30.0] m>
Particle sets can be added together.
Attribute values are not stored by the resulting subset. The subset
provides a view on two or more sets of particles. Changing attributes
of the sum of sets will also change the attributes of each original
set, contrary to copy().
>>> particles = Particles(4)
>>> particles1 = particles[:2]
>>> particles1.x = [1.0, 2.0] | units.m
>>> particles2 = particles[2:]
>>> particles2.x = [3.0, 4.0] | units.m
>>> new_set = particles1 + particles2
>>> print(len(new_set))
4
>>> print(new_set.x)
[1.0, 2.0, 3.0, 4.0] m
Particle sets can be subtracted from each other.
Like with addition, attribute values are not stored by the resulting
subset.
>>> particles = Particles(4)
>>> particles.x = [1.0, 2.0, 3.0, 4.0] | units.m
>>> junk = particles[2:]
>>> new_set = particles - junk
>>> print(len(new_set))
2
>>> print(new_set.x)
[1.0, 2.0] m
>>> print(particles.x)
[1.0, 2.0, 3.0, 4.0] m
Particle sets can have instance based or global vector attributes.
A particle set stores a list of scalar values for each attribute.
Some attributes are more naturally accessed as lists
of vector values. Once defined, a particle set can
convert the scalar values of 2 or more attributes into one
vector attribute.
>>> from amuse.datamodel import particle_attributes
>>> particles = Particles(2)
>>> particles.x = [1.0 , 2.0] | units.m
>>> particles.y = [3.0 , 4.0] | units.m
>>> particles.z = [5.0 , 6.0] | units.m
>>> particles.add_vector_attribute("p", ["x","y","z"])
>>> particles.p
quantity<[[1.0, 3.0, 5.0], [2.0, 4.0, 6.0]] m>
>>> particles.p[0]
quantity<[1.0, 3.0, 5.0] m>
>>> particles.position # "position" is a global vector attribute, coupled to x,y,z
quantity<[[1.0, 3.0, 5.0], [2.0, 4.0, 6.0]] m>
"""
# this construct is needed to ensure that numpy sees particle sets
# as objects and not as sequences: if we put a particle set in a
# numpy object array, we want the set itself stored in a field of
# that array and not its contents (i.e. the individual particles).
# grids use the same trick
if compare_version_strings(numpy.__version__, '1.7.0') < 0:
__array_interface__ = {'shape':() }
else:
__array_interface__ = {'shape':(),'typestr':'|O4' }
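    # Sketch of what the scalar-shaped interface buys (illustrative):
    #
    #     cell = numpy.empty(1, dtype=object)
    #     cell[0] = some_particle_set   # stored as one object; the set is
    #                                   # not expanded into its particles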
GLOBAL_DERIVED_ATTRIBUTES = {}
def __init__(self, original = None):
AbstractSet.__init__(self, original)
def __delattr__(self, name):
self.remove_attribute_from_store(name)
#
# Particle storage interface
#
def remove_attribute_from_store(self, name):
pass
def remove_particles_from_store(self, indices):
pass
def get_values_in_store(self, keys, attributes):
pass
def get_attribute_names_defined_in_store(self):
return []
def get_settable_attribute_names_defined_in_store(self):
return []
def get_indices_of_keys(self, keys):
pass
#
#
#
def _values_of_particle(self, index):
attributes = self.get_attribute_names_defined_in_store()
values = self.get_values_in_store(numpy.asarray([index]), attributes)
for attribute, val in zip(attributes, values):
yield attribute, val[0]
#
# public API
#
def __iter__(self):
original_set = self._original_set()
for key, index in zip(self.get_all_keys_in_store(), self.get_all_indices_in_store()):
yield original_set._get_particle_unsave(key, index)
def get_all_particles_at(self, *indices):
all_keys = self.get_all_keys_in_store()
selected_keys = [all_keys[x] for x in indices]
return self._subset(selected_keys)
def __str__(self):
"""
Display string of a particle set.
>>> p0 = Particle(10)
>>> p1 = Particle(11)
>>> particles = Particles()
>>> particles.add_particle(p0) # doctest:+ELLIPSIS
<amuse.datamodel.particles.Particle object at ...>
>>> particles.add_particle(p1) # doctest:+ELLIPSIS
<amuse.datamodel.particles.Particle object at ...>
>>> particles.x = [4.0 , 3.0] | units.m
>>> particles.y = [5.0 , 2.0] | units.km
>>> print(particles)
key x y
- m km
==================== =========== ===========
10 4.000e+00 5.000e+00
11 3.000e+00 2.000e+00
==================== =========== ===========
"""
return self.to_string()
def to_string(self, attributes_to_show = None, split_at = 20):
"""
Display string of a particle set.
>>> p0 = Particle(10)
>>> p1 = Particle(11)
>>> particles = Particles()
>>> particles.add_particle(p0) # doctest:+ELLIPSIS
<amuse.datamodel.particles.Particle object at ...>
>>> particles.add_particle(p1) # doctest:+ELLIPSIS
<amuse.datamodel.particles.Particle object at ...>
>>> particles.x = [4.0 , 3.0] | units.m
>>> particles.y = [5.0 , 2.0] | units.km
>>> print(particles.to_string())
key x y
- m km
==================== =========== ===========
10 4.000e+00 5.000e+00
11 3.000e+00 2.000e+00
==================== =========== ===========
"""
attributes = sorted(self.get_attribute_names_defined_in_store())
if attributes_to_show:
attributes = [x for x in attributes if x in attributes_to_show]
if len(self) == 0:
return self.empty_particles_set_string(attributes)
format_float = '{0: >11.3e}'.format
format_str20 = '{0: >20}'.format
format_str11 = '{0!s: >11}'.format
columns = [[format_str11(x)] for x in attributes]
columns.insert(0,[format_str20('key')])
all_values = self.get_values_in_store(self.get_all_indices_in_store(), attributes)
for index, quantity in enumerate(all_values):
column = columns[index + 1]
if hasattr(quantity, 'unit'):
column.append(format_str11(str(quantity.unit)))
quantity = quantity.number
else:
column.append(format_str11('none'))
column.append('=' * 11)
if len(quantity) > split_at * 2:
if isinstance(quantity, LinkedArray):
values_to_show = list(map(format_str11,quantity[:split_at].to_print_list()))
values_to_show.append(format_str11('...'))
values_to_show.extend(map(format_str11,quantity[-split_at:].to_print_list()))
elif hasattr(quantity, 'dtype'):
if numpy.issubdtype(quantity.dtype, numpy.floating):
values_to_show = list(map(format_float,quantity[:split_at]))
values_to_show.append(format_str11('...'))
values_to_show.extend(map(format_float,quantity[-split_at:]))
else:
values_to_show = list(map(format_str11,quantity[:split_at]))
values_to_show.append(format_str11('...'))
values_to_show.extend(map(format_str11,quantity[-split_at:]))
else:
values_to_show = list(map(format_str11,quantity[:split_at]))
values_to_show.append(format_str11('...'))
values_to_show.extend(map(format_str11,quantity[-split_at:]))
else:
if isinstance(quantity, LinkedArray):
values_to_show = list(map(format_str11,quantity.to_print_list()))
elif hasattr(quantity, 'dtype'):
if numpy.issubdtype(quantity.dtype, numpy.floating):
try:
values_to_show = list(map(format_float,quantity))
except ValueError:
values_to_show = list(map(format_str11,quantity))
else:
values_to_show = list(map(format_str11,quantity))
else:
values_to_show = list(map(format_str11, quantity))
column.extend(values_to_show)
column.append('=' * 11)
column = columns[0]
column.append(format_str20("-"))
column.append('=' * 20)
particle_keys = self.get_all_keys_in_store()
if len(particle_keys) > split_at * 2:
values_to_show = list(map(format_str20, particle_keys[:split_at]))
values_to_show.append(format_str20('...'))
values_to_show.extend(map(format_str20, particle_keys[-split_at:]))
else:
values_to_show = list(map(format_str20,particle_keys))
column.extend(values_to_show)
column.append('=' * 20)
rows = []
for i in range(len(columns[0])):
row = [x[i] for x in columns]
rows.append(row)
lines = [' '.join(x) for x in rows]
return '\n'.join(lines)
def _get_particle(self, key):
if self.has_key_in_store(key):
return Particle(key, self._original_set())
else:
return None
def _get_particle_unsave(self, key, index = None):
return Particle(
key,
self._original_set(),
set_index = index,
set_version = self._get_version()
)
def can_extend_attributes(self):
return self._original_set().can_extend_attributes()
def add_attribute_domain(self, namespace):
self._derived_attributes[namespace] = DomainAttribute(namespace)
def _is_superset(self):
return False
def as_binary_tree(self, name_of_first_child = 'child1', name_of_second_child = 'child2'):
return trees.ChildTreeOnParticleSet(self, (name_of_first_child, name_of_second_child))
def new_binary_tree_wrapper(self, name_of_first_child = 'child1', name_of_second_child = 'child2'):
return trees.ChildTreeOnParticleSet(self, (name_of_first_child, name_of_second_child))
def copy(self, memento = None, keep_structure = False, filter_attributes = lambda particle_set, x : True):
attributes = self.get_attribute_names_defined_in_store()
attributes = [x for x in attributes if filter_attributes(self, x)]
keys = self.get_all_keys_in_store()
indices = self.get_all_indices_in_store()
values = self.get_values_in_store(indices, attributes)
result = self._factory_for_new_collection()()
if memento is None:
memento = {}
memento[id(self._original_set())] = result
converted = []
for x in values:
if isinstance(x, (LinkedArray, ParticlesSubset)):
converted.append(x.copy(memento, keep_structure, filter_attributes))
else:
converted.append(x)
result.add_particles_to_store(keys, attributes, converted)
object.__setattr__(result, "_derived_attributes", CompositeDictionary(self._derived_attributes))
result._private.collection_attributes = self._private.collection_attributes._copy_for_collection(result)
return result
def copy_to_new_particles(self, keys = None, keys_generator = None, memento = None, keep_structure = False, filter_attributes = lambda particle_set, x : True):
if keys_generator is None:
keys_generator = base.UniqueKeyGenerator
my_keys = self.get_all_keys_in_store()
if not keys is None:
if len(keys) != len(my_keys):
raise Exception('not enough new keys given for the copy')
else:
particle_keys = keys
else:
particle_keys = keys_generator.next_set_of_keys(len(my_keys))
attributes = self.get_attribute_names_defined_in_store()
attributes = [x for x in attributes if filter_attributes(self, x)]
indices = self.get_all_indices_in_store()
values = self.get_values_in_store(indices, attributes)
result = self._factory_for_new_collection()()
if memento is None:
memento = {}
converted = []
for x in values:
if isinstance(x, LinkedArray):
converted.append(x.copy(memento, keep_structure, filter_attributes))
else:
converted.append(x)
memento[id(self._original_set())] = result
result.add_particles_to_store(particle_keys, attributes, converted)
object.__setattr__(result, "_derived_attributes", CompositeDictionary(self._derived_attributes))
result._private.collection_attributes = self._private.collection_attributes._copy_for_collection(result)
return result
def _factory_for_new_collection(self):
return Particles
def empty_copy(self):
"""
Creates a new in-memory set and copies the particles to it.
The attributes and values are not copied. The history
of the set is not copied over.
>>> from amuse.datamodel import Particles
>>> from amuse.units import units
>>> original = Particles(2)
>>> original.mass = 0 | units.m
>>> print(hasattr(original, "mass"))
True
>>> print(len(original))
2
>>> copy = original.empty_copy()
>>> print(hasattr(copy, "mass"))
False
>>> print(len(copy))
2
"""
keys = self.get_all_keys_in_store()
result = Particles()
result.add_particles_to_store(keys, [],[])
return result
def copy_values_of_attribute_to(self, attribute_name, particles):
"""
Copy values of one attribute from this set to the
other set. Will only copy values for the particles
in both sets. See also :meth:`synchronize_to`.
If you need to do this a lot, setup a dedicated
channel.
>>> particles1 = Particles(2)
>>> particles1.x = [1.0, 2.0] | units.m
>>> particles2 = particles1.copy()
>>> print(particles2.x)
[1.0, 2.0] m
>>> p3 = particles1.add_particle(Particle())
>>> particles1.x = [3.0, 4.0, 5.0] | units.m
>>> particles1.copy_values_of_attribute_to("x", particles2)
>>> print(particles2.x)
[3.0, 4.0] m
"""
channel = self.new_channel_to(particles)
channel.copy_attributes([attribute_name])
def copy_values_of_attributes_to(self, attribute_names, particles):
channel = self.new_channel_to(particles)
channel.copy_attributes(attribute_names)
def new_channel_to(self, other, attributes=None, target_names=None):
return ParticleInformationChannel(self, other, attributes, target_names)
def __add__(self, particles):
"""
Returns a particle subset, composed of the given
particle(s) and this particle set. Attribute values are
not stored by the subset. The subset provides a view
on two or more sets of particles.
:parameter particles: (set of) particle(s) to be added to self.
>>> particles = Particles(4)
>>> particles1 = particles[:2]
>>> particles1.x = [1.0, 2.0] | units.m
>>> particles2 = particles[2:]
>>> particles2.x = [3.0, 4.0] | units.m
>>> new_set = particles1 + particles2
>>> new_set # doctest:+ELLIPSIS
<amuse.datamodel.particles.ParticlesSubset object at 0x...>
>>> print(len(new_set))
4
>>> print(new_set.x)
[1.0, 2.0, 3.0, 4.0] m
"""
if isinstance(particles, Particle):
particles = particles.as_set()
original_particles_set = self._original_set()
if not original_particles_set is particles._original_set():
raise exceptions.AmuseException("Can't create new subset from particles belonging to "
"separate particle sets. Try creating a superset instead.")
keys = list(self.key) + list(particles.key)
new_set = ParticlesSubset(original_particles_set, keys)
if new_set.has_duplicates():
raise exceptions.AmuseException("Unable to add a particle, because it was already part of this set.")
return new_set
def __or__(self, particles):
"""
Returns a particle superset, composed of the given
particle(s) and this particle set.
:parameter particles: (set of) particle(s) to be added to self.
>>> particles1 = Particles(2)
>>> particles1.x = [1.0, 2.0] | units.m
>>> particles2 = Particles(2)
>>> particles2.x = [3.0, 4.0] | units.m
>>> new_set = particles1 | particles2
>>> new_set # doctest:+ELLIPSIS
<amuse.datamodel.particles.ParticlesSuperset object at 0x...>
>>> print(len(new_set))
4
>>> print(new_set.x)
[1.0, 2.0, 3.0, 4.0] m
"""
if isinstance(particles, Particle):
particles = particles.as_set()
original_particles_set1 = self._original_set()
original_particles_set2 = particles._original_set()
return ParticlesSuperset((original_particles_set1, original_particles_set2))
def __sub__(self, particles):
"""
Returns a subset of the set without the given particle(s)
Attribute values are not stored by the subset. The subset
provides a view on two or more sets of particles.
:parameter particles: (set of) particle(s) to be subtracted from self.
>>> particles = Particles(4)
>>> particles.x = [1.0, 2.0, 3.0, 4.0] | units.m
>>> junk = particles[2:]
>>> new_set = particles - junk
>>> new_set # doctest:+ELLIPSIS
<amuse.datamodel.particles.ParticlesSubset object at 0x...>
>>> print(len(new_set))
2
>>> print(new_set.x)
[1.0, 2.0] m
>>> print(particles.x)
[1.0, 2.0, 3.0, 4.0] m
"""
if isinstance(particles, Particle):
particles = particles.as_set()
new_keys = []
new_keys.extend(self.get_all_keys_in_store())
subtract_keys = particles.get_all_keys_in_store()
for key in subtract_keys:
if key in new_keys:
new_keys.remove(key)
else:
raise exceptions.AmuseException("Unable to subtract a particle, because it is not part of this set.")
return self._subset(new_keys)
def add_particles(self, particles):
"""
Adds particles from the supplied set to this set. Attributes
and values are copied over.
.. note::
For performance reasons the particles
are not checked for duplicates. When the same particle
is part of both sets errors may occur.
:parameter particles: set of particles to copy values from
>>> particles1 = Particles(2)
>>> particles1.x = [1.0, 2.0] | units.m
>>> particles2 = Particles(2)
>>> particles2.x = [3.0, 4.0] | units.m
>>> particles1.add_particles(particles2) # doctest:+ELLIPSIS
<amuse.datamodel.particles.ParticlesSubset object at 0x...>
>>> print(len(particles1))
4
>>> print(particles1.x)
[1.0, 2.0, 3.0, 4.0] m
"""
if len(particles) == 0:
return ParticlesSubset(self._original_set(),[])
attributes = particles.get_attribute_names_defined_in_store()
attributes= [x for x in attributes if x not in self._derived_attributes]
indices = particles.get_all_indices_in_store()
keys = particles.get_all_keys_in_store()
values = particles.get_values_in_store(indices, attributes)
values = [self._convert_from_entities_or_quantities(x) for x in values]
converted = []
for x in values:
if isinstance(x, LinkedArray):
converted.append(x.copy_with_link_transfer(particles, self))
else:
converted.append(x)
try:
self.add_particles_to_store(keys, attributes, converted)
except exceptions.MissingAttributesAmuseException as caught_exception:
for attribute_name in caught_exception.missing_attributes:
if attribute_name in particles._derived_attributes:
attributes.append(attribute_name)
converted.append(getattr(particles, attribute_name))
else:
raise
self.add_particles_to_store(keys, attributes, converted)
return ParticlesSubset(self._original_set(),keys)
def add_particle(self, particle):
"""
Add one particle to the set.
:parameter particle: particle to add
>>> particles = Particles()
>>> print(len(particles))
0
>>> particle = Particle()
>>> particle.x = 1.0 | units.m
>>> particles.add_particle(particle) # doctest:+ELLIPSIS
<amuse.datamodel.particles.Particle object at ...>
>>> print(len(particles))
1
>>> print(particles.x)
[1.0] m
"""
return self.add_particles(particle.as_set())[0]
def ensure_presence_of(self, x):
"""
Add one particle to the set, but only if not already in set.
:parameter particle: particle to add
>>> particles = Particles()
>>> print(len(particles))
0
>>> particle = Particle()
>>> p1=particles.ensure_presence_of(particle) # doctest:+ELLIPSIS
>>> print(len(particles))
1
>>> p2=particles.ensure_presence_of(particle) # doctest:+ELLIPSIS
>>> print(len(particles))
1
>>> p1==p2
True
>>> particle2 = Particle()
>>> set=particle.as_set()
>>> p3=set.add_particle(particle2)
>>> set2=particles.ensure_presence_of(set)
>>> len(particles)
2
>>> len(set2)
2
"""
try:
not_in_set=x-x.get_intersecting_subset_in(self)
self.add_particles(not_in_set)
return x.get_intersecting_subset_in(self)
except:
if x in self:
return x.as_particle_in_set(self)
else:
return self.add_particle(x)
def remove_particles(self, particles):
"""
Removes particles from the supplied set from this set.
:parameter particles: set of particles to remove from this set
>>> particles1 = Particles(2)
>>> particles1.x = [1.0, 2.0] | units.m
>>> particles2 = Particles()
>>> particles2.add_particle(particles1[0]) # doctest:+ELLIPSIS
<amuse.datamodel.particles.Particle object at ...>
>>> particles1.remove_particles(particles2)
>>> print(len(particles1))
1
>>> print(particles1.x)
[2.0] m
"""
if len(particles) == 0:
return
indices = self.get_indices_of_keys(particles.get_all_keys_in_store())
self.remove_particles_from_store(indices)
def remove_particle(self, particle):
"""
Removes a particle from this set.
Result is undefined if particle is not part of the set
:parameter particle: particle to remove from this set
>>> particles1 = Particles(2)
>>> particles1.x = [1.0, 2.0] | units.m
>>> particles1.remove_particle(particles1[0])
>>> print(len(particles1))
1
>>> print(particles1.x)
[2.0] m
"""
self.remove_particles(particle.as_set())
def synchronize_to(self, other_particles):
"""
Synchronize the particles of this set
with the contents of the provided set.
After this call the `other_particles` set will have
the same particles as this set.
This call will check if particles have been removed or
added; it will not copy values of existing particles
over.
:parameter other_particles: particle set which has to be updated
>>> particles = Particles(2)
>>> particles.x = [1.0, 2.0] | units.m
>>> copy = particles.copy()
>>> new_particle = Particle()
>>> new_particle.x = 3.0 | units.m
>>> particles.add_particle(new_particle) # doctest:+ELLIPSIS
<amuse.datamodel.particles.Particle object at ...>
>>> print(particles.x)
[1.0, 2.0, 3.0] m
>>> print(copy.x)
[1.0, 2.0] m
>>> particles.synchronize_to(copy)
>>> print(copy.x)
[1.0, 2.0, 3.0] m
"""
other_keys = set(other_particles.get_all_keys_in_store())
my_keys = set(self.get_all_keys_in_store())
added_keys = numpy.asarray(list(my_keys - other_keys))
removed_keys = other_keys - my_keys
removed_keys = list(removed_keys)
if len(removed_keys) > 0:
other_particles.remove_particles_from_store(other_particles.get_indices_of_keys(removed_keys))
if len(added_keys) > 0:
added_indices = self.get_indices_of_keys(added_keys)
if len(added_indices) > 1:
sort_indices = numpy.argsort(added_indices)
added_indices = added_indices[sort_indices]
added_keys = added_keys[sort_indices]
attributes = self.get_attribute_names_defined_in_store()
attributes= [x for x in attributes if x not in other_particles._derived_attributes]
values = self.get_values_in_store(added_indices, attributes)
converted = []
memento = {}
for x in values:
if isinstance(x, LinkedArray):
converted.append(x.copy_with_link_transfer(self._original_set(), other_particles, True, memento))
#converted.append(x.copy(memento, False))
else:
converted.append(x)
try:
other_particles.add_particles_to_store(added_keys, attributes, converted)
except exceptions.MissingAttributesAmuseException as caught_exception:
for attribute_name in caught_exception.missing_attributes:
if attribute_name in self._derived_attributes:
attributes.append(attribute_name)
converted.append(getattr(self._subset(added_keys), attribute_name))
else:
raise
other_particles.add_particles_to_store(added_keys, attributes, converted)
def compressed(self):
return self
def get_valid_particles_mask(self):
return numpy.ones(len(self), dtype = bool)
def as_set(self):
"""
Returns a subset view on this set. The subset
will contain all particles of this set.
>>> particles = Particles(3)
>>> particles.x = [1.0, 2.0, 3.0] | units.m
>>> subset = particles.as_set()
>>> print(subset.x)
[1.0, 2.0, 3.0] m
>>> print(particles.x)
[1.0, 2.0, 3.0] m
"""
return self._subset(self.get_all_keys_in_store())
def select(self, selection_function, attributes):
"""
Returns a subset view on this set. The subset
will contain all particles for which the selection
function returned True. The selection function
is called with scalar quantities defined by
the attributes parameter
>>> particles = Particles(3)
>>> particles.mass = [10.0, 20.0, 30.0] | units.kg
>>> particles.x = [1.0, 2.0, 3.0] | units.m
>>> subset = particles.select(lambda x : x > 15.0 | units.kg, ["mass"])
>>> print(subset.mass)
[20.0, 30.0] kg
>>> print(subset.x)
[2.0, 3.0] m
"""
keys = self.get_all_keys_in_store()
#values = self._get_values(keys, attributes) #fast but no vectors
values = [getattr(self, x) for x in attributes]
selected_keys = []
for index in range(len(keys)):
key = keys[index]
arguments = [None] * len(attributes)
for attr_index, attribute in enumerate(attributes):
arguments[attr_index] = values[attr_index][index]
if selection_function(*arguments):
selected_keys.append(key)
return self._subset(selected_keys)
def select_array(self, selection_function, attributes = ()):
"""
Returns a subset view on this set. The subset
will contain all particles for which the selection
function returned True. The selection function
is called with vector quantities containing all
the values for the attributes parameter.
This function can be faster than the select function
as it works on entire arrays. The selection_function
is called once.
>>> particles = Particles(3)
>>> particles.mass = [10.0, 20.0, 30.0] | units.kg
>>> particles.x = [1.0, 2.0, 3.0] | units.m
>>> subset = particles.select_array(lambda x : x > 15.0 | units.kg, ["mass"])
>>> print(subset.mass)
[20.0, 30.0] kg
>>> print(subset.x)
[2.0, 3.0] m
>>> particles = Particles(1000)
>>> particles.x = units.m.new_quantity(numpy.arange(1,1001))
>>> subset = particles.select_array(lambda x : x > (500 | units.m), ("x",) )
>>> print(len(subset))
500
"""
keys = self.get_all_keys_in_store()
#values = self._get_values(keys, attributes) #fast but no vectors
quantities = [getattr(self, x) for x in attributes]
selections = selection_function(*quantities)
selected_keys = numpy.compress(selections, keys)
return self._subset(selected_keys)
def difference(self, other):
"""
Returns a new subset containing the difference between
this set and the provided set.
>>> particles = Particles(3)
>>> particles.mass = [10.0, 20.0, 30.0] | units.kg
>>> particles.x = [1.0, 2.0, 3.0] | units.m
>>> subset = particles.select(lambda x : x > 15.0 | units.kg, ["mass"])
>>> less_than_15kg = particles.difference(subset)
>>> len(subset)
2
>>> len(less_than_15kg)
1
"""
return self.as_set().difference(other)
def get_timestamp(self):
return None
def has_duplicates(self):
"""
Returns True when a set contains a particle with the
same key more than once. Particles with the same
key are interpreted as the same particles.
>>> particles = Particles()
>>> p1 = particles.add_particle(Particle(1))
>>> p2 = particles.add_particle(Particle(2))
>>> particles.has_duplicates()
False
>>> p3 = particles.add_particle(Particle(1))
>>> particles.has_duplicates()
True
>>> p3 == p1
True
"""
return len(self) != len(set(self.get_all_keys_in_store()))
def _subset(self, keys):
return ParticlesSubset(self._original_set(), keys)
def _masked_subset(self, keys):
return ParticlesMaskedSubset(self._original_set(), keys)
def reversed(self):
"""
Returns a subset with the same particles, but with reversed
sequential order (the first particle will become last)
>>> particles = Particles(3)
>>> particles.radius = [1.0, 2.0, 3.0] | units.m
>>> r = particles.reversed()
>>> print(r.radius)
[3.0, 2.0, 1.0] m
"""
keys = self.get_all_keys_in_store()
return self._subset(keys[::-1])
def sorted_by_attribute(self, attribute, kind='mergesort'):
"""
Returns a subset with the same particles, but sorted
using the given attribute name
:argument kind: the sort method; for supported kinds see
the numpy.sort documentation
>>> particles = Particles(3)
>>> particles.mass = [2.0, 3.0, 1.0] | units.kg
>>> particles.radius = [1.0, 2.0, 3.0] | units.m
>>> sorted = particles.sorted_by_attribute('mass')
>>> print(sorted.mass)
[1.0, 2.0, 3.0] kg
>>> print(sorted.radius)
[3.0, 1.0, 2.0] m
"""
return self.__getitem__(getattr(self, attribute).argsort(kind=kind))
def sorted_by_attributes(self, *attributes):
"""
Returns a subset with the same particles, but sorted
using the given attribute names. The last attribute name
in the call is used for the primary sort order, the
second-to-last attribute name for the secondary sort order,
and so on. See also numpy.lexsort
>>> particles = Particles(4)
>>> particles.mass = [2.0, 3.0, 1.0, 4.0] | units.kg
>>> particles.radius = [3.0, 2.0, 1.0, 2.0] | units.m
>>> sorted = particles.sorted_by_attributes('mass', 'radius')
>>> print(sorted.radius)
[1.0, 2.0, 2.0, 3.0] m
>>> print(sorted.mass)
[1.0, 3.0, 4.0, 2.0] kg
"""
indices = self.get_all_indices_in_store()
keys = self.get_all_keys_in_store()
values = self.get_values_in_store(indices, attributes)
values = [x.number for x in values]
sorted_indices = numpy.lexsort(values)
return self._subset(keys[sorted_indices])
def __dir__(self):
"""
Utility function for introspection of particle objects
>>> particles = Particles(3)
>>> particles.mass = [10.0, 20.0, 30.0] | units.kg
>>> particles.x = [1.0, 2.0, 3.0] | units.m
>>> print('mass' in dir(particles))
True
>>> print('x' in dir(particles))
True
"""
result = []
result.extend(dir(type(self)))
result.extend(self._attributes_for_dir())
return result
def __contains__(self, particle):
"""
Check if a particle is part of a set
>>> particles = Particles(3)
>>> particles.mass = [10.0, 20.0, 30.0] | units.kg
>>> p1 = particles[1]
>>> print(p1 in particles)
True
>>> p4 = Particle()
>>> print(p4 in particles)
False
"""
return isinstance(particle, Particle) and self.has_key_in_store(particle.key)
def all_attributes(self):
result = []
result.append('key')
result.extend(self._attributes_for_dir())
return result
def is_empty(self):
return self.__len__()==0
def get_intersecting_subset_in(self, other):
selected_keys = [x for x in self.get_all_keys_in_store() if other.has_key_in_store(x)]
return other._subset(selected_keys)
def _as_masked_subset_in(self, other):
keys = numpy.ma.array(self.get_all_keys_in_store(), dtype='uint64')
keys.mask = ~self.get_valid_particles_mask()
return other._masked_subset(keys)
def random_sample(self, number_of_particles):
return self.__getitem__(random.sample(range(len(self)), number_of_particles))
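# Illustrative usage sketch (not part of the original source): random_sample
# draws the requested number of distinct particles and returns them as a subset.
# >>> particles = Particles(10)
# >>> sample = particles.random_sample(3)
# >>> len(sample)
# 3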
def empty_particles_set_string(self, attributes = ()):
result = 'empty particles set ('+self.__class__.__name__+')'
if len(attributes) > 0:
sorted_attributes = sorted(attributes)
sorted_attributes.insert(0, 'key')
header = 'attributes:'
result += '\n' + header
for x in self.divide_attributes(sorted_attributes):
result += '\n'
result += ' ' * len(header)
result += ','.join(x)
return result
def divide_attributes(self, attributes = (), n = 3):
y = []
for x in attributes:
y.append(x)
if len(y) == n:
yield y
y = []
if len(y) > 0:
yield y
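# Illustrative check of the chunking above (assumes the trailing-yield fix):
# >>> particles = Particles()
# >>> list(particles.divide_attributes(('key', 'a', 'b', 'c'), 3))
# [['key', 'a', 'b'], ['c']]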
def get_all_values_of_attribute_in_store(self, attribute):
return self.get_values_in_store(None, [attribute])[0]
def __setattr__(self, name_of_the_attribute, value):
value = self.check_attribute(value)
if name_of_the_attribute in self._derived_attributes:
self._derived_attributes[name_of_the_attribute].set_values_for_entities(self, value)
else:
self.set_values_in_store(None, [name_of_the_attribute], [self._convert_from_entities_or_quantities(value)])
def get_containing_set(self):
return self
def get_subsets(self):
raise Exception("not implemented: not a ParticlesSuperset or derived set")
def get_subset(self, name):
raise Exception("not implemented: not a ParticlesSuperset or derived set")
class Particles(AbstractParticleSet):
"""
A set of particles. Attributes and values are stored in
a private storage model. This storage model can store
the values in the Python memory space, in the memory space
of the code or in an HDF5 file. By default the storage
model is in memory.
"""
def __init__(self, size = 0, storage = None, keys = None, keys_generator = None, particles = None, is_working_copy = True, **attributes):
AbstractParticleSet.__init__(self)
self._private.version = 0
self._private.is_working_copy = is_working_copy
if storage is None:
self._private.attribute_storage = get_in_memory_attribute_storage_factory()()
else:
self._private.attribute_storage = storage
if keys_generator is None:
keys_generator = base.UniqueKeyGenerator
if not particles is None:
if isinstance(particles,AbstractParticleSet):
self.add_particles(particles)
else:
for x in iter(particles):
self.add_particle(x)
elif size > 0:
if not keys is None:
if len(keys) != size:
raise Exception('keys and size was specified in the creation of a particle set, but the length of the keys is not equal to the size')
else:
particle_keys = keys
else:
particle_keys = keys_generator.next_set_of_keys(size)
self.add_particles_to_store(particle_keys)
elif not keys is None:
self.add_particles_to_store(keys)
elif len(attributes) > 0:
number_of_attributes = 0
for attributevalue in attributes.values():
if is_quantity(attributevalue):
if attributevalue.is_scalar():
number_of_attributes = max(number_of_attributes, 1)
else:
number_of_attributes = max(number_of_attributes, len(attributevalue))
else:
try:
if isinstance(attributevalue, str):
number_of_attributes = max(number_of_attributes,1)
else:
number_of_attributes = max(number_of_attributes, len(attributevalue))
except: #fails for numbers
number_of_attributes = max(number_of_attributes,1)
particle_keys = keys_generator.next_set_of_keys(number_of_attributes)
self.add_particles_to_store(particle_keys)
self._private.version = 0
if len(attributes) > 0:
attributenames = []
attributevalues = []
for attributename, attributevalue in attributes.items():
attributenames.append(attributename)
attributevalues.append(attributevalue)
self.set_values_in_store(self.get_all_indices_in_store(), attributenames, attributevalues)
self._private.previous = None
self.collection_attributes.timestamp = None
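# Illustrative sketch (not from the original source): the constructor also
# accepts attribute keyword arguments; the set size is then inferred from the
# longest attribute value, and scalars are assumed to broadcast to all particles.
# >>> stars = Particles(mass = [1.0, 2.0] | units.kg, x = 0.0 | units.m)
# >>> print(len(stars))
# 2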
def __getitem__(self, index):
if index is None:
keys = self.get_all_keys_in_store()
index = self.get_all_indices_in_store()
else:
keys = self.get_all_keys_in_store()[index]
index = self.get_all_indices_in_store()[index]
if hasattr(keys, '__iter__'):
return self._subset(keys)
else:
return Particle(keys, self, index, self._get_version())
def _get_version(self):
return self._private.version
def __iter__(self):
keys = self.get_all_keys_in_store()
indices = self.get_all_indices_in_store()
version = self._get_version()
for i in range(len(keys)):
yield Particle(keys[i], self, indices[i], version)
def savepoint(self, timestamp=None, format = 'memory', **attributes):
if format == 'memory':
try:
instance = type(self)(is_working_copy = False)
instance._private.attribute_storage = self._private.attribute_storage.copy()
except:
instance = self.copy()
instance._private.is_working_copy = False
instance.collection_attributes.timestamp = timestamp
for name, value in attributes.items():
setattr(instance.collection_attributes, name, value)
else:
raise Exception("{0} not supported, only 'memory' savepoint supported".format(format))
instance._private.previous = self._private.previous
instance._private.version = 0
self._private.previous = instance
return instance
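# Illustrative sketch of savepoint semantics (assuming the default in-memory
# storage model): a savepoint freezes the current attribute values, so later
# changes to the working copy do not affect it.
# >>> particles = Particles(2)
# >>> particles.x = [1.0, 2.0] | units.m
# >>> state = particles.savepoint(timestamp = 1.0 | units.s)
# >>> particles.x = [3.0, 4.0] | units.m
# >>> print(state.x)
# [1.0, 2.0] m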
def new_working_copy(self):
if self._private.is_working_copy:
previous = self._private.previous
if previous is None:
raise Exception("you have not savepoint for this set, you cannot create a working copy please use copy instead".format(format))
else:
previous = self
result = previous.copy()
result._private.previous = previous
return result
def get_timestamp(self):
return self.collection_attributes.timestamp
def iter_history(self):
if self._private.is_working_copy:
current = self._private.previous
else:
current = self
while not current is None:
yield current
current = current._private.previous
def get_state_at_timestamp(self, timestamp):
previous_timestamp = None
states_and_distances = []
for state in self.iter_history():
timestamp_of_state = state.get_timestamp()
if timestamp_of_state is None:
continue
distance = abs(timestamp_of_state - timestamp)
states_and_distances.append((state, distance,))
if len(states_and_distances) == 0:
raise exceptions.AmuseException("You asked for a state at timestamp '{0}', but the set does not have any saved states so this state cannot be returned")
accompanying_state, min_distance = states_and_distances[0]
for state, distance in states_and_distances:
if distance < min_distance:
min_distance = distance
accompanying_state = state
return accompanying_state
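# Minimal usage sketch (illustrative): the saved state whose timestamp lies
# closest to the requested time is returned.
# >>> p = Particles(1)
# >>> p.x = 0.0 | units.m
# >>> s1 = p.savepoint(timestamp = 0.0 | units.s)
# >>> p.x = 5.0 | units.m
# >>> s2 = p.savepoint(timestamp = 10.0 | units.s)
# >>> print(p.get_state_at_timestamp(2.0 | units.s).x)
# [0.0] m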
def previous_state(self):
return self._private.previous
@property
def history(self):
return reversed(list(self.iter_history()))
def get_timeline_of_attribute(self, particle_key, attribute):
timeline = []
for x in self.history:
if x.has_key_in_store(particle_key):
index = x.get_indices_of_keys([particle_key])[0]
timeline.append((x.collection_attributes.timestamp, x._get_value_of_attribute(x[index], index, attribute)))
return timeline
def get_timeline_of_attribute_as_vector(self, particle_key, attribute):
timeline = AdaptingVectorQuantity()
chrono_values = AdaptingVectorQuantity()
for x in self.history:
if x.has_key_in_store(particle_key):
index = x.get_indices_of_keys([particle_key])[0]
timeline.append(x.collection_attributes.timestamp)
chrono_values.append(x._get_value_of_attribute(x[index], index, attribute))
return timeline, chrono_values
def get_timeline_of_attributes(self, particle_key, attributes):
result = [[] for x in range(len(attributes)+1)]
units = [None for x in range(len(attributes)+1)]
for x in self.history:
if x.has_key_in_store(particle_key):
index = x.get_indices_of_keys([particle_key])[0]
if units[0] is None:
units[0] = x.collection_attributes.timestamp.unit
result[0].append(x.collection_attributes.timestamp.value_in(units[0]))
for i, attribute in enumerate(attributes):
quantity = x._get_value_of_attribute(x[index], index, attribute)
if units[i+1] is None:
units[i+1] = quantity.unit
result[i+1].append(quantity.value_in(units[i+1]))
return list(map(lambda value,unit : unit.new_quantity(value), result, units))
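# Illustrative sketch of the timeline helpers above (assumes savepoints with
# timestamps have been made, and the timestamp-append fix in
# get_timeline_of_attributes):
# >>> p = Particles(1)
# >>> p.x = 1.0 | units.m
# >>> _ = p.savepoint(timestamp = 0.0 | units.s)
# >>> p.x = 2.0 | units.m
# >>> _ = p.savepoint(timestamp = 1.0 | units.s)
# >>> times, values = p.get_timeline_of_attribute_as_vector(p[0].key, "x")
# >>> print(times)
# [0.0, 1.0] s
# >>> print(values)
# [1.0, 2.0] m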
def remove_attribute_from_store(self, name):
self._private.attribute_storage.remove_attribute_from_store(name)
def add_particles_to_store(self, keys, attributes = [], values = []):
self._private.attribute_storage.add_particles_to_store(keys, attributes, values)
self._private.version += 1
def remove_particles_from_store(self, indices):
self._private.attribute_storage.remove_particles_from_store(indices)
self._private.version += 1
def get_values_in_store(self, indices, attributes):
missing_attributes = set(attributes) - set(self.get_attribute_names_defined_in_store()) - set(["index_in_code"])
if len(missing_attributes) == 0:
return self._private.attribute_storage.get_values_in_store(indices, attributes)
defined_attributes = list(set(attributes) - missing_attributes)
defined_values = dict(list(zip(
defined_attributes,
self._private.attribute_storage.get_values_in_store(indices, defined_attributes)
)))
subset = self[indices]
tmp = [defined_values[attribute] if attribute in defined_values else subset._get_derived_attribute_value(attribute) for attribute in attributes]
return tmp
def get_values_in_store_async(self, indices, attributes):
return self._private.attribute_storage.get_values_in_store_async(indices, attributes)
def get_indices_of_keys(self, keys):
return self._private.attribute_storage.get_indices_of(keys)
def set_values_in_store(self, indices, attributes, values):
self._private.attribute_storage.set_values_in_store(indices, attributes, values)
def set_values_in_store_async(self, indices, attributes, values):
return self._private.attribute_storage.set_values_in_store_async(indices, attributes, values)
def get_attribute_names_defined_in_store(self):
return self._private.attribute_storage.get_defined_attribute_names()
def get_settable_attribute_names_defined_in_store(self):
return self._private.attribute_storage.get_defined_settable_attribute_names()
def get_all_keys_in_store(self):
return self._private.attribute_storage.get_all_keys_in_store()
def get_all_indices_in_store(self):
return self._private.attribute_storage.get_all_indices_in_store()
def has_key_in_store(self, key):
return self._private.attribute_storage.has_key_in_store(key)
def get_value_in_store(self, index, attribute):
return self._private.attribute_storage.get_value_in_store(index, attribute)
def can_extend_attributes(self):
return self._private.attribute_storage.can_extend_attributes()
def _remove_indices_in_attribute_storage(self, indices):
self._private.attribute_storage._remove_indices(indices)
self._private.version += 1
def _add_indices_in_attribute_storage(self, indices):
self._private.attribute_storage._add_indices(indices)
self._private.version += 1
@staticmethod
def is_quantity():
return False
def new_particle(self, key = None, **keyword_arguments):
return self.add_particle(Particle(key = key, **keyword_arguments))
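# Illustrative sketch: new_particle is a convenience wrapper that builds a
# Particle from keyword arguments and adds it to the set.
# >>> p = Particles()
# >>> star = p.new_particle(mass = 1.0 | units.kg)
# >>> print(p.mass)
# [1.0] kg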
class BoundSupersetParticlesFunctionAttribute(object):
def __init__(self, name, superset):
self.name = name
self.superset = superset
self.subsetfunctions = []
def add_subsetfunction(self, callable):
self.subsetfunctions.append(callable)
def __call__(self, *list_arguments, **keyword_arguments):
subset_results = []
for x in self.subsetfunctions:
subset_results.append(x(*list_arguments, **keyword_arguments))
if subset_results[0] is None:
return None
if isinstance(subset_results[0], AbstractParticleSet):
return ParticlesSuperset(subset_results)
if hasattr(subset_results[0], 'unit'):
result = AdaptingVectorQuantity()
for one_result in subset_results:
result.extend(one_result)
return result
return [item for one_result in subset_results for item in one_result]
class DerivedSupersetAttribute(DerivedAttribute):
def __init__(self, name):
self.name = name
def get_values_for_entities(self, superset):
result = None
offset = 0
for subset in superset._private.particle_sets:
subset_result = getattr(subset, self.name)
if hasattr(subset_result, '__call__'):
if len(subset) > 0:
if result is None:
result = BoundSupersetParticlesFunctionAttribute(self.name, superset)
result.add_subsetfunction(subset_result)
elif hasattr(subset_result, 'unit'):
if len(subset_result) == 0:
continue
if result is None:
shape = [len(superset),] + list(subset_result.shape[1:])
result = VectorQuantity.zeros(shape, subset_result.unit)
offset = 0
try:
result[offset:len(subset_result)+offset] = subset_result
except ValueError:
raise AttributeError("Subsets return incompatible quantities for attribute '{0}', attribute cannot be queried from the superset".format(self.name))
offset += len(subset_result)
elif hasattr(subset_result, 'dtype'):
if len(subset_result) == 0:
continue
if result is None:
shape = [len(superset),] + list(subset_result.shape[1:])
result = numpy.zeros(shape, dtype=subset_result.dtype)
offset = 0
try:
result[offset:len(subset_result)+offset] = subset_result
except ValueError:
raise AttributeError("Subsets return incompatible quantities for attribute '{0}', attribute cannot be queried from the superset".format(self.name))
offset += len(subset_result)
else:
raise exceptions.AmuseException("cannot handle this type of attribute on supersets yet")
return result
def set_values_for_entities(self, superset, value):
raise exceptions.AmuseException("cannot set value of attribute '{0}'")
def get_value_for_entity(self, superset, particle, index):
raise exceptions.AmuseException("Internal AMUSE error, a single entity (Particle) should always be bound to the subset and not the superset")
def set_value_for_entity(self, superset, key, value):
raise exceptions.AmuseException("Internal AMUSE error, a single entity (Particle) should always be bound to the subset and not the superset")
class ParticlesSuperset(AbstractParticleSet):
"""A superset of particles. Attribute values are not
stored by the superset. The superset provides a view
on two or more sets of particles.
Superset objects are not supposed to be created
directly. Instead use the ``union`` methods.
>>> p1 = Particles(3)
>>> p1.mass = [10.0, 20.0, 30.0] | units.kg
>>> p2 = Particles(3)
>>> p2.mass = [40.0, 50.0, 60.0] | units.kg
>>> p = ParticlesSuperset([p1, p2])
>>> print(len(p))
6
>>> print(p.mass)
[10.0, 20.0, 30.0, 40.0, 50.0, 60.0] kg
>>> p[4].mass = 70 | units.kg
>>> print(p.mass)
[10.0, 20.0, 30.0, 40.0, 70.0, 60.0] kg
>>> p2[1].mass
quantity<70.0 kg>
>>> cp = p.copy()
>>> print(len(cp))
6
>>> print(cp.mass)
[10.0, 20.0, 30.0, 40.0, 70.0, 60.0] kg
"""
def __init__(self, particle_sets, index_to_default_set=None, names = None):
AbstractParticleSet.__init__(self)
if not names is None:
self._private.mapping_from_name_to_set = {}
for name, particle_set in zip(names, particle_sets):
self._private.mapping_from_name_to_set[name] = particle_set
self._private.particle_sets = list(particle_sets)
self._private.index_to_default_set = index_to_default_set
names_of_derived_attributes_in_all_subsets = None
for subset in particle_sets:
derived_attribute_names = set(subset._derived_attributes.keys())
if names_of_derived_attributes_in_all_subsets is None:
names_of_derived_attributes_in_all_subsets = set(derived_attribute_names)
else:
names_of_derived_attributes_in_all_subsets &= derived_attribute_names
names_of_derived_attributes_in_all_subsets -= set(self.GLOBAL_DERIVED_ATTRIBUTES.keys())
for name in names_of_derived_attributes_in_all_subsets:
self._derived_attributes[name] = DerivedSupersetAttribute(name)
self._private.version = -1
self._ensure_updated_set_properties()
if self.has_duplicates():
raise exceptions.AmuseException("Unable to add a particle, because it was already part of this set.")
def _ensure_updated_set_properties(self):
if self._private.version == self._get_subsets_version():
return
self._private.version = self._get_subsets_version()
self._private.length = numpy.sum([len(x) for x in self._private.particle_sets])
self._private.indices = numpy.arange(self._private.length)
self._private.keys = self._get_concatenated_keys_in_store()
self._private.key_to_index = {}
d = self._private.key_to_index
index = 0
for x in self._private.keys:
d[x] = index
index += 1
def can_extend_attributes(self):
for x in self._private.particle_sets:
if not x.can_extend_attributes():
return False
return True
def __len__(self):
self._ensure_updated_set_properties()
return self._private.length
def __iter__(self):
for set in self._private.particle_sets:
for particle in set:
yield particle
def _get_subsets_version(self):
versions = [x._get_version() for x in self._private.particle_sets]
return numpy.sum(versions)
def _get_version(self):
self._ensure_updated_set_properties()
return self._private.version
def __getitem__(self, index):
self._ensure_updated_set_properties()
offset = 0
if isinstance(index, str):
return self.get_subset(index)
else:
keys = self.get_all_keys_in_store()[index]
if hasattr(keys, '__iter__'):
return self._subset(keys)
else:
index = self.get_all_indices_in_store()[index]
for set in self._private.particle_sets:
length = len(set)
if index < (offset+length):
return set[index - offset]
offset += length
raise Exception('index not found on superset')
def _get_particle(self, key):
if self.has_key_in_store(key):
return self._get_subset_for_key(key)._get_particle(key)
else:
return None
def _get_particle_unsave(self, key, index = -1):
if index >= 0:
offset, subset = self._get_offset_and_subset_for_index(index)
index -= offset
return subset._get_particle_unsave(key, subset.get_all_indices_in_store()[index])
else:
return self._get_subset_for_key(key)._get_particle_unsave(key)
def _split_keys_over_sets(self, keys):
split_sets = [ [] for x in self._private.particle_sets ]
split_indices = [ [] for x in self._private.particle_sets ]
if keys is None:
offset = 0
for setindex, x in enumerate(self._private.particle_sets):
split_sets[setindex].extend(x.get_all_keys_in_store())
split_indices[setindex].extend(range(offset, offset + len(x)))
offset = offset + len(x)
else:
if isinstance(keys, set):
keys_array = numpy.array(list(keys))
else:
keys_array = numpy.array(keys)
indices_array = numpy.arange(len(keys_array))
for setindex, x in enumerate(self._private.particle_sets):
mask = self._in1d(keys_array, x.get_all_keys_in_store(), True)
split_sets[setindex] = keys_array[mask]
split_indices[setindex] = indices_array[mask]
return split_sets, split_indices
def _split_indices_over_sets(self, indices):
self._ensure_updated_set_properties()
split_sets = [ [] for x in self._private.particle_sets ]
split_indices = [ [] for x in self._private.particle_sets ]
offset = 0
if isinstance(indices, set):
indices = numpy.array(list(indices))
if indices is None or isinstance(indices,EllipsisType):
offset = 0
for setindex, x in enumerate(self._private.particle_sets):
split_sets[setindex] = x.get_all_indices_in_store()
split_indices[setindex] = numpy.arange(offset, offset + len(x))
offset = offset + len(x)
elif len(indices) == 0:
for setindex, x in enumerate(self._private.particle_sets):
split_sets[setindex] = []
split_indices[setindex] = []
else:
result_indices_array = numpy.arange(len(indices))
for setindex, x in enumerate(self._private.particle_sets):
mask = numpy.logical_and( (indices >= offset) , (indices < (offset + len(x))) )
indices_in_store = numpy.asarray(x.get_all_indices_in_store())
split_sets[setindex] = indices_in_store[(indices-offset)[mask]]
split_indices[setindex] = result_indices_array[mask]
offset = offset + len(x)
return split_sets, split_indices
def add_particles_to_store(self, keys, attributes = [], values = []):
if not self._private.index_to_default_set is None:
self._private.particle_sets[self._private.index_to_default_set].add_particles_to_store(keys,
attributes, values)
else:
raise exceptions.AmuseException("Cannot add particles to a superset")
def remove_particles_from_store(self, indices):
split_indices_in_subset, split_indices_in_input = self._split_indices_over_sets(indices)
for indices_in_subset, set in zip(split_indices_in_subset, self._private.particle_sets):
if len(indices_in_subset) == 0:
continue
set.remove_particles_from_store(indices_in_subset)
def get_values_in_store(self, indices, attributes):
split_indices_in_subset, split_indices_in_input = self._split_indices_over_sets(indices)
indices_and_values = []
for indices_in_subset, indices_in_input, set in zip(split_indices_in_subset, split_indices_in_input, self._private.particle_sets):
if len(indices_in_subset) > 0:
values_for_set = set.get_values_in_store(indices_in_subset, attributes)
indices_and_values.append( (indices_in_input, values_for_set) )
if indices is None or isinstance(indices, EllipsisType):
resultlength = len(self)
else:
resultlength = len(indices)
values = [[]] * len(attributes)
units = [None] * len(attributes)
converts = [lambda x : x] * len(attributes)
for indices, values_for_set in indices_and_values:
for valueindex, quantity in enumerate(values_for_set):
resultvalue = values[valueindex]
if len(resultvalue) == 0:
if not is_quantity(quantity):
dtype = quantity.dtype
converts[valueindex] = lambda x : x
units[valueindex] = None
else:
dtype = quantity.number.dtype
converts[valueindex] = quantity.unit.new_quantity
units[valueindex] = quantity.unit
shape = list(quantity.shape)
shape[0] = resultlength
resultvalue = numpy.zeros(shape,dtype=dtype)
values[valueindex] = resultvalue
resultunit = units[valueindex]
if not resultunit is None:
resultvalue[indices] = quantity.value_in(resultunit)
else:
current_dtype = quantity.dtype
result_dtype = resultvalue.dtype
if (result_dtype.kind == 'S' or result_dtype.kind == 'U') and (current_dtype.itemsize > result_dtype.itemsize):
resultvalue = numpy.asarray(resultvalue, dtype = current_dtype)
values[valueindex] = resultvalue
resultvalue[indices] = quantity
return list(map(lambda u,v : u(v), converts, values))
def set_values_in_store(self, indices, attributes, values):
split_indices_in_subset, split_indices_in_input = self._split_indices_over_sets(indices)
if indices is None or indices is Ellipsis:
len_indices = 0
for x in split_indices_in_subset:
len_indices += len(x)
else:
len_indices = len(indices)
for indices_in_subset, indices_in_input, set in zip(split_indices_in_subset, split_indices_in_input, self._private.particle_sets):
quantities = [None] * len(attributes)
for valueindex, quantity in enumerate(values):
if is_quantity(quantity):
if quantity.is_scalar():
numbers = [quantity.number]*len(indices_in_input)
elif quantity.is_vector() and len(quantity) < len_indices:
numbers = numpy.take([quantity.number]*len_indices,indices_in_input)
else:
numbers = numpy.take(quantity.number, indices_in_input)
quantities[valueindex] = quantity.unit.new_quantity(numbers)
else:
if not hasattr(quantity, 'ndim'):
numbers = numpy.asarray([quantity]*len(indices_in_input))
elif len(quantity) < len_indices:
numbers = numpy.take([quantity]*len_indices,indices_in_input)
else:
numbers = numpy.take(quantity, indices_in_input)
quantities[valueindex] = numbers
set.set_values_in_store(indices_in_subset, attributes, quantities)
def get_attribute_names_defined_in_store(self):
self._ensure_updated_set_properties()
result = None
for particle_set in self._private.particle_sets:
if len(particle_set) > 0:
attribute_names = set(particle_set.get_attribute_names_defined_in_store())
if result is None:
result = attribute_names
else:
result &= attribute_names
if result is None:
return []
else:
return list(result)
def get_settable_attribute_names_defined_in_store(self):
self._ensure_updated_set_properties()
result = None
for particle_set in self._private.particle_sets:
if len(particle_set) > 0:
attribute_names = set(particle_set.get_settable_attribute_names_defined_in_store())
if result is None:
result = attribute_names
else:
result &= attribute_names
if result is None:
return []
else:
return list(result)
def get_all_keys_in_store(self):
self._ensure_updated_set_properties()
return self._private.keys
def get_all_indices_in_store(self):
self._ensure_updated_set_properties()
return self._private.indices
def get_indices_of_keys(self, keys):
self._ensure_updated_set_properties()
mapping_from_particle_to_index = self._private.key_to_index
result = []
notfoundkeys = []
foundkeys = []
for x in keys:
try:
result.append(mapping_from_particle_to_index[x])
foundkeys.append(x)
except KeyError:
notfoundkeys.append(x)
if not len(notfoundkeys) == 0:
raise exceptions.KeysNotInStorageException(
numpy.asarray(foundkeys),
numpy.asarray(result),
numpy.asarray(notfoundkeys)
)
return numpy.array(result)
def has_key_in_store(self, key):
for set in self._private.particle_sets:
if set.has_key_in_store(key):
return True
return False
def _original_set(self):
return self
def _get_subset_for_key(self, key):
for set in self._private.particle_sets:
if set.has_key_in_store(key):
return set
return None
def _get_offset_and_subset_for_index(self, index):
offset = 0
for set in self._private.particle_sets:
setlen = len(set)
if index >= offset and index < (offset+setlen):
return offset, set
offset += setlen
return None, None
def _is_superset(self):
return True
def _in1d(self, ar1, ar2, assume_unique=False):
"""
copied from numpy.in1d (numpy 1.4), to be compatible with numpy 1.3.0
"""
if not assume_unique:
ar1, rev_idx = numpy.unique(ar1, return_inverse=True)
ar2 = numpy.unique(ar2)
ar = numpy.concatenate( (ar1, ar2) )
order = ar.argsort(kind='mergesort')
sar = ar[order]
equal_adj = (sar[1:] == sar[:-1])
flag = numpy.concatenate( (equal_adj, [False] ) )
index = order.argsort(kind='mergesort')[:len( ar1 )]
if assume_unique:
return flag[index]
else:
return flag[index][rev_idx]
def _get_concatenated_keys_in_store(self):
result = []
dtype = None
for set in self._private.particle_sets:
subsetkeys = set.get_all_keys_in_store()
if dtype is None and len(set) > 0:
dtype = subsetkeys.dtype
result.extend(subsetkeys)
return numpy.array(result, dtype = dtype)
def get_subsets(self):
return list(self._private.particle_sets)
def get_subset(self, name):
return self._private.mapping_from_name_to_set[name]
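# Illustrative sketch of named subsets (assumes the optional names argument
# of the constructor shown above):
# >>> gas = Particles(2)
# >>> stars = Particles(3)
# >>> combined = ParticlesSuperset([gas, stars], names = ["gas", "stars"])
# >>> len(combined.get_subset("stars"))
# 3
# >>> combined["gas"] is gas
# True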
class ParticlesSubset(AbstractParticleSet):
"""A subset of particles. Attribute values are not
stored by the subset. The subset provides a limited view
to the particles.
Particle subset objects are not supposed to be created
directly. Instead use the ``to_set`` or ``select`` methods.
"""
def __init__(self, particles, keys):
AbstractParticleSet.__init__(self, particles)
self._private.particles = particles
self._private.keys = numpy.array(keys, dtype='uint64')
self._private.set_of_keys = set(keys)
self._private.version = -1
self._private.indices = None
def __getitem__(self, index):
keys = self.get_all_keys_in_store()[index]
if hasattr(keys, '__iter__'):
return self._subset(keys)
else:
key = keys
if key > 0 and key < 18446744073709551615: #2**64 - 1
return self._private.particles._get_particle_unsave(key, self.get_all_indices_in_store()[index])
else:
return None
def _get_version(self):
return self._private.particles._get_version()
def compressed(self):
keys = self._private.keys
return self._subset(keys[numpy.logical_and(keys > 0 , keys < 18446744073709551615)])
def get_valid_particles_mask(self):
keys = self._private.keys
return numpy.logical_and(keys > 0 , keys < 18446744073709551615)
def unconverted_set(self):
return ParticlesSubset(self._private.particles.unconverted_set(), self._private.keys)
def add_particles_to_store(self, keys, attributes = [], values = []):
"""
Adds particles to the subset, and also
adds the particles to the original set
"""
self._private.keys = numpy.concatenate((self._private.keys, numpy.array(keys,dtype='uint64')))
self._private.set_of_keys |= set(keys)
self._private.particles.add_particles_to_store(keys, attributes, values)
def remove_particles_from_store(self, indices):
"""
Removes particles from the subset, and removes them
from the original set
"""
indices_to_remove = set(indices)
index = 0
index_in_local_list = []
for x in self._private.indices:
if x in indices_to_remove:
index_in_local_list.append(index)
index += 1
set_to_remove = set(self._private.keys[index_in_local_list])
self._private.keys = numpy.delete(self._private.keys,index_in_local_list)
self._private.set_of_keys -= set_to_remove
self._private.particles.remove_particles_from_store(indices)
self._private.version = -1
self._private.indices = None
def get_values_in_store(self, indices, attributes):
if indices is None or indices is Ellipsis:
indices = self.get_all_indices_in_store()
return self._private.particles.get_values_in_store(indices, attributes)
def get_values_in_store_async(self, indices, attributes):
if indices is None or indices is Ellipsis:
indices = self.get_all_indices_in_store()
return self._private.particles.get_values_in_store_async(indices, attributes)
def set_values_in_store(self, indices, attributes, values):
if indices is None or indices is Ellipsis:
indices = self.get_all_indices_in_store()
self._private.particles.set_values_in_store(indices, attributes, values)
def set_values_in_store_async(self, indices, attributes, values):
if indices is None or indices is Ellipsis:
indices = self.get_all_indices_in_store()
return self._private.particles.set_values_in_store_async(indices, attributes, values)
def get_attribute_names_defined_in_store(self):
return self._private.particles.get_attribute_names_defined_in_store()
def get_settable_attribute_names_defined_in_store(self):
return self._private.particles.get_settable_attribute_names_defined_in_store()
def get_all_keys_in_store(self):
self._sync_with_set()
return self._private.keys
def get_all_indices_in_store(self):
self._sync_with_set()
return self._private.indices
def has_key_in_store(self, key):
return key in self._private.set_of_keys
def _original_set(self):
return self._private.particles
def get_timestamp(self):
return self._original_set().get_timestamp()
def get_indices_of_keys(self, keys):
self._sync_with_set()
return self._private.particles.get_indices_of_keys(keys)
def _sync_with_set(self):
if self._private.particles is None:
return
if not self._private.version == self._private.particles._get_version():
try:
self._private.indices = self._private.particles.get_indices_of_keys(self._private.keys)
self._private.version = self._private.particles._get_version()
except exceptions.KeysNotInStorageException as ex:
self._private.indices = ex.found_indices
self._private.keys = numpy.array(ex.found_keys, dtype='uint64')
self._private.set_of_keys = set(self._private.keys)
self._private.version = self._private.particles._get_version()
def previous_state(self):
return ParticlesSubset(self._private.particles.previous_state(), self._private.keys)
def difference(self, other):
self._sync_with_set()
new_set_of_keys = self._private.set_of_keys.difference(other.as_set()._private.set_of_keys)
return ParticlesSubset(self._private.particles, list(new_set_of_keys))
def union(self, other):
"""
Returns a new subset containing the union between
this set and the provided set.
>>> particles = Particles(3)
>>> particles.mass = [10.0, 20.0, 30.0] | units.kg
>>> subset1 = particles.select(lambda x : x > 25.0 | units.kg, ["mass"])
>>> subset2 = particles.select(lambda x : x < 15.0 | units.kg, ["mass"])
>>> union = subset1.union(subset2)
>>> len(union)
2
>>> sorted(union.mass.value_in(units.kg))
[10.0, 30.0]
"""
self._sync_with_set()
new_set_of_keys = self._private.set_of_keys.union(other.as_set()._private.set_of_keys)
return ParticlesSubset(self._private.particles, list(new_set_of_keys))
def as_set(self):
return self
def copy(self, memento = None, keep_structure = False, filter_attributes = lambda particle_set, x : True):
self._sync_with_set()
if keep_structure:
result = ParticlesSubset(None, [])
if memento is None:
memento = dict()
memento[id(self)] = result
if id(self._private.particles) in memento:
result._private.particles = memento[id(self._private.particles)]
else:
result._private.particles = self._private.particles.copy(memento, keep_structure, filter_attributes)
result._private.keys = numpy.array(self._private.keys, dtype='uint64')
result._private.set_of_keys = set(self._private.keys)
result._private.collection_attributes = self._private.collection_attributes._copy_for_collection(result)
return result
else:
return super(ParticlesSubset, self).copy(memento, keep_structure, filter_attributes)
class ParticlesMaskedSubset(ParticlesSubset):
"""A subset of particles. Attribute values are not
stored by the subset. The subset provides a limited view
to the particles. Unlike the normal subset the masked subset
can store None values.
"""
def __init__(self, particles, keys):
AbstractParticleSet.__init__(self, particles)
self._private.particles = particles
self._private.keys = numpy.ma.array(keys, dtype='uint64').copy()
validkeys = self._private.keys.compressed()
if len(validkeys) > 0:
self._private.set_of_keys = set(validkeys)
else:
self._private.set_of_keys = set([])
self._private.version = -1
self._private.indices = None
def compressed(self):
return self._subset(self._private.keys.compressed())
def get_valid_particles_mask(self):
return ~self._private.keys.mask
def __iter__(self):
original_set = self._original_set()
for key in self._private.keys :
if key is ma.masked:
yield None
else:
yield original_set._get_particle_unsave(key)
def __getitem__(self, index):
keys = self.get_all_keys_in_store()[index]
if not keys is ma.masked and hasattr(keys, '__iter__'):
if numpy.all(~ keys.mask):
return self._subset(keys.data)
else:
return ParticlesMaskedSubset(self._original_set(),keys)
else:
key = keys
if not key is ma.masked:
return self._original_set()._get_particle_unsave(key, self.get_all_indices_in_store()[index])
else:
return None
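# Illustrative sketch (not from the original source): masked entries behave
# as missing particles, yielding None on iteration; uses the internal
# _masked_subset helper.
# >>> p = Particles(2)
# >>> keys = numpy.ma.masked_array(p.key, mask = [False, True])
# >>> masked = p._masked_subset(keys)
# >>> [q is None for q in masked]
# [False, True]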
def _get_version(self):
return self._private.particles._get_version()
def unconverted_set(self):
return ParticlesMaskedSubset(self._private.particles.unconverted_set(), self._private.keys)
def add_particles_to_store(self, keys, attributes = [], values = []):
raise Exception("Cannot add particles to a masked subset")
def remove_particles_from_store(self, keys):
raise Exception("Cannot remove particles from a masked subset")
def get_values_in_store(self, indices, attributes):
if indices is None:
indices = self.get_all_indices_in_store()
return self._private.particles.get_values_in_store(indices, attributes)
def set_values_in_store(self, indices, attributes, values):
if indices is None:
indices = self.get_all_indices_in_store()
mask = self._private.keys.mask
else:
mask = indices.mask
if numpy.all(~mask):
return self._private.particles.set_values_in_store(indices, attributes, values)
raise Exception("not implemented more of this")
def get_attribute_names_defined_in_store(self):
return self._private.particles.get_attribute_names_defined_in_store()
def get_settable_attribute_names_defined_in_store(self):
return self._private.particles.get_settable_attribute_names_defined_in_store()
def get_all_keys_in_store(self):
return self._private.keys
def get_all_indices_in_store(self):
if not self._private.version == self._private.particles._get_version():
mask = self._private.keys.mask
mask = ~mask
indices = self._private.particles.get_indices_of_keys(self._private.keys[mask])
self._private.indices = -1 * numpy.ones(len(self._private.keys), dtype = indices.dtype)
self._private.indices = numpy.ma.array(self._private.indices, dtype = indices.dtype)
self._private.indices.mask = self._private.keys.mask
self._private.indices[mask] = indices
self._private.version = self._private.particles._get_version()
return self._private.indices
def has_key_in_store(self, key):
return key in self._private.set_of_keys
def get_indices_of_keys(self, keys):
if len(keys) == 1:
return self._private.particles.get_indices_of_keys(keys)
keys = numpy.ma.array(keys, dtype='uint64')
result = -1 * numpy.ones(len(keys), dtype = numpy.int32)
result[~keys.mask] = self._private.particles.get_indices_of_keys(keys[~keys.mask])
return result
def previous_state(self):
return ParticlesMaskedSubset(self._private.particles.previous_state(), self._private.keys)
def copy(self, memento = None, keep_structure = False, filter_attributes = lambda particle_set, x : True):
attributes = self.get_attribute_names_defined_in_store()
attributes = [x for x in attributes if filter_attributes(self, x)]
keys = self.get_all_keys_in_store()
keys = keys[~keys.mask]
values = self.get_values_in_store(keys, attributes)
result = Particles()
converted = []
if memento is None:
memento = {}
memento[id(self._original_set())] = result
for x in values:
if isinstance(x, LinkedArray):
converted.append(x.copy(memento, keep_structure, filter_attributes))
else:
converted.append(x)
result.add_particles_to_store(keys, attributes, converted)
result._private.collection_attributes = self._private.collection_attributes._copy_for_collection(result)
return result
def copy_to_new_particles(self, keys = None, keys_generator = None, memento = None, keep_structure = False, filter_attributes = lambda particle_set, x : True):
if keys_generator is None:
keys_generator = UniqueKeyGenerator
my_keys = self.get_all_keys_in_store()
my_keys = my_keys[~my_keys.mask]
if not keys is None:
if len(keys) != len(my_keys):
raise Exception('not enough new keys given for the copy')
else:
particle_keys = keys
else:
particle_keys = keys_generator.next_set_of_keys(len(my_keys))
attributes = self.get_attribute_names_defined_in_store()
attributes = [x for x in attributes if filter_attributes(self, x)]
values = self.get_values_in_store(my_keys, attributes)
result = Particles()
if memento is None:
memento = {}
converted = []
for x in values:
if isinstance(x, LinkedArray):
converted.append(x.copy(memento, keep_structure, filter_attributes))
else:
converted.append(x)
memento[id(self._original_set())] = result
result.add_particles_to_store(particle_keys, attributes, converted)
result._private.collection_attributes = self._private.collection_attributes._copy_for_collection(result)
return result
def __str__(self):
"""
Display string of a particle subset.
>>> particles = Particles(keys=[1,2])
>>> particles[0].child = particles[1]
>>> print(particles.child[0]) # doctest: +ELLIPSIS
Particle(2, set=<...>
, child=None)
>>> print(particles.child[1])
None
>>> print(particles[0]) # doctest: +ELLIPSIS
Particle(1, set=<...>
, child=Particle(2, set=<...>))
"""
keys = numpy.where(self.get_valid_particles_mask(), self._private.keys, [None])
return str(list(keys))
def as_set(self):
return self
class ParticlesOverlay(AbstractParticleSet):
"""An overlay of of a particles set. The overlay
stores extra attributes for the particles in the
overlayed set.
>>> p1 = Particles(3)
>>> p1.mass = [10.0, 20.0, 30.0] | units.kg
>>> p2 = ParticlesOverlay(p1)
>>> p2.radius = [4.0, 5.0, 6.0] | units.m
>>> print(len(p2))
3
>>> print(p2.mass)
[10.0, 20.0, 30.0] kg
"""
def __init__(self, particles, overlay_set = None):
AbstractParticleSet.__init__(self)
self._private.base_set = particles
if overlay_set is None:
overlay_set = Particles(keys = self._private.base_set.key)
self._private.overlay_set = overlay_set
self._private.base_version = self._private.base_set._get_version()
object.__setattr__(self, "_derived_attributes", particles._derived_attributes)
def _ensure_updated_set_properties(self):
if self._private.base_version == self._private.base_set._get_version():
return
self._private.base_version = self._private.base_set._get_version()
base_set = self._private.base_set
overlay_set = self._private.overlay_set
base_set_keys = base_set.key
overlay_set_keys = overlay_set.key
removed_indices = []
index = 0
index_in_overlay = 0
while index < len(base_set_keys) and index_in_overlay < len(overlay_set_keys):
key1 = base_set_keys[index]
key2 = overlay_set_keys[index_in_overlay]
if key2 == key1:
index_in_overlay += 1
index += 1
else:
while key2 != key1:
removed_indices.append(index_in_overlay)
index_in_overlay += 1
if index_in_overlay >= len(overlay_set_keys):
break
key2 = overlay_set_keys[index_in_overlay]
added_keys = []
if index_in_overlay >= len(overlay_set_keys):
added_keys = base_set_keys[index:]
overlay_set.remove_particles_from_store(removed_indices)
if len(added_keys) > 0:
overlay_set.add_particles_to_store(added_keys)
def can_extend_attributes(self):
return self._private.overlay_set.can_extend_attributes()
def __len__(self):
return len(self._private.overlay_set)
def _get_version(self):
return self._private.overlay_set._get_version() + self._private.base_set._get_version()
def __getitem__(self, index):
self._ensure_updated_set_properties()
keys = self.get_all_keys_in_store()[index]
if hasattr(keys, '__iter__'):
return self._subset(keys)
else:
return Particle(keys, self)
def _split_attributes(self, attributes):
inbase = set(self._private.base_set.get_attribute_names_defined_in_store())
inbase |= set(self._private.base_set._derived_attributes.keys())
attributes_inbase = []
attributes_inoverlay = []
indices_inbase = []
indices_inoverlay = []
for i,x in enumerate(attributes):
if x in inbase:
attributes_inbase.append(x)
indices_inbase.append(i)
else:
attributes_inoverlay.append(x)
indices_inoverlay.append(i)
return (attributes_inbase, indices_inbase), (attributes_inoverlay, indices_inoverlay)
def _split_attributes_and_values(self, attributes, values, is_setter = True):
if is_setter:
inbase = set(self._private.base_set.get_settable_attribute_names_defined_in_store())
else:
inbase = set(self._private.base_set.get_attribute_names_defined_in_store())
attributes_inbase = []
attributes_inoverlay = []
values_inbase = []
values_inoverlay = []
for x,y in zip(attributes, values):
if x in inbase:
attributes_inbase.append(x)
values_inbase.append(y)
else:
attributes_inoverlay.append(x)
values_inoverlay.append(y)
return (attributes_inbase, values_inbase), (attributes_inoverlay, values_inoverlay)
def add_particles_to_store(self, keys, attributes = [], values = []):
self._ensure_updated_set_properties()
(
(attributes_inbase, values_inbase),
(attributes_inoverlay, values_inoverlay)
) = self._split_attributes_and_values(attributes, values)
self._private.base_set.add_particles_to_store(keys, attributes_inbase, values_inbase)
self._private.overlay_set.add_particles_to_store(keys, attributes_inoverlay, values_inoverlay)
self._private.base_version = self._private.base_set._get_version()
def remove_particles_from_store(self, indices):
self._ensure_updated_set_properties()
indices = numpy.asarray(indices)
self._private.base_set.remove_particles_from_store(indices[...,0])
self._private.overlay_set.remove_particles_from_store(indices[...,1])
self._private.base_version = self._private.base_set._get_version()
def get_values_in_store(self, indices, attributes):
self._ensure_updated_set_properties()
(
(attributes_inbase, indices_inbase),
(attributes_inoverlay, indices_inoverlay)
) = self._split_attributes(attributes)
if indices is None or isinstance(indices,EllipsisType):
indices0 = self._private.base_set.get_all_indices_in_store()
indices1 = self._private.overlay_set.get_all_indices_in_store()
else:
indices0 = []
indices1 = []
for i0, i1 in indices:
indices0.append(i0)
indices1.append(i1)
indices0 = numpy.asarray(indices0, dtype='int64')
indices1 = numpy.asarray(indices1, dtype='int64')
result = [None] * len(attributes)
if len(attributes_inbase) > 0:
values_inbase = self._private.base_set.get_values_in_store(indices0, attributes_inbase)
for i, value in zip(indices_inbase, values_inbase):
result[i] = value
if len(attributes_inoverlay) > 0:
values_inoverlay = self._private.overlay_set.get_values_in_store(indices1, attributes_inoverlay)
for i, value in zip(indices_inoverlay, values_inoverlay):
result[i] = value
return result
def set_values_in_store(self, indices, attributes, values):
self._ensure_updated_set_properties()
(
(attributes_inbase, values_inbase),
(attributes_inoverlay, values_inoverlay)
) = self._split_attributes_and_values(attributes, values)
if indices is None:
indices0 = self._private.base_set.get_all_indices_in_store()
indices1 = self._private.overlay_set.get_all_indices_in_store()
else:
indices0 = []
indices1 = []
for i0, i1 in indices:
indices0.append(i0)
indices1.append(i1)
indices0 = numpy.asarray(indices0, dtype='int64')
indices1 = numpy.asarray(indices1, dtype='int64')
if len(attributes_inbase) > 0:
self._private.base_set.set_values_in_store(indices0, attributes_inbase, values_inbase)
if len(attributes_inoverlay) > 0:
self._private.overlay_set.set_values_in_store(indices1, attributes_inoverlay, values_inoverlay)
def get_attribute_names_defined_in_store(self):
result = list(self._private.base_set.get_attribute_names_defined_in_store())
result.extend(self._private.overlay_set.get_attribute_names_defined_in_store())
return result
def get_settable_attribute_names_defined_in_store(self):
result = list(self._private.base_set.get_settable_attribute_names_defined_in_store())
result.extend(self._private.overlay_set.get_settable_attribute_names_defined_in_store())
return result
def get_all_keys_in_store(self):
self._ensure_updated_set_properties()
return self._private.overlay_set.get_all_keys_in_store()
def get_all_indices_in_store(self):
self._ensure_updated_set_properties()
indices0 = self._private.base_set.get_all_indices_in_store()
indices1 = self._private.overlay_set.get_all_indices_in_store()
return list(zip(indices0, indices1))
def get_indices_of_keys(self, keys):
self._ensure_updated_set_properties()
indices0 = self._private.base_set.get_indices_of_keys(keys)
indices1 = self._private.overlay_set.get_indices_of_keys(keys)
return list(zip(indices0, indices1))
def has_key_in_store(self, key):
self._ensure_updated_set_properties()
return self._private.overlay_set.has_key_in_store(key)
def _original_set(self):
return self
def synchronize_to(self, other_particles):
"""
Synchronize the particles of this set
with the contents of the provided set.
After this call the `other_particles` set will have
the same particles as this set.
        This call will check if particles have been removed or
        added; it will not copy values of existing particles
        over.
        :parameter other_particles: particle set which has to be updated
>>> particles = Particles(2)
>>> particles.x = [1.0, 2.0] | units.m
>>> copy = particles.copy()
>>> new_particle = Particle()
>>> new_particle.x = 3.0 | units.m
>>> particles.add_particle(new_particle)# doctest:+ELLIPSIS
<amuse.datamodel.particles.Particle object at ...>
        >>> print(particles.x)
        [1.0, 2.0, 3.0] m
        >>> print(copy.x)
        [1.0, 2.0] m
        >>> particles.synchronize_to(copy)
        >>> print(copy.x)
        [1.0, 2.0, 3.0] m
"""
other_keys = set(other_particles.get_all_keys_in_store())
my_keys = set(self.get_all_keys_in_store())
added_keys = numpy.asarray(list(my_keys - other_keys))
removed_keys = other_keys - my_keys
removed_keys = list(removed_keys)
if len(removed_keys) > 0:
other_particles.remove_particles_from_store(other_particles.get_indices_of_keys(removed_keys))
if len(added_keys) > 0:
added_indices = self.get_indices_of_keys(added_keys)
attributes = self.get_attribute_names_defined_in_store()
attributes= [x for x in attributes if x not in other_particles._derived_attributes]
values = self.get_values_in_store(added_indices, attributes)
converted = []
memento = {}
for x in values:
if isinstance(x, LinkedArray):
converted.append(x.copy_with_link_transfer(self._original_set(), other_particles, True, memento))
#converted.append(x.copy(memento, False))
else:
converted.append(x)
try:
other_particles.add_particles_to_store(added_keys, attributes, converted)
except exceptions.MissingAttributesAmuseException as caught_exception:
for attribute_name in caught_exception.missing_attributes:
if attribute_name in self._derived_attributes:
attributes.append(attribute_name)
converted.append(getattr(self._subset(added_keys), attribute_name))
else:
raise
other_particles.add_particles_to_store(added_keys, attributes, converted)
class ParticlesWithFilteredAttributes(AbstractParticleSet):
"""An overlaytof a particles set. The overlay
filters attributes for the particles in the
overlayed set.
>>> p1 = Particles(3)
>>> p1.mass = [10.0, 20.0, 30.0] | units.kg
>>> p1.radius = [10.0, 20.0, 30.0] | units.m
>>> p2 = ParticlesWithFilteredAttributes(p1, ["mass"])
    >>> print(p2.get_attribute_names_defined_in_store())
    ['mass']
    >>> print(p1.get_attribute_names_defined_in_store())
    ['mass', 'radius']
"""
def __init__(self, particles, attributes_names, new_names = []):
AbstractParticleSet.__init__(self)
self._private.base_set = particles
self._private.attribute_names = attributes_names
if len(new_names) == 0:
converted_names = attributes_names
else:
converted_names = []
for set_name, converted_name in zip(attributes_names, new_names):
if not converted_name:
converted_names.append(set_name)
else:
converted_names.append(converted_name)
mapping_from_set_name_to_converted_name = {}
mapping_from_converted_name_to_set_name = {}
for set_name, converted_name in zip(attributes_names, converted_names):
mapping_from_set_name_to_converted_name[set_name] = converted_name
mapping_from_converted_name_to_set_name[converted_name] = set_name
self._private.mapping_from_set_name_to_converted_name = mapping_from_set_name_to_converted_name
self._private.mapping_from_converted_name_to_set_name = mapping_from_converted_name_to_set_name
self._private.converted_names = converted_names
def _convert_to_base_set(self, attributes):
result = []
for x in attributes:
            if x not in self._private.mapping_from_converted_name_to_set_name:
raise exceptions.AmuseException("attribute '{0}' not defined for this set".format(x))
result.append(self._private.mapping_from_converted_name_to_set_name[x])
return result
def _filter_and_convert_to_this(self, attributes):
return [self._private.mapping_from_set_name_to_converted_name[x] for x in attributes if x in self._private.mapping_from_set_name_to_converted_name]
def can_extend_attributes(self):
return False
def __len__(self):
return len(self._private.base_set)
def _get_version(self):
return self._private.base_set._get_version()
def __getitem__(self, index):
keys = self.get_all_keys_in_store()[index]
if hasattr(keys, '__iter__'):
return self._subset(keys)
else:
return Particle(keys, self)
def add_particles_to_store(self, keys, attributes = [], values = []):
attributes_inbase = self._convert_to_base_set(attributes)
self._private.base_set.add_particles_to_store(keys, attributes_inbase, values)
def remove_particles_from_store(self, indices):
self._private.base_set.remove_particles_from_store(indices)
def get_values_in_store(self, indices, attributes):
attributes_inbase = self._convert_to_base_set(attributes)
return self._private.base_set.get_values_in_store(indices, attributes_inbase)
def set_values_in_store(self, indices, attributes, values):
attributes_inbase = self._convert_to_base_set(attributes)
self._private.base_set.set_values_in_store(indices, attributes_inbase, values)
def get_attribute_names_defined_in_store(self):
return self._private.converted_names
def get_settable_attribute_names_defined_in_store(self):
result = list(self._private.base_set.get_settable_attribute_names_defined_in_store())
return self._filter_and_convert_to_this(result)
def get_all_keys_in_store(self):
return self._private.base_set.get_all_keys_in_store()
def get_all_indices_in_store(self):
return self._private.base_set.get_all_indices_in_store()
def get_indices_of_keys(self, keys):
return self._private.base_set.get_indices_of_keys(keys)
def has_key_in_store(self, key):
return self._private.base_set.has_key_in_store(key)
def _original_set(self):
return self
class ParticlesWithUnitsConverted(AbstractParticleSet):
"""
    A view on a particle set. Used to convert
    values between incompatible sets of units, for example to
    convert from SI units to N-body units.
    The converter must implement the ConverterInterface.
>>> from amuse.units import nbody_system
>>> particles_nbody = Particles(2)
>>> particles_nbody.x = [10.0 , 20.0] | nbody_system.length
>>> convert_nbody = nbody_system.nbody_to_si(10 | units.kg , 5 | units.m )
>>> particles_si = ParticlesWithUnitsConverted(
... particles_nbody,
... convert_nbody.as_converter_from_si_to_nbody())
...
    >>> print(particles_nbody.x)
    [10.0, 20.0] length
    >>> print(particles_si.x)
    [50.0, 100.0] m
    >>> particles_si.x = [200.0, 400.0] | units.m
    >>> print(particles_nbody.x)
    [40.0, 80.0] length
"""
class ConverterInterface(object):
"""
Interface definition for the converter.
source
The source quantity is in the units of the user of a
ParticlesWithUnitsConverted object
target
The target quantity must be in the units of the
internal particles set.
"""
def from_source_to_target(quantity):
"""
Converts the quantity from the source units
to the target units.
:parameter quantity: quantity to convert
"""
return quantity
def from_target_to_source(quantity):
"""
Converts the quantity from the target units
to the source units.
:parameter quantity: quantity to convert
"""
return quantity
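    # A minimal converter sketch satisfying ConverterInterface (illustrative
    # only; the class name and the scale factor are assumptions, not part of
    # the original code):
    #
    # class ScaleConverter(object):
    #     def __init__(self, factor):
    #         self.factor = factor
    #     def from_source_to_target(self, quantity):
    #         return quantity / self.factor
    #     def from_target_to_source(self, quantity):
    #         return quantity * self.factor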
def __init__(self, particles, converter):
AbstractParticleSet.__init__(self, particles)
self._private.particles = particles
self._private.converter = converter
def compressed(self):
return ParticlesWithUnitsConverted(self._private.particles.compressed(), self._private.converter)
def get_valid_particles_mask(self):
return self._private.particles.get_valid_particles_mask()
def __getitem__(self, index):
keys = self.get_all_keys_in_store()[index]
if keys is ma.masked:
return None
elif hasattr(keys, '__iter__'):
return self._subset(keys)
else:
return Particle(keys, self)
def _get_version(self):
return self._private.particles._get_version()
def shallow_copy(self):
copiedParticles = self._private.particles.shallow_copy()
return ParticlesWithUnitsConverted(copiedParticles, self._private.converter)
def unconverted_set(self):
return self._private.particles
def can_extend_attributes(self):
return self._private.particles.can_extend_attributes()
def add_particles_to_store(self, keys, attributes = [], values = []):
converted_values = []
for quantity in values:
converted_quantity = self._private.converter.from_source_to_target(quantity)
converted_values.append(converted_quantity)
self._private.particles.add_particles_to_store(keys, attributes, converted_values)
def remove_particles_from_store(self, keys):
self._private.particles.remove_particles_from_store(keys)
def get_values_in_store(self, indices, attributes):
values = self._private.particles.get_values_in_store(indices, attributes)
converted_values = []
for quantity in values:
if isinstance(quantity, LinkedArray):
objects = quantity
convert_objects = []
for x in objects:
if x is None:
convert_objects.append(x)
else:
if isinstance(x, Particle):
convert_objects.append(ParticlesWithUnitsConverted(x.as_set(), self._private.converter)[0])
else:
convert_objects.append(ParticlesWithUnitsConverted(x, self._private.converter))
convert_objects = LinkedArray(convert_objects)
converted_values.append(convert_objects)
else:
converted_quantity = self._private.converter.from_target_to_source(quantity)
converted_values.append(converted_quantity)
return converted_values
def set_values_in_store(self, indices, attributes, values):
converted_values = []
for quantity in values:
converted_quantity = self._private.converter.from_source_to_target(quantity)
converted_values.append(converted_quantity)
self._private.particles.set_values_in_store(indices, attributes, converted_values)
def get_attribute_names_defined_in_store(self):
return self._private.particles.get_attribute_names_defined_in_store()
def get_settable_attribute_names_defined_in_store(self):
return self._private.particles.get_settable_attribute_names_defined_in_store()
def get_all_keys_in_store(self):
return self._private.particles.get_all_keys_in_store()
def get_all_indices_in_store(self):
return self._private.particles.get_all_indices_in_store()
def get_indices_of_keys(self, keys):
return self._private.particles.get_indices_of_keys(keys)
def has_key_in_store(self, key):
return self._private.particles.has_key_in_store(key)
def as_set(self):
return ParticlesSubset(self, self.get_all_keys_in_store())
def get_timestamp(self):
timestamp = self._private.particles.get_timestamp()
        if timestamp is not None:
            timestamp = self._private.converter.from_target_to_source(timestamp)
return timestamp
    def savepoint(self, timestamp=None):
        if timestamp is not None:
            timestamp = self._private.converter.from_source_to_target(timestamp)
return ParticlesWithUnitsConverted(
self._private.particles.savepoint(timestamp),
self._private.converter
)
def previous_state(self):
return ParticlesWithUnitsConverted(
self._private.particles.previous_state(),
self._private.converter
)
def get_unit_converter(self):
return self._private.converter.generic_to_si
class ParticlesWithAttributesTransformed(AbstractParticleSet):
"""
    A view on a particle set. Some attributes are transformed
    on input or output.
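    Example (an illustrative sketch; the attribute values and units below
    are assumptions, not taken from the original text):
    >>> particles = Particles(2)
    >>> particles.x = [1.0, 2.0] | units.m
    >>> view = ParticlesWithAttributesTransformed.translate(
    ...     particles,
    ...     [10.0, 0.0, 0.0] | units.m,
    ...     [0.0, 0.0, 0.0] | units.m / units.s)
    >>> print(view.x)
    [11.0, 12.0] m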
"""
def __init__(self, particles, get_function, set_function):
AbstractParticleSet.__init__(self, particles)
self._private.particles = particles
self._private.get_function = get_function
self._private.set_function = set_function
@classmethod
def translate(cls, particles, position, velocity):
def get_function(attribute, quantity):
if(attribute == 'x'):
return quantity + position[0]
elif(attribute == 'y'):
return quantity + position[1]
elif(attribute == 'z'):
return quantity + position[2]
elif(attribute == 'vx'):
return quantity + velocity[0]
elif(attribute == 'vy'):
return quantity + velocity[1]
elif(attribute == 'vz'):
return quantity + velocity[2]
else:
return quantity
def set_function(attribute, quantity):
if(attribute == 'x'):
return quantity - position[0]
elif(attribute == 'y'):
return quantity - position[1]
elif(attribute == 'z'):
return quantity - position[2]
elif(attribute == 'vx'):
return quantity - velocity[0]
elif(attribute == 'vy'):
return quantity - velocity[1]
elif(attribute == 'vz'):
return quantity - velocity[2]
else:
return quantity
return cls(
particles,
get_function,
set_function
)
def compressed(self):
return ParticlesWithAttributesTransformed(
self._private.particles.compressed(),
self._private.get_function,
self._private.set_function
)
def get_valid_particles_mask(self):
return self._private.particles.get_valid_particles_mask()
def __getitem__(self, index):
keys = self.get_all_keys_in_store()[index]
if keys is ma.masked:
return None
elif hasattr(keys, '__iter__'):
return self._subset(keys)
else:
return Particle(keys, self)
def _get_version(self):
return self._private.particles._get_version()
def shallow_copy(self):
copiedParticles = self._private.particles.shallow_copy()
return ParticlesWithAttributesTransformed(
copiedParticles,
self._private.get_function,
self._private.set_function,
)
def unconverted_set(self):
return self._private.particles
def can_extend_attributes(self):
return self._private.particles.can_extend_attributes()
def add_particles_to_store(self, keys, attributes = [], values = []):
converted_values = []
for attribute, quantity in zip(attributes, values):
converted_quantity = self._private.set_function(attribute, quantity)
converted_values.append(converted_quantity)
self._private.particles.add_particles_to_store(keys, attributes, converted_values)
def remove_particles_from_store(self, keys):
self._private.particles.remove_particles_from_store(keys)
def get_values_in_store(self, indices, attributes):
values = self._private.particles.get_values_in_store(indices, attributes)
converted_values = []
for attribute, quantity in zip(attributes, values):
if isinstance(quantity, LinkedArray):
objects = quantity
convert_objects = []
for x in objects:
if x is None:
convert_objects.append(x)
else:
if isinstance(x, Particle):
convert_objects.append(ParticlesWithAttributesTransformed(x.as_set(), self._private.get_function, self._private.set_function)[0])
else:
convert_objects.append(ParticlesWithAttributesTransformed(x, self._private.get_function, self._private.set_function))
convert_objects = LinkedArray(convert_objects)
converted_values.append(convert_objects)
else:
converted_quantity = self._private.get_function(attribute, quantity)
converted_values.append(converted_quantity)
return converted_values
def set_values_in_store(self, indices, attributes, values):
converted_values = []
for attribute, quantity in zip(attributes, values):
converted_quantity = self._private.set_function(attribute, quantity)
converted_values.append(converted_quantity)
self._private.particles.set_values_in_store(indices, attributes, converted_values)
def get_attribute_names_defined_in_store(self):
return self._private.particles.get_attribute_names_defined_in_store()
def get_settable_attribute_names_defined_in_store(self):
return self._private.particles.get_settable_attribute_names_defined_in_store()
def get_all_keys_in_store(self):
return self._private.particles.get_all_keys_in_store()
def get_all_indices_in_store(self):
return self._private.particles.get_all_indices_in_store()
def get_indices_of_keys(self, keys):
return self._private.particles.get_indices_of_keys(keys)
def has_key_in_store(self, key):
return self._private.particles.has_key_in_store(key)
def as_set(self):
return ParticlesSubset(self, self.get_all_keys_in_store())
def get_timestamp(self):
timestamp = self._private.particles.get_timestamp()
        if timestamp is not None:
            timestamp = self._private.get_function("timestamp", timestamp)
return timestamp
def savepoint(self, timestamp=None, **kwargs):
        if timestamp is not None:
            timestamp = self._private.set_function("timestamp", timestamp)
return ParticlesWithAttributesTransformed(
self._private.particles.savepoint(timestamp),
self._private.get_function,
self._private.set_function,
)
def previous_state(self):
return ParticlesWithAttributesTransformed(
self._private.particles.previous_state(),
self._private.get_function,
self._private.set_function,
)
class TransformedParticles(ParticlesWithAttributesTransformed):
"""
    Particle set which is a view on the parent particle set with some
    attributes transformed. This is a more flexible reimplementation of
    ParticlesWithAttributesTransformed such that general coordinate
    transformations are possible.
"""
@classmethod
def translate(cls, particles, position, velocity):
def forward(x,y,z,vx,vy,vz):
return [x+position[0],
y+position[1],
z+position[2],
vx+velocity[0],
vy+velocity[1],
vz+velocity[2],]
def reverse(x,y,z,vx,vy,vz):
return [x-position[0],
y-position[1],
z-position[2],
vx-velocity[0],
vy-velocity[1],
vz-velocity[2],]
return cls(
particles,
["x","y","z","vx","vy","vz"],
forward,
["x","y","z","vx","vy","vz"],
reverse
)
@classmethod
def rotate_z(cls, particles, angle, omega):
def forward(x,y,vx,vy, inverse=False):
_angle=angle
_omega=omega
if inverse:
_angle=-angle
_omega=-omega
C1 = vx + _omega*y
C2 = vy - _omega*x
x_ = x * trigo.cos(_angle) + y * trigo.sin(_angle)
y_ = -x * trigo.sin(_angle) + y * trigo.cos(_angle)
vx_ = C1*trigo.cos(_angle) + C2*trigo.sin(_angle)
vy_ = C2*trigo.cos(_angle) - C1*trigo.sin(_angle)
return x_,y_,vx_,vy_
def reverse(x,y,vx,vy):
return forward(x,y,vx,vy, inverse=True)
return cls(
particles,
["x","y","vx","vy"],
forward,
["x","y","vx","vy"],
reverse
)
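    # Usage sketch for rotate_z (illustrative; the angle value and the name
    # `omega` are assumptions):
    #   corotating = TransformedParticles.rotate_z(
    #       particles, 0.5 | units.rad, omega)
    # gives a view of `particles` in a frame rotated by 0.5 rad about the
    # z-axis and corotating with angular velocity `omega`.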
def __init__(self, particles, target_attributes,
forward_transformation, source_attributes, reverse_transformation=None):
AbstractParticleSet.__init__(self, particles)
self._private.particles = particles
self._private.source_attributes=source_attributes
self._private.target_attributes=target_attributes
self._private.forward_transformation = forward_transformation
self._private.reverse_transformation = reverse_transformation
if forward_transformation is None:
self._private.forward_transformation = lambda *x:x
self._private.reverse_transformation = lambda *x:x
def _factory(self, particles):
return TransformedParticles( particles,
self._private.target_attributes,
self._private.forward_transformation,
self._private.source_attributes,
self._private.reverse_transformation )
def compressed(self):
return self._factory(self._private.particles.compressed())
def shallow_copy(self):
copiedParticles = self._private.particles.shallow_copy()
return self._factory(copiedParticles)
def get_values_in_store(self, indices, attributes):
direct_attributes=[]
needs_transformation=False
for attribute in attributes:
if attribute in self._private.target_attributes:
needs_transformation=True
else:
direct_attributes.append(attribute)
if needs_transformation:
sources=self._private.particles.get_values_in_store(indices, self._private.source_attributes)
targets=self._private.forward_transformation(*sources)
if direct_attributes:
direct_values = self._private.particles.get_values_in_store(indices, direct_attributes)
values=[]
for attribute in attributes:
if attribute in self._private.target_attributes:
value=targets[self._private.target_attributes.index(attribute)]
else:
value=direct_values[direct_attributes.index(attribute)]
values.append(value)
converted_values = []
for attribute, quantity in zip(attributes, values):
if isinstance(quantity, LinkedArray):
objects = quantity
convert_objects = []
for x in objects:
if x is None:
convert_objects.append(x)
else:
transformed_x=self._factory(x.as_set())
if isinstance(x, Particle):
transformed_x=x[0]
convert_objects.append(transformed_x)
convert_objects = LinkedArray(convert_objects)
converted_values.append(convert_objects)
else:
converted_values.append(quantity)
return converted_values
def set_values_in_store(self, indices, attributes, values):
_attributes=[]
_values=[]
needs_transformation=False
for attribute, value in zip(attributes, values):
if attribute in self._private.target_attributes:
needs_transformation=True
else:
_attributes.append(attribute)
_values.append(value)
if needs_transformation:
if self._private.reverse_transformation is None:
raise Exception("no reverse_transformation defined, view on particleset is readonly")
missing=[]
for attribute in self._private.target_attributes:
if attribute not in attributes:
missing.append(attribute)
if missing:
sources=self._private.particles.get_values_in_store(indices, self._private.source_attributes)
targets=self._private.forward_transformation(*sources)
args=[]
for attribute in self._private.target_attributes:
if attribute in missing:
value=targets[self._private.target_attributes.index(attribute)]
else:
value=values[attributes.index(attribute)]
args.append(value)
sources=self._private.reverse_transformation(*args)
for attribute,value in zip(self._private.source_attributes, sources):
_attributes.append(attribute)
_values.append(value)
self._private.particles.set_values_in_store(indices, _attributes, _values)
def add_particles_to_store(self, keys, attributes = [], values = []):
_attributes=[]
_values=[]
needs_transformation=False
for attribute, value in zip(attributes, values):
if attribute in self._private.target_attributes:
needs_transformation=True
else:
_attributes.append(attribute)
_values.append(value)
if needs_transformation:
if self._private.reverse_transformation is None:
raise Exception("no reverse_transformation defined, view on particleset is readonly")
missing=[]
for attribute in self._private.target_attributes:
if attribute not in attributes:
missing.append(attribute)
if missing:
raise Exception("For this add_particle operation I am missing the following attributes:"+ str(missing))
args=[]
for attribute in self._private.target_attributes:
value=values[attributes.index(attribute)]
args.append(value)
sources=self._private.reverse_transformation(*args)
for attribute,value in zip(self._private.source_attributes, sources):
_attributes.append(attribute)
_values.append(value)
self._private.particles.add_particles_to_store(keys, _attributes, _values)
def get_timestamp(self):
return self._private.particles.get_timestamp()
def savepoint(self, timestamp=None, **kwargs):
return self._factory(self._private.particles.savepoint(timestamp))
def previous_state(self):
return self._factory(self._private.particles.previous_state())
def get_subsets(self):
return list([ self._factory(particles) for particles in self._private.particles.get_subsets()])
def get_subset(self, name):
return self._factory(self._private.particles.get_subset(name))
class ParticleInformationChannel(object):
def __init__(self, from_particles, to_particles, attributes=None, target_names=None):
self.from_particles = from_particles
self.to_particles = to_particles
self.attributes = attributes
self.target_names = target_names
self.from_version = -1
self.to_version = -1
self._reindex()
def _reindex(self):
if (
(self.from_version == self.from_particles._get_version()) and
(self.to_version == self.to_particles._get_version())
):
return
self.keys = self.intersecting_keys()
self.from_indices = self.from_particles.get_indices_of_keys(self.keys)
self.to_indices = self.to_particles.get_indices_of_keys(self.keys)
self.from_version = self.from_particles._get_version()
self.to_version = self.to_particles._get_version()
def reverse(self):
if self.target_names is None:
attributes = self.attributes
target_names = self.target_names
else:
attributes = self.target_names
target_names = self.attributes
return ParticleInformationChannel(
self.to_particles,
self.from_particles,
attributes,
target_names
)
def intersecting_keys(self):
from_keys = self.from_particles.get_all_keys_in_store()
to_keys = self.to_particles.get_all_keys_in_store()
return numpy.intersect1d(from_keys,to_keys) #filter(lambda x : self.to_particles._has_key(x), from_keys)
def copy_attributes(self, attributes, target_names = None):
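        """ Copy the values of several attributes from the source set to the
        target set, optionally under different names in the target set.
        A minimal usage sketch (the attribute names and values below are
        illustrative assumptions):
        >>> particles1 = Particles(2)
        >>> particles1.mass = [1.0, 2.0] | units.kg
        >>> particles2 = particles1.copy()
        >>> particles1.mass = [3.0, 4.0] | units.kg
        >>> channel = particles1.new_channel_to(particles2)
        >>> channel.copy_attributes(["mass"])
        >>> print(particles2.mass)
        [3.0, 4.0] kg
        """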
if target_names is None:
target_names = attributes
self._reindex()
if len(self.to_indices) == 0:
return
if len(self.from_indices) == 0:
return
if len(self.keys) == 0:
return
values = self.from_particles.get_values_in_store(self.from_indices, attributes)
converted = []
for x in values:
if isinstance(x, LinkedArray):
converted.append(x.copy_with_link_transfer(self.from_particles, self.to_particles))
else:
converted.append(x)
self.to_particles.set_values_in_store(self.to_indices, target_names, converted)
def copy_attributes_async(self, attributes, target_names = None, async_get = True, async_set = False):
if target_names is None:
target_names = attributes
self._reindex()
if len(self.to_indices) == 0:
return
if len(self.from_indices) == 0:
return
if len(self.keys) == 0:
return
if async_get:
request = self.from_particles.get_values_in_store_async(self.from_indices, attributes)
def result_handler(inner):
values = inner()
converted = []
for x in values:
if isinstance(x, LinkedArray):
converted.append(x.copy_with_link_transfer(self.from_particles, self.to_particles))
else:
converted.append(x)
self.to_particles.set_values_in_store(self.to_indices, target_names, converted)
return converted
request.add_result_handler(result_handler)
return request
elif async_set:
values = self.from_particles.get_values_in_store(self.from_indices, attributes)
converted = []
for x in values:
if isinstance(x, LinkedArray):
converted.append(x.copy_with_link_transfer(self.from_particles, self.to_particles))
else:
converted.append(x)
request = self.to_particles.set_values_in_store_async(self.to_indices, target_names, converted)
return request
def copy(self):
        if self.attributes is not None:
self.copy_attributes(self.attributes, self.target_names)
elif not self.to_particles.can_extend_attributes():
self.copy_overlapping_attributes()
else:
self.copy_all_attributes()
def copy_all_attributes(self):
names_to_copy = self.from_particles.get_attribute_names_defined_in_store()
self.copy_attributes(list(names_to_copy))
def copy_overlapping_attributes(self):
from_names = self.from_particles.get_attribute_names_defined_in_store()
to_names = self.to_particles.get_settable_attribute_names_defined_in_store()
names_to_copy = set(from_names).intersection(set(to_names))
self.copy_attributes(list(names_to_copy))
def copy_attribute(self, name, target_name = None):
""" Copy the values of one attribute from the source set to the target set.
The copied attribute can have a different name in the target set.
:argument name: name of the attribute in the source set
:argument target_name: name of the attribute in the target set, when None (default) the target_name
will be set equal to the name
>>> from amuse.datamodel import Particles
>>> from amuse.units import units
>>> particles1 = Particles(2)
>>> particles2 = particles1.copy()
>>> particles1.mass = 1 | units.m
>>> particles2.mass = 3 | units.m
>>> channel = particles1.new_channel_to(particles2)
>>> channel.copy_attribute("mass", "mass_from_p2")
        >>> print(particles2.mass_from_p2)
        [1.0, 1.0] m
        >>> print(particles2.mass - particles2.mass_from_p2)
        [2.0, 2.0] m
"""
if target_name is None:
target_name = name
self._reindex()
if len(self.keys) == 0:
return
data = self.from_particles.get_values_in_store(self.from_indices, [name,])
self.to_particles.set_values_in_store(self.to_indices, [target_name,], data)
def transform_values(self, attributes, f):
values = self.from_particles.get_values_in_store(self.from_indices, attributes)
return f(*values)
def transform(self, target, function, source):
""" Copy and transform values of one attribute from the source set to the target set.
:argument target: name of the attributes in the target set
:argument function: function used for transform, should return tuple
:argument source: name of the attribute in the source set
>>> from amuse.datamodel import Particles
>>> particles1 = Particles(3)
>>> particles2 = particles1.copy()
>>> particles1.attribute1 = 1
>>> particles1.attribute2 = 2
>>> channel = particles1.new_channel_to(particles2)
>>> channel.transform(["attribute3"], lambda x,y: (x+y,), ["attribute1","attribute2"])
        >>> print(particles2.attribute3)
        [3 3 3]
        >>> channel.transform(["attribute1","attribute1b"], lambda x: (x,2*x), ["attribute1"])
        >>> print(particles2.attribute1b)
        [2 2 2]
"""
self._reindex()
if len(self.keys) == 0:
return
if function is None:
function=lambda *x : x
if not self.to_particles.can_extend_attributes():
target_attributes = self.to_particles.get_settable_attribute_names_defined_in_store()
if not set(target).issubset(set(target_attributes)):
raise Exception("trying to set unsettable attributes {0}".format(
list(set(target)-set(target_attributes))) )
converted=self.transform_values(source, function)
if len(converted) != len(target):
raise Exception("function {0} returns {1} values while target attributes are {2} of length {3}".format(
function.__name__, len(converted), target, len(target)))
self.to_particles.set_values_in_store(self.to_indices, target, converted)
class Channels(object):
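    """
    A collection of channels that can be copied with a single call
    (a minimal usage sketch; the sets and values below are assumptions):
    >>> particles1 = Particles(2)
    >>> particles1.mass = [1.0, 2.0] | units.kg
    >>> particles2 = particles1.copy()
    >>> particles1.mass = [3.0, 4.0] | units.kg
    >>> channels = Channels()
    >>> channels.add_channel(particles1.new_channel_to(particles2))
    >>> channels.copy()
    >>> print(particles2.mass)
    [3.0, 4.0] kg
    """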
def __init__(self, channels=None):
self._channels = []
if channels is not None:
self.add_channels(channels)
def add_channel(self, channel):
self._channels.append(channel)
def add_channels(self, channels):
if isinstance(channels, Channels):
self._channels += channels._channels
else:
for chan in iter(channels):
self.add_channel(chan)
def remove_channel(self, channel):
self._channels.remove(channel)
def copy(self):
for channel in self._channels:
channel.copy()
class ParticlesWithNamespacedAttributesView(AbstractParticleSet):
"""
A view on prefixed attributes of a particle set.
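    Attributes are stored in the wrapped set under the name
    '<namespace>__<attribute>' (an illustrative sketch; the namespace and
    values below are assumptions):
    >>> particles = Particles(2)
    >>> particles.orbit__a = [1.0, 2.0] | units.m
    >>> view = ParticlesWithNamespacedAttributesView(particles, "orbit")
    >>> print(view.a)
    [1.0, 2.0] m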
"""
def __init__(self, particles, namespace):
AbstractParticleSet.__init__(self, particles)
self._private.particles = particles
self._private.namespace = namespace
def compressed(self):
return ParticlesWithNamespacedAttributesView(
self._private.particles.compressed(),
self._private.namespace
)
def get_valid_particles_mask(self):
return self._private.particles.get_valid_particles_mask()
def __getitem__(self, index):
keys = self.get_all_keys_in_store()[index]
if keys is ma.masked:
return None
elif hasattr(keys, '__iter__'):
return self._subset(keys)
else:
return Particle(keys, self)
def _get_version(self):
return self._private.particles._get_version()
def shallow_copy(self):
copiedParticles = self._private.particles.shallow_copy()
return ParticlesWithNamespacedAttributesView(
copiedParticles,
self._private.namespace
)
def unconverted_set(self):
return self._private.particles
def can_extend_attributes(self):
return self._private.particles.can_extend_attributes()
def add_particles_to_store(self, keys, attributes = [], values = []):
namespace = self._private.namespace
namespaced_attributes = [namespace + '__' + x for x in attributes]
self._private.particles.add_particles_to_store(keys, namespaced_attributes, values)
def remove_particles_from_store(self, keys):
self._private.particles.remove_particles_from_store(keys)
def get_values_in_store(self, indices, attributes):
namespace = self._private.namespace
namespaced_attributes = [namespace + '__' + x for x in attributes]
return self._private.particles.get_values_in_store(indices, namespaced_attributes)
def set_values_in_store(self, indices, attributes, values):
namespace = self._private.namespace
namespaced_attributes = [namespace + '__' + x for x in attributes]
self._private.particles.set_values_in_store(indices, namespaced_attributes, values)
def get_attribute_names_defined_in_store(self):
names = self._private.particles.get_attribute_names_defined_in_store()
namespace_prefix = self._private.namespace + '__'
len_namespace_prefix = len(namespace_prefix)
return [x[len_namespace_prefix:] for x in names if x.startswith(namespace_prefix)]
def get_settable_attribute_names_defined_in_store(self):
names = self._private.particles.get_settable_attribute_names_defined_in_store()
namespace_prefix = self._private.namespace + '__'
len_namespace_prefix = len(namespace_prefix)
return [x[len_namespace_prefix:] for x in names if x.startswith(namespace_prefix)]
def get_all_keys_in_store(self):
return self._private.particles.get_all_keys_in_store()
def get_all_indices_in_store(self):
return self._private.particles.get_all_indices_in_store()
def get_indices_of_keys(self, keys):
return self._private.particles.get_indices_of_keys(keys)
def has_key_in_store(self, key):
return self._private.particles.has_key_in_store(key)
def as_set(self):
return ParticlesSubset(self, self.get_all_keys_in_store())
def get_timestamp(self):
return self._private.particles.get_timestamp()
def savepoint(self, timestamp=None):
return ParticlesWithNamespacedAttributesView(
self._private.particles.savepoint(timestamp),
self._private.namespace
)
def previous_state(self):
return ParticlesWithNamespacedAttributesView(
self._private.particles.previous_state(),
self._private.namespace
)
class DomainAttribute(DerivedAttribute):
"""
Combine multiple attributes into the same namespace
"""
def __init__(self, name):
self.name = name
def get_values_for_entities(self, instance):
return ParticlesWithNamespacedAttributesView(instance, self.name)
def set_values_for_entities(self, instance, value):
raise AttributeError('"{0}" is already defined as a namespace attribute, you cannot assign a value to it'.format(self.name))
def get_value_for_entity(self, instance, particle, index):
namespaced_set = ParticlesWithNamespacedAttributesView(particle.particles_set, self.name)
# or:
# return namespaced_set[index]
return Particle(
particle.key,
namespaced_set,
particle._set_index,
particle._set_version
)
def set_value_for_entity(self, instance, key, vector):
raise AttributeError('"{0}" is already defined as a namespace attribute, you cannot assign a value to it'.format(self.name))
class Stars(Particles):
pass
class Particle(object):
"""A physical object or a physical region simulated as a
physical object (cloud particle).
All attributes defined on a particle are specific for
that particle (for example mass or position). A particle contains
a set of attributes, some attributes are *generic* and applicable
for multiple modules. Other attributes are *specific* and are
only applicable for a single module.
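    Example (a minimal sketch; the attribute value is an assumption):
    >>> p = Particle(mass=1.0 | units.kg)
    >>> print(p.mass)
    1.0 kg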
"""
__slots__ = ("key", "particles_set", "_set_index", "_set_version")
    # these are defined so that numpy conversion is way faster
    # otherwise it would go through the __getattr__ function
    # which would slow it down by a factor of 3
if compare_version_strings(numpy.__version__, '1.7.0') < 0:
__array_interface__ = {'shape':() }
else:
__array_interface__ = {'shape':(),'typestr':'|O4' }
def __len__(self):
raise AttributeError()
def __iter__(self):
raise AttributeError()
__array_struct__ = UndefinedAttribute()
__array__ = UndefinedAttribute()
def __init__(self, key = None, particles_set = None, set_index = None, set_version = -1, **keyword_arguments):
if particles_set is None:
            if key is None:
particles_set = Particles(1)
key = particles_set.get_all_keys_in_store()[0]
else:
particles_set = Particles(1, keys = [key])
object.__setattr__(self, "key", key)
object.__setattr__(self, "particles_set", particles_set)
object.__setattr__(self, "_set_index", set_index)
object.__setattr__(self, "_set_version", set_version)
for attribute_name in keyword_arguments:
attribute_value = keyword_arguments[attribute_name]
setattr(self, attribute_name, attribute_value)
def __getstate__(self):
return (self.key, self.as_set().copy())
def __setstate__(self, key_and_set):
key, particles_set = key_and_set
object.__setattr__(self, "key", key)
object.__setattr__(self, "particles_set", particles_set)
object.__setattr__(self, "_set_index", None)
object.__setattr__(self, "_set_version", None)
def __setattr__(self, name_of_the_attribute, new_value_for_the_attribute):
if self._set_index is None or self._set_version != self.particles_set._get_version():
object.__setattr__(self, "_set_index", self.particles_set.get_indices_of_keys([self.key])[0])
object.__setattr__(self, "_set_version", self.particles_set._get_version())
self.particles_set._set_value_of_attribute(
self._set_index,
name_of_the_attribute,
new_value_for_the_attribute
)
def __getattr__(self, name_of_the_attribute):
if self._set_index is None or self._set_version != self.particles_set._get_version():
object.__setattr__(self, "_set_index", self.particles_set.get_indices_of_keys([self.key])[0])
object.__setattr__(self, "_set_version", self.particles_set._get_version())
try:
return self.particles_set._get_value_of_attribute(self, self._set_index, name_of_the_attribute)
except Exception as ex:
            raise AttributeError("You tried to access attribute '{0}' but this attribute is not defined for this set. ({1})".format(name_of_the_attribute, ex))
def children(self):
return self.particles_set.select(lambda x : x == self, ["parent"])
def descendents(self):
result = self.children()
stack = list(result)
while len(stack) > 0:
current = stack.pop()
children = current.children()
result = result.union(children)
stack.extend(children)
return result
def add_child(self, child):
if self.particles_set != child.particles_set:
raise exceptions.AmuseException("The parent and child particles should be in the same set")
child.parent = self
def copy(self):
return self.particles_set.copy()._get_particle(self.key)
def empty_copy(self):
keys = [self.key]
result = Particles()
result.add_particles_to_store(keys, [],[])
object.__setattr__(result, "_derived_attributes", CompositeDictionary(self.particles_set._derived_attributes))
return result._get_particle(self.key)
def __add__(self, particles):
"""
Returns a particle subset, composed of the given
particle(s) and this particle. Attribute values are
not stored by the subset. The subset provides a view
on the particles.
:parameter particles: particle(s) to be added to self.
>>> particles = Particles(2)
>>> particle1 = particles[0]
>>> particle1.x = 1.0 | units.m
>>> particle2 = particles[1]
>>> particle2.x = 2.0 | units.m
>>> new_set = particle1 + particle2
>>> new_set # doctest:+ELLIPSIS
<amuse.datamodel.particles.ParticlesSubset object at 0x...>
        >>> print(len(new_set))
        2
        >>> print(new_set.x)
        [1.0, 2.0] m
"""
return self.as_set().__add__(particles)
def __sub__(self, particles):
"""
Raises an exception: cannot subtract particle(s)
from a particle.
"""
raise exceptions.AmuseException("Cannot subtract particle(s) from a particle.")
def __str__(self):
"""
Display string for a particle
>>> p = Particle(10)
>>> p.x = 10.2 | units.m
>>> p.mass = 5 | units.kg
        >>> print(p) # doctest: +ELLIPSIS
Particle(10, set=<...>
, mass=5.0 kg
, x=10.2 m)
"""
if self._set_index is None or self._set_version != self.particles_set._get_version():
object.__setattr__(self, "_set_index", self.particles_set.get_indices_of_keys([self.key])[0])
object.__setattr__(self, "_set_version", self.particles_set._get_version())
output = 'Particle('
output += str(self.key)
output += ', set=<{0}>'.format(id(self.particles_set))
for name, value in self.particles_set._values_of_particle(self._set_index):
output += '\n , '
output += name
output += '='
if isinstance(value, Particle):
output += 'Particle('
output += str(value.key)
output += ', set=<{0}>'.format(id(value.particles_set))
output += ')'
elif isinstance(value, AbstractParticleSet):
output += value.__class__.__name__
output += '(len={0}, id={1})'.format(len(value), id(value))
else:
output += str(value)
output += ')'
return output
def __dir__(self):
result = []
result.extend(dir(type(self)))
result.extend(self.particles_set._attributes_for_dir())
return result
def __eq__(self, other):
return isinstance(other, type(self)) and other.key == self.key
def __hash__(self):
return self.key.__hash__()
def __ne__(self, other):
return not (isinstance(other, type(self)) and other.key == self.key)
def set_default(self, attribute, quantity):
        if attribute not in self.particles_set.get_attribute_names_defined_in_store():
            setattr(self, attribute, quantity)
def get_timeline_of_attribute(self, attribute):
return self.particles_set.get_timeline_of_attribute(self.key, attribute)
def get_timeline_of_attribute_as_vector(self, attribute):
return self.particles_set.get_timeline_of_attribute_as_vector(self.key, attribute)
def get_timeline_of_attributes(self, attributes):
return self.particles_set.get_timeline_of_attributes(self.key, attributes)
def as_set(self):
"""
Returns a subset view on the set containing this particle. The
subset view includes this particle and no other particles.
>>> particles = Particles(2)
>>> particles.x = [1.0, 2.0] | units.m
>>> particle2 = particles[1]
        >>> print(particle2.x)
        2.0 m
        >>> particles_with_one_particle = particle2.as_set()
        >>> len(particles_with_one_particle)
        1
        >>> print(particles_with_one_particle.x)
        [2.0] m
"""
return ParticlesSubset(self.particles_set, [self.key])
def as_particle_in_set(self, other):
return other._get_particle(self.key)
def get_containing_set(self):
return self.particles_set._original_set()
def update(self, attribute_names, callback):
if self._set_index is None or self._set_version != self.particles_set._get_version():
object.__setattr__(self, "_set_index", self.particles_set.get_indices_of_keys([self.key])[0])
object.__setattr__(self, "_set_version", self.particles_set._get_version())
values = self.particles_set.get_values_in_store((self._set_index), attribute_names)
updated_values = callback(*values)
self.particles_set.set_values_in_store((self._set_index), attribute_names, updated_values)
def create_particle_set(**args):
"""
    Returns a particle set from the input vector quantities. The input
    should be given as named keyword arguments.
>>> m=units.kg([ 1.,1.])
>>> x=units.m([0.,1.])
>>> particles=create_particle_set(mass=m,x=x)
    >>> print(len(particles))
2
"""
if len(args)==0:
raise Exception("provide quantities")
n=len(list(args.values())[0])
for a in args:
nn=len(args[a])
if nn!=n:
raise Exception("unequal length quantities")
particles=Particles(n)
for a in args:
setattr(particles,a,args[a])
return particles
class UpdatingParticlesSubset(AbstractParticleSet):
"""A subset of particles that reruns the selection criteria each time the superset is updated (particles added or removed).
    Attribute values are not stored by the subset. The subset provides
    a limited view on the particles.
Particle subset objects are not supposed to be created
directly. Instead use the ``to_set`` or ``select`` methods.
"""
def __init__(self, particles, selection_function):
AbstractParticleSet.__init__(self, particles)
self._private.particles = particles
self._private.selection_function = selection_function
self._private.keys = [] # numpy.array(keys, dtype='uint64')
self._private.set_of_keys = set([])
self._private.version = -1
self._private.indices = None
def __getitem__(self, index):
keys = self.get_all_keys_in_store()[index]
if hasattr(keys, '__iter__'):
return self._subset(keys)
else:
key = keys
if key > 0 and key < 18446744073709551615: #2**64 - 1
return self._private.particles._get_particle_unsave(key, self.get_all_indices_in_store()[index])
else:
return None
def _get_version(self):
return self._private.particles._get_version()
def compressed(self):
self._sync_with_set()
keys = self._private.keys
return self._subset(keys[numpy.logical_and(keys > 0 , keys < 18446744073709551615)])
def get_valid_particles_mask(self):
self._sync_with_set()
keys = self._private.keys
return numpy.logical_and(keys > 0 , keys < 18446744073709551615)
def unconverted_set(self):
        return UpdatingParticlesSubset(self._private.particles.unconverted_set(), self._private.selection_function)
def add_particles_to_store(self, keys, attributes = [], values = []):
"""
        Adds particles to the subset, and also
        adds the particles to the superset
"""
self._private.keys = None
self._private.set_of_keys = set([])
self._private.version = -1
self._private.particles.add_particles_to_store(keys, attributes, values)
def remove_particles_from_store(self, indices):
"""
        Removes particles from the subset, and removes
        the particles from the superset
"""
indices_to_remove = set(indices)
index = 0
index_in_local_list = []
for x in self._private.indices:
if x in indices_to_remove:
index_in_local_list.append(index)
index += 1
set_to_remove = set(self._private.keys[index_in_local_list])
self._private.keys = [] # will rerun the selection
self._private.set_of_keys = set([])
self._private.particles.remove_particles_from_store(indices)
self._private.version = -1
self._private.indices = None
def get_values_in_store(self, indices, attributes):
if indices is None:
indices = self.get_all_indices_in_store()
return self._private.particles.get_values_in_store(indices, attributes)
def set_values_in_store(self, indices, attributes, values):
if indices is None:
indices = self.get_all_indices_in_store()
self._private.particles.set_values_in_store(indices, attributes, values)
def get_attribute_names_defined_in_store(self):
return self._private.particles.get_attribute_names_defined_in_store()
def get_settable_attribute_names_defined_in_store(self):
return self._private.particles.get_settable_attribute_names_defined_in_store()
def get_all_keys_in_store(self):
self._sync_with_set()
return self._private.keys
def get_all_indices_in_store(self):
self._sync_with_set()
return self._private.indices
def has_key_in_store(self, key):
self._sync_with_set()
return key in self._private.set_of_keys
def _original_set(self):
return self._private.particles
def get_timestamp(self):
return self._original_set().get_timestamp()
def get_indices_of_keys(self, keys):
self._sync_with_set()
return self._private.particles.get_indices_of_keys(keys)
def _sync_with_set(self):
if self._private.particles is None:
return
if not self._private.version == self._private.particles._get_version():
try:
keys = self._private.particles.get_all_keys_in_store()[self._private.selection_function(self._private.particles)]
self._private.keys = numpy.array(keys, dtype='uint64')
self._private.set_of_keys = set(keys)
self._private.indices = self._private.particles.get_indices_of_keys(self._private.keys)
self._private.version = self._private.particles._get_version()
except exceptions.KeysNotInStorageException as ex:
self._private.indices = ex.found_indices
self._private.keys = numpy.array(ex.found_keys, dtype='uint64')
self._private.set_of_keys = set(self._private.keys)
self._private.version = self._private.particles._get_version()
def previous_state(self):
return ParticlesSubset(self._private.particles.previous_state(), self._private.selection_function)
def difference(self, other):
self._sync_with_set()
new_set_of_keys = self._private.set_of_keys.difference(other.as_set()._private.set_of_keys)
return ParticlesSubset(self._private.particles, list(new_set_of_keys))
def union(self, other):
"""
Returns a new subset containing the union between
this set and the provided set.
>>> particles = Particles(3)
>>> particles.mass = [10.0, 20.0, 30.0] | units.kg
>>> subset1 = particles.select(lambda x : x > 25.0 | units.kg, ["mass"])
>>> subset2 = particles.select(lambda x : x < 15.0 | units.kg, ["mass"])
>>> union = subset1.union(subset2)
>>> len(union)
2
>>> sorted(union.mass.value_in(units.kg))
[10.0, 30.0]
"""
self._sync_with_set()
new_set_of_keys = self._private.set_of_keys.union(other.as_set()._private.set_of_keys)
return ParticlesSubset(self._private.particles, list(new_set_of_keys))
def as_set(self):
return self
def copy(self, memento = None, keep_structure = False, filter_attributes = lambda particle_set, x : True):
self._sync_with_set()
if keep_structure:
            result = UpdatingParticlesSubset(None, self._private.selection_function)
if memento is None:
memento = dict()
memento[id(self)] = result
if id(self._private.particles) in memento:
result._private.particles = memento[id(self._private.particles)]
else:
result._private.particles = self._private.particles.copy(memento, keep_structure, filter_attributes)
            result._private.selection_function = self._private.selection_function
result._private.keys = []
result._private.set_of_keys = set([])
result._private.collection_attributes = self._private.collection_attributes._copy_for_collection(result)
return result
else:
return super(UpdatingParticlesSubset, self).copy(memento, keep_structure, filter_attributes)
| 155220 | 36.134211 | 167 | py | amuse | amuse-main/support/misc.py |
import sys
import re
import os
import glob
import fnmatch
from os import walk as py_walk
def walk(top, callback, args):
for root, dirs, files in py_walk(top):
callback(args, root, files)
def find_data_files(srcdir, destdir, *wildcards, **kw):
"""
get a list of all files under the srcdir matching wildcards,
returned in a format to be used for install_data
"""
def walk_helper(arg, dirname, files):
if '.svn' in dirname:
return
names = []
lst, wildcards, dirnameconverter, destdir = arg
for wc in wildcards:
wc_name = os.path.normpath(os.path.join(dirname, wc))
for f in files:
filename = os.path.normpath(os.path.join(dirname, f))
if fnmatch.fnmatch(filename, wc_name) and not os.path.isdir(filename):
names.append(filename)
if names:
destdirname = dirnameconverter.sub(destdir, dirname)
lst.append((destdirname, names))
file_list = []
recursive = kw.get('recursive', True)
converter = re.compile('^({0})'.format(srcdir))
if recursive:
walk(srcdir, walk_helper, (file_list, wildcards, converter, destdir))
else:
walk_helper((file_list, wildcards, converter, destdir),
srcdir,
[os.path.basename(f) for f in glob.glob(os.path.join(srcdir, '*'))])
return file_list
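# Usage sketch for find_data_files (illustrative; the directory layout and
# destination below are assumptions):
#   find_data_files('data', 'share/amuse/data', '*.txt')
# returns entries like ('share/amuse/data', ['data/readme.txt']), in the
# format expected by the data_files argument of setup().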
| 1420 | 32.046512 | 88 | py | amuse | amuse-main/support/setup_codes.py |
import warnings
import sys
import os
import os.path
import re
import datetime
from copy import deepcopy
from os.path import abspath
try:
import numpy
except ImportError:
warnings.warn("numpy needed during build; operation may fail")
import configparser
from subprocess import Popen, PIPE, STDOUT
from glob import glob
from distutils.dir_util import create_tree
from distutils import log
from distutils import spawn
from distutils import file_util
from distutils.errors import DistutilsError
from distutils.command.clean import clean
from setuptools.command.install import install
from setuptools import Command
from setuptools.command.build import build
from setuptools.command.develop import develop
from setuptools.command.editable_wheel import editable_wheel
from . import supportrc
if supportrc["framework_install"]:
from .generate_main import generate_main
try:
from numpy.distutils import fcompiler
except ImportError:
fcompiler = None
# regular expression to check whether a script's first line (the shebang) invokes Python
first_line_re = re.compile('^#!.*python[0-9.]*([ \t].*)?$')
def pyfiles_in_build_dir(builddir):
module_files = glob(os.path.join(builddir, "*.py"))
result = []
for x in module_files:
result.append(os.path.abspath(x))
return result
class InstallLibraries(Command):
user_options = [
('build-temp=', 't',
"directory for temporary files (build by-products)"),
('inplace', 'i',
"ignore build-lib and put compiled extensions into the source " +
"directory alongside your pure Python modules", 1),
('no-inplace', 'k',
"put compiled extensions into the build temp "),
('lib-dir=', 'l', "directory containing libraries to build"),
('install-data=', None, "installation directory for data files"),
('root=', None, "install everything relative to this alternate root directory"),
]
negative_opt = {'no-inplace': 'inplace'}
boolean_options = ['inplace']
def initialize_options(self):
self.codes_dir = None
self.lib_dir = None
self.inplace = False
self.build_lib = None
self.build_temp = None
self.install_data = None
self.root = None
def finalize_options(self):
self.set_undefined_options(
'install',
('build_lib', 'build_lib'),
('root', 'root'),
('install_data', 'install_data'),
)
self.set_undefined_options(
'build',
('build_temp', 'build_temp'),
)
if self.lib_dir is None:
if self.inplace:
self.lib_dir = os.path.join('lib')
else:
self.lib_dir = os.path.join(self.build_temp, 'lib')
else:
if self.inplace:
pass
else:
self.lib_dir = os.path.join(self.build_temp, 'lib')
def run(self):
data_dir = os.path.join(self.install_data, 'share', 'amuse') # for the moment add to amuse..
data_dir = os.path.abspath(data_dir)
# copy only:
# '*.h', '*.a', '*.mod', '*.inc', '*.so', '*.dylib'
files = [os.path.join(dp, f) for dp, dn, fn in os.walk(self.lib_dir) for f in fn]
ext = ['.h', '.a', '.mod', '.inc', '.so', '.dylib']
files = [f for f in files if (os.path.splitext(f)[1] in ext)]
files = [os.path.relpath(f, self.lib_dir) for f in files]
create_tree(os.path.join(data_dir, 'lib'), files)
for f in files:
src = os.path.join(self.lib_dir, f)
target = os.path.join(data_dir, 'lib', f)
self.copy_file(src, target)
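        # Example result (a sketch; the file name is an assumption): a file
        # <build_temp>/lib/somecode/somecode.h is copied to
        # <install_data>/share/amuse/lib/somecode/somecode.h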
class GenerateInstallIni(Command):
user_options = (
('build-dir=', 'd', "directory to install to"),
('install-data=', None, "installation directory for data files"),
('force', 'f', "force installation (overwrite existing files)"),
('root=', None, "install everything relative to this alternate root directory"),
)
boolean_options = ['force']
def initialize_options(self):
self.build_dir = None
self.install_data = None
self.force = False
self.root = None
def finalize_options(self):
self.set_undefined_options(
'install',
('build_lib', 'build_dir'),
('install_data', 'install_data'),
('root', 'root'),
('force', 'force'),
)
def run(self):
outfilename = os.path.join(self.build_dir, supportrc["package_name"], 'amuserc')
# this does not work for pip installs
# data_dir = os.path.join(self.install_data,'share','amuse')
# if not self.root is None:
# data_dir = os.path.relpath(data_dir,self.root)
# data_dir = os.path.join('/',data_dir)
# else:
# data_dir = os.path.abspath(data_dir)
installinilines = []
installinilines.append('[channel]')
installinilines.append('must_check_if_worker_is_up_to_date=0')
installinilines.append('use_python_interpreter=1')
# installinilines.append('worker_code_directory={0}'.format(os.path.join(data_dir, 'bin')))
if sys.platform == 'win32':
installinilines.append('worker_code_suffix=".exe"')
installinilines.append('[data]')
# installinilines.append('input_data_root_directory={0}'.format(os.path.join(data_dir, 'data')))
installinilines.append('output_data_root_directory=_amuse_output_data')
# installinilines.append('amuse_root_dir={0}'.format(data_dir))
if 'BUILD_BINARY' in os.environ:
installinilines.append('[test]')
installinilines.append('can_run_tests_to_compile_modules=0')
self.mkpath(os.path.join(self.build_dir, supportrc["package_name"]))
file_util.write_file(outfilename, installinilines)
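        # For reference, the generated amuserc then looks like this (sketch,
        # assuming a non-win32 platform and BUILD_BINARY unset):
        #
        # [channel]
        # must_check_if_worker_is_up_to_date=0
        # use_python_interpreter=1
        # [data]
        # output_data_root_directory=_amuse_output_data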
class CodeCommand(Command):
user_options = [
('build-lib=', 'b',
"directory for compiled extension modules"),
('build-temp=', 't',
"directory for temporary files (build by-products)"),
('inplace', 'i',
"ignore build-lib and put compiled extensions into the source " +
"directory alongside your pure Python modules", 1),
('no-inplace', 'k',
"put compiled extensions into the build temp "),
('define=', 'D',
"C preprocessor macros to define"),
('undef=', 'U',
"C preprocessor macros to undefine"),
('debug', 'g',
"compile/link with debugging information"),
('force', 'f',
"forcibly build everything (ignore file timestamps)"),
('variant', 'V',
"build variants of the codes (gpu versions etc)"),
('codes-dir=', 'd', "directory containing codes"),
('lib-dir=', 'l', "directory containing libraries to build"),
]
negative_opt = {'no-inplace': 'inplace'}
boolean_options = ['force', 'inplace', 'debug', 'variant']
def initialize_options(self):
self.codes_dir = None
self.lib_dir = None
self.lib_src_dir = None
self.amuse_src_dir = os.path.join('src', supportrc["package_name"])
self.environment = {}
self.environment_notset = {}
self.found_cuda = False
self.found_sapporo = False
self.variant = True
self.inplace = False
self.build_lib = None
self.build_temp = None
self.debug = None
self.force = None
def finalize_options(self):
self.set_undefined_options(
'build',
('build_lib', 'build_lib'),
('build_temp', 'build_temp'),
('debug', 'debug'),
('force', 'force'),
)
self.config = None
if supportrc["framework_install"]:
try:
from . import config
self.config = config
except ImportError:
                # no generated config available; continue without it
                pass
else:
from amuse import config
self.config = config
if self.codes_dir is None:
if self.inplace:
self.codes_dir = os.path.join(self.amuse_src_dir,'community')
self.codes_src_dir = self.codes_dir
else:
# self.codes_dir = os.path.join(self.build_temp, 'src', 'amuse', 'community')
self.codes_dir = os.path.join(self.build_temp, 'codes')
self.codes_src_dir = os.path.join(self.amuse_src_dir, 'community')
else:
if self.inplace:
self.codes_src_dir = self.codes_dir
else:
self.codes_src_dir = self.codes_dir
# self.codes_dir = os.path.join(self.build_temp, 'src', 'amuse', 'community')
self.codes_dir = os.path.join(self.build_temp, 'codes')
if self.lib_dir is None:
if self.inplace:
self.lib_dir = os.path.join('lib')
self.lib_src_dir = self.lib_dir
else:
self.lib_dir = os.path.join(self.build_temp, 'lib')
self.lib_src_dir = os.path.join('lib')
        else:
            if self.inplace:
                self.lib_src_dir = self.lib_dir
            else:
                # keep the user-specified directory as the source and
                # redirect the actual build to the temp directory
                self.lib_src_dir = self.lib_dir
                self.lib_dir = os.path.join(self.build_temp, 'lib')
if self.config:
self.environment['PYTHON'] = self.config.interpreters.python
else:
self.environment['PYTHON'] = sys.executable
self.set_cuda_variables()
self.set_mpi_variables()
self.set_compiler_variables()
self.set_fortran_variables()
if 'FORTRAN' in self.environment:
self.environment['F90'] = self.environment['FORTRAN']
self.environment['FC'] = self.environment['FORTRAN']
self.set_java_variables()
self.set_openmp_flags()
self.set_libdir_variables()
self.set_libs_variables()
self.save_cfgfile_if_not_exists()
if 'MSYSCON' in os.environ:
pass
else:
if not supportrc["framework_install"]:
try:
from amuse.support import get_amuse_root_dir
except ImportError:
raise Exception("AMUSE framework needs to be installed and environment set up.")
self.environment['AMUSE_DIR'] = get_amuse_root_dir()
else:
if self.inplace:
self.environment['AMUSE_DIR'] = os.path.abspath(os.getcwd())
else:
self.environment['AMUSE_DIR'] = os.path.abspath(self.build_temp)
if self.inplace:
self.environment['MUSE_PACKAGE_DIR'] = os.path.abspath(os.getcwd())
else:
self.environment['MUSE_PACKAGE_DIR'] = os.path.abspath(self.build_temp)
def set_fortran_variables(self):
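        # Pick a Fortran compiler, in order of preference: an already
        # set value, the FORTRAN environment variable, the configure
        # result, FC/FORT/F90, whatever mpif90 reports via
        # -show/--showme, and finally numpy's fcompiler helper.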
if 'FORTRAN' in self.environment:
return
if 'FORTRAN' in os.environ:
self.environment['FORTRAN'] = os.environ['FORTRAN']
return
if self.config:
self.environment['FORTRAN'] = self.config.compilers.fc
return
if 'FC' in os.environ:
self.environment['FORTRAN'] = os.environ['FC']
return
if 'FORT' in os.environ:
self.environment['FORTRAN'] = os.environ['FORT']
return
if 'F90' in os.environ:
self.environment['FORTRAN'] = os.environ['F90']
return
mpif90 = os.environ['MPIF90'] if 'MPIF90' in os.environ else 'mpif90'
try:
            process = Popen([mpif90, '-show'], stdout=PIPE, stderr=PIPE)
            stdoutstring, stderrstring = process.communicate()
            if process.returncode == 0:
                parts = stdoutstring.decode('utf-8').split()
                self.environment['FORTRAN'] = parts[0]
                return
            # openmpi spells the option --showme (no trailing space)
            process = Popen([mpif90, '--showme'], stdout=PIPE, stderr=PIPE)
            stdoutstring, stderrstring = process.communicate()
            if process.returncode == 0:
                parts = stdoutstring.decode('utf-8').split()
                self.environment['FORTRAN'] = parts[0]
                return
except:
pass
if fcompiler:
compiler = fcompiler.new_fcompiler(requiref90=True)
if compiler is not None:
fortran_executable = compiler.executables['compiler_f90'][0]
self.environment['FORTRAN'] = fortran_executable
def is_mpi_enabled(self):
if self.config and hasattr(self.config.mpi, 'is_enabled'):
return self.config.mpi.is_enabled
else:
return True
def set_cuda_variables(self):
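        # CUDA detection in three stages: configure results (enabled or
        # explicitly disabled), CUDA_TK/CUDA_SDK from amuse.cfg or the
        # environment, and finally the location of nvcc on the PATH.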
all_found = True
if self.config and self.config.cuda.is_enabled:
self.found_cuda = True
self.environment['CUDA_LIBDIRS'] = '-L'+self.config.cuda.toolkit_path+'/lib' + ' -L'+self.config.cuda.toolkit_path+'/lib64'
self.environment['CUDA_TK'] = self.config.cuda.toolkit_path
self.environment['CUDA_SDK'] = self.config.cuda.sdk_path
if hasattr(self.config.cuda, 'cuda_libs'):
self.environment['CUDA_LIBS'] = self.config.cuda.cuda_libs
else:
raise DistutilsError("configuration is not up to date for cuda, please reconfigure amuse by running 'configure --enable-cuda'")
return
if self.config and not self.config.cuda.is_enabled:
self.found_cuda = True
            self.environment['CUDA_LIBDIRS'] = '-L/NOCUDACONFIGURED/lib' + ' -L/NOCUDACONFIGURED/lib64'
self.environment['CUDA_LIBS'] = '-lnocuda'
self.environment['CUDART_LIBS'] = '-lnocudart'
self.environment['CUDA_TK'] = '/NOCUDACONFIGURED'
self.environment['CUDA_SDK'] = '/NOCUDACONFIGURED'
return
for x in ['CUDA_TK', 'CUDA_SDK']:
if not x in self.environment:
all_found = False
break
if all_found:
cuda_dir = self.environment['CUDA_TK']
self.environment['CUDA_LIBDIRS'] = '-L'+cuda_dir+'/lib' + ' -L'+cuda_dir+'/lib64'
self.environment['CUDA_LIBS'] = '-lcudart'
return
directory = spawn.find_executable('nvcc')
if directory is None:
self.found_cuda = False
self.environment_notset['CUDA_SDK'] = '<directory>'
self.environment_notset['CUDA_TK'] = '<directory>'
return
cuda_dir = os.path.dirname(os.path.dirname(directory))
self.environment['CUDA_LIBDIRS'] = '-L'+cuda_dir+'/lib' + ' -L'+cuda_dir+'/lib64'
self.environment['CUDA_LIBS'] = '-lcudart'
self.environment['CUDA_TK'] = cuda_dir
if not 'CUDA_SDK' in self.environment:
self.environment_notset['CUDA_SDK'] = '<directory>'
self.found_cuda = True
def set_mpi_variables(self):
if self.config:
self.environment['MPICXX'] = self.config.mpi.mpicxx
self.environment['MPICC'] = self.config.mpi.mpicc
self.environment['MPIF90'] = self.config.mpi.mpif95
return
def set_compiler_variables(self):
if self.config and not hasattr(self.config.compilers, 'found_fftw'):
raise DistutilsError("configuration is not up to date, please reconfigure amuse by running 'configure'")
if self.config:
self.environment['CXX'] = self.config.compilers.cxx
self.environment['CC'] = self.config.compilers.cc
self.environment['FC'] = self.config.compilers.fc
self.environment['CFLAGS'] = self.config.compilers.cc_flags
self.environment['CXXFLAGS'] = self.config.compilers.cxx_flags
self.environment['FFLAGS'] = self.config.compilers.fc_flags
if self.config.compilers.found_fftw == 'yes':
self.environment['FFTW_FLAGS'] = self.config.compilers.fftw_flags
self.environment['FFTW_LIBS'] = self.config.compilers.fftw_libs
if self.config.compilers.found_gsl == 'yes':
self.environment['GSL_FLAGS'] = self.config.compilers.gsl_flags
self.environment['GSL_LIBS'] = self.config.compilers.gsl_libs
return
def set_java_variables(self):
if self.config and hasattr(self.config, 'java') and hasattr(self.config.java, 'is_enabled') and self.config.java.is_enabled:
self.environment['JAVA'] = self.config.java.java
self.environment['JAVAC'] = self.config.java.javac
self.environment['JAR'] = self.config.java.jar
else:
self.environment['JAVA'] = ''
self.environment['JAVAC'] = ''
self.environment['JAR'] = ''
return
def set_openmp_flags(self):
if self.config and hasattr(self.config, 'openmp'):
self.environment['OPENMP_FCFLAGS'] = self.config.openmp.fcflags
self.environment['OPENMP_CFLAGS'] = self.config.openmp.cflags
else:
self.environment['OPENMP_FCFLAGS'] = ''
self.environment['OPENMP_CFLAGS'] = ''
def set_libdir_variables(self):
for varname in ('SAPPORO_LIBDIRS', 'GRAPE6_LIBDIRS'):
if varname in self.environment:
continue
if varname in os.environ:
self.environment[varname] = os.environ[varname]
else:
self.environment_notset[varname] = '-L<directory>'
if 'SAPPORO_LIBDIRS' in self.environment:
self.environment['SAPPORO_LIBS'] = '-L{0} -lsapporo'.format(
self.environment['SAPPORO_LIBDIRS']
)
else:
if self.config and hasattr(self.config.cuda, 'sapporo_version'):
if self.config.cuda.sapporo_version == '2':
self.environment['SAPPORO_LIBS'] = '-L{0}/lib/sapporo-2 -lsapporo {1}'.format(
os.path.abspath(os.getcwd()),
self.config.openmp.cflags
)
else:
self.environment['SAPPORO_LIBS'] = '-L{0}/lib/sapporo_light -lsapporo'.format(
os.path.abspath(os.getcwd())
)
else:
self.environment['SAPPORO_LIBS'] = '-L{0}/lib/sapporo_light -lsapporo'.format(
os.path.abspath(os.getcwd())
)
self.environment['BOOSTLIBS'] = ''
def set_libs_variables(self):
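        # The list below is currently empty, so this loop sets nothing;
        # it is kept as a template for optional external libraries.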
for varname, libname in []:
if varname in self.environment:
continue
if varname in os.environ:
self.environment[varname] = os.environ[varname]
else:
self.environment_notset[varname] ='-L<directory> -l{0}'.format(libname)
def copy_config_to_build_dir(self):
configpath = os.path.abspath(os.getcwd())
if self.inplace:
topath = self.amuse_src_dir
else:
topath = os.path.join(self.build_lib, "amuse")
self.copy_file(os.path.join(configpath, "config.mk"), topath)
def copy_build_prereq_to_build_dir(self):
if not os.path.exists(self.build_temp):
self.mkpath(self.build_temp)
if supportrc["framework_install"]:
configpath = os.path.abspath(os.getcwd())
self.copy_file(os.path.join(configpath, "config.mk"), self.build_temp)
# self.copy_tree(os.path.join(configpath,"support"), os.path.join(self.build_temp,"support") )
# self.copy_tree(os.path.join(configpath,"src"), os.path.join(self.build_temp,"src") )
path = os.path.join(self.build_temp, "src")
if not os.path.exists(path) and not os.path.islink(path):
os.symlink(os.path.relpath(self.build_lib, self.build_temp), path)
def copy_codes_to_build_dir(self):
for directory in self.makefile_paths(self.codes_src_dir):
reldir = os.path.relpath(directory, self.codes_src_dir)
self.copy_tree(
directory,
os.path.join(self.codes_dir, reldir)
)
def copy_lib_to_build_dir(self):
for directory in self.makefile_paths(self.lib_src_dir):
reldir = os.path.relpath(directory, self.lib_src_dir)
self.copy_tree(
directory,
os.path.join(self.lib_dir, reldir)
)
def copy_worker_codes_to_build_dir(self):
if sys.platform == 'win32':
            worker_code_re = re.compile(r'(([a-zA-Z0-9]+_)*)?worker(_[a-zA-Z0-9]+)?(\.exe)?')
else:
worker_code_re = re.compile(r'(([a-zA-Z0-9]+_)*)?worker(_[a-zA-Z0-9]+)?')
        worker_so_re = re.compile(r'(([a-zA-Z0-9]+_)*)?cython(_[a-zA-Z0-9]+)?\.so')
lib_binbuilddir = os.path.join(self.build_lib, supportrc["package_name"], '_workers')
if not os.path.exists(lib_binbuilddir):
self.mkpath(lib_binbuilddir)
for srcdir in self.makefile_paths(self.codes_src_dir):
reldir = os.path.relpath(srcdir, self.codes_src_dir)
temp_builddir = os.path.join(self.codes_dir, reldir)
self.announce("will copy worker: {0}".format(srcdir), level=log.INFO)
lib_builddir = os.path.join(self.build_lib, os.path.relpath(srcdir, os.path.join(self.amuse_src_dir, '..')))
shortname = reldir.lower()
self.announce(shortname, level=log.INFO)
for name in os.listdir(temp_builddir):
path = os.path.join(temp_builddir, name)
stat = os.stat(path)
if os.path.isfile(path):
if worker_so_re.match(name):
topath = os.path.join(lib_builddir, name)
self.copy_file(path, topath)
continue
# self.announce("will copy worker: {0}".format(name), level = log.INFO)
if os.path.isfile(path) and os.access(path, os.X_OK):
if worker_code_re.match(name):
topath = os.path.join(lib_binbuilddir, name)
self.copy_file(path, topath)
elif not name.endswith('.py'):
self.announce("will not copy executable: {0}, it does not match the worker pattern".format(name), level=log.WARN)
# also copy file or dir named data
path = os.path.join(temp_builddir, 'data')
topath = os.path.join(lib_builddir, 'data')
if os.path.isfile(path):
self.copy_file(path, topath)
if os.path.isdir(path):
self.copy_tree(path, topath)
def copy_worker_codes(self):
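        # Near-duplicate of copy_worker_codes_to_build_dir above; the
        # difference is that worker executables are copied next to the
        # code's own package directory instead of the shared _workers
        # directory.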
if sys.platform == 'win32':
            worker_code_re = re.compile(r'(([a-zA-Z0-9]+_)*)?worker(_[a-zA-Z0-9]+)?(\.exe)?')
else:
worker_code_re = re.compile(r'(([a-zA-Z0-9]+_)*)?worker(_[a-zA-Z0-9]+)?')
        worker_so_re = re.compile(r'(([a-zA-Z0-9]+_)*)?cython(_[a-zA-Z0-9]+)?\.so')
for srcdir in self.makefile_paths(self.codes_src_dir):
reldir = os.path.relpath(srcdir, self.codes_src_dir)
temp_builddir = os.path.join(self.codes_dir, reldir)
self.announce("will copy worker: {0}".format(srcdir), level=log.INFO)
lib_builddir = os.path.join(self.build_lib, os.path.relpath(srcdir, os.path.join(self.amuse_src_dir, '..')))
shortname = reldir.lower()
self.announce(shortname, level=log.INFO)
for name in os.listdir(temp_builddir):
path = os.path.join(temp_builddir, name)
stat = os.stat(path)
if os.path.isfile(path):
if worker_so_re.match(name):
topath = os.path.join(lib_builddir, name)
self.copy_file(path, topath)
continue
if os.path.isfile(path) and os.access(path, os.X_OK):
if worker_code_re.match(name):
topath = os.path.join(lib_builddir, name)
self.copy_file(path, topath)
elif not name.endswith('.py'):
self.announce("will not copy executable: {0}, it does not match the worker pattern".format(name), level=log.WARN)
# also copy file or dir named data
path = os.path.join(temp_builddir, 'data')
topath = os.path.join(lib_builddir, 'data')
if os.path.isfile(path):
self.copy_file(path, topath)
if os.path.isdir(path):
self.copy_tree(path, topath)
def subdirs_in_path(self, path):
if not os.path.exists(path):
return
names = sorted(os.listdir(path))
for name in names:
if name.startswith('.'):
continue
path_ = os.path.join(path, name)
if os.path.isdir(path_):
yield path_
def makefile_paths(self, path):
for x in self.subdirs_in_path(path):
for name in ('makefile', 'Makefile'):
makefile_path = os.path.join(x, name)
if os.path.exists(makefile_path):
yield x
break
def update_environment_from_cfgfile(self):
if os.path.exists('amuse.cfg'):
config = configparser.ConfigParser()
config.read(['amuse.cfg'])
for name, value in config.items('environment'):
if isinstance(value, str) and value:
varname = name.upper()
self.environment[varname] = value
if varname in self.environment_notset:
del self.environment_notset[varname]
def save_cfgfile_if_not_exists(self):
if not os.path.exists('amuse.cfg'):
config = configparser.RawConfigParser()
config.add_section('environment')
for name, value in self.environment.items():
config.set('environment', name, value)
for name, value in self.environment_notset.items():
config.set('environment', name, '')
with open('amuse.cfg', 'w') as f:
config.write(f)
def get_special_targets(self, name, directory, environment):
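        # Inspect the make database (make -qp) for extra worker targets
        # (muse_worker_*, worker_code_*, <name>_worker_*) so that GPU,
        # GRAPE and similar build variants can be discovered per code.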
process = Popen(['make', '-qp', '-C', directory], env=environment, stdout=PIPE, stderr=PIPE)
stdoutstring, stderrstring = process.communicate()
stdoutstring = str(stdoutstring, 'utf-8')
lines = stdoutstring.splitlines()
result = []
for line in lines:
if line.startswith('muse_worker_gpu:'):
result.append(('muse_worker_gpu', 'GPU',))
elif line.startswith('muse_worker_grape:'):
result.append(('muse_worker_grape', 'GRAPE6',))
elif line.startswith('muse_worker_'):
index_of_the_colon = line.index(':')
if(index_of_the_colon > 0):
targetname = line[len('muse_worker_'):index_of_the_colon]
if '%' not in targetname:
result.append((line[:index_of_the_colon], targetname,))
elif line.startswith('worker_code_'):
index_of_the_colon = line.index(':')
if(index_of_the_colon > 0):
targetname = line[len('worker_code_'):index_of_the_colon]
if '%' not in targetname:
result.append((line[:index_of_the_colon], targetname,))
elif line.startswith(name + '_worker_'):
index_of_the_colon = line.index(':')
if(index_of_the_colon > 0):
targetname = line[len(name + '_worker_'):index_of_the_colon]
if '%' not in targetname:
result.append((line[:index_of_the_colon], targetname,))
result = list(set(result))
return result
def call(self, arguments, buildlogfile=None, **keyword_arguments):
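        # Run a command, streaming combined stdout/stderr line by line
        # to the log (and optionally a build log file); returns the
        # exit code together with the captured output.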
stringio = []
self.announce(' '.join(arguments), log.DEBUG)
process = Popen(
arguments,
stdout=PIPE,
stderr=STDOUT,
**keyword_arguments
)
while True:
line = process.stdout.readline()
if len(line) == 0:
break
if buildlogfile is not None:
buildlogfile.write(line)
self.announce(line[:-1].decode("utf-8"), log.DEBUG)
stringio.append(str(line, 'utf-8'))
result = process.wait()
content = ''.join(stringio)
if result != 0:
self.announce("error in call, tail output:\n", log.INFO)
self.announce(''.join(stringio[-100:]), log.INFO)
self.announce("-"*80, log.INFO)
return result, content
def build_environment(self):
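        # Environment passed to make: the collected variables, with
        # os.environ taking precedence, and the package/framework src
        # directories prepended to PYTHONPATH.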
environment = self.environment.copy()
environment.update(os.environ)
path = os.path.join(environment["MUSE_PACKAGE_DIR"], "src")
if environment["MUSE_PACKAGE_DIR"] != environment["AMUSE_DIR"]:
path = path+":"+os.path.join(environment["AMUSE_DIR"], "src")
path = path+':'+environment.get("PYTHONPATH", "")
environment["PYTHONPATH"] = path
return environment
def do_clean(self):
environment = self.build_environment()
for x in self.makefile_paths(self.lib_dir):
            self.announce("cleaning library " + x)
self.call(['make', '-C', x, 'clean'], env=environment)
for x in self.makefile_paths(self.codes_dir):
if os.path.exists(x):
self.announce("cleaning " + x)
self.call(['make', '-C', x, 'clean'], env=environment)
def do_distclean(self):
environment = self.build_environment()
        for x in self.makefile_paths(self.lib_dir):
            self.announce("cleaning library: " + x)
            self.call(['make', '-C', x, 'distclean'], env=environment)
        for x in self.makefile_paths(self.codes_dir):
            self.announce("cleaning community code: " + x)
            self.call(['make', '-C', x, 'distclean'], env=environment)
class SplitOutput(object):
def __init__(self, file1, file2):
self.file1 = file1
self.file2 = file2
def __del__(self):
self.close()
def close(self):
self.file1.close()
self.file2.close()
def write(self, text):
self.file1.write(text)
self.file2.write(text)
def flush(self):
self.file1.flush()
self.file2.flush()
class BuildCodes(CodeCommand):
description = "build interfaces to codes"
user_options = list(CodeCommand.user_options)
user_options.append(('clean=', 'c', "clean code",), )
def initialize_options(self):
CodeCommand.initialize_options(self)
self.clean = 'no'
def finalize_options(self):
CodeCommand.finalize_options(self)
self.must_clean = self.clean == 'yes'
self.must_dist_clean = self.clean == 'dist'
def run_make_on_directory(self, codename, directory, target, environment):
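        # Run `make <target>` for one code, bracketing its output in
        # build.log; when AMUSE_USE_CCACHE is set (and no CCACHE_BASEDIR
        # given), CCACHE_BASEDIR is pinned to the code directory so
        # cached objects stay relocatable.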
buildlog = os.path.abspath("build.log")
with open(buildlog, "a") as output:
output.write('*'*100)
output.write('\n')
output.write('Building code: {0}, target: {1}, in directory: {2}\n'.format(codename, target, directory))
output.write('*'*100)
output.write('\n')
output.flush()
if environment.get('AMUSE_USE_CCACHE', 0) != "1" or "CCACHE_BASEDIR" in environment:
build_environment = environment
else:
build_environment = deepcopy(environment)
build_environment["CCACHE_BASEDIR"] = abspath(directory)
with open(buildlog, "ab") as output:
result, resultcontent = self.call(
['make', '-C', directory, target],
output,
env=build_environment
)
with open(buildlog, "a") as output:
output.write('*'*100)
output.write('\n')
return result, resultcontent
def is_download_needed(self, string):
for line in string.splitlines():
if 'DOWNLOAD_CODES' in line:
return True
return False
def is_cuda_needed(self, string):
for line in string.splitlines():
if 'CUDA_TK variable is not set' in line:
return True
if 'CUDA_SDK variable is not set' in line:
return True
return False
def are_python_imports_needed(self, string):
for line in string.splitlines():
if 'Python imports not available' in line:
return True
return False
def run(self):
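        # Build sequence: optional (dist)clean, build the support
        # libraries, build each community code (plus any variant
        # targets), copy the resulting workers into the build tree, and
        # report what was and was not built.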
if self.must_clean:
self.do_clean()
if self.must_dist_clean:
self.do_distclean()
not_build = list()
is_download_needed = list()
is_cuda_needed = list()
not_build_special = {}
are_python_imports_needed = list()
build = list()
lib_build = list()
lib_not_build = list()
environment = self.build_environment()
buildlog = 'build.log'
self.announce("building libraries and community codes", level=log.INFO)
self.announce("build, for logging, see '{0}'".format(buildlog), level=log.INFO)
with open(buildlog, "w") as output:
output.write('*'*100)
output.write('\n')
output.write('Building libraries and codes\n')
output.write('*'*100)
output.write('\n')
if not self.lib_dir == self.lib_src_dir:
self.copy_build_prereq_to_build_dir()
self.copy_lib_to_build_dir()
for x in self.makefile_paths(self.lib_dir):
shortname = x[len(self.lib_dir) + 1:] + '-library'
starttime = datetime.datetime.now()
self.announce("[{1:%H:%M:%S}] building {0}".format(shortname, starttime), level=log.INFO)
returncode, outputlog = self.run_make_on_directory(shortname, x, 'all', environment)
endtime = datetime.datetime.now()
if returncode == 2:
self.announce("[{2:%H:%M:%S}] building {0}, failed, see {1!r} for error log".format(shortname, buildlog, endtime), level=log.DEBUG)
if self.is_download_needed(outputlog):
is_download_needed.append(x[len(self.lib_dir) + 1:])
elif self.is_cuda_needed(outputlog):
is_cuda_needed.append(x[len(self.lib_dir) + 1:])
else:
lib_not_build.append(shortname)
else:
self.announce("[{1:%H:%M:%S}] building {0}, succeeded".format(shortname, endtime), level=log.DEBUG)
lib_build.append(shortname)
if not self.codes_dir == self.codes_src_dir:
self.copy_codes_to_build_dir()
# environment.update(self.environment)
makefile_paths = list(self.makefile_paths(self.codes_dir))
build_to_special_targets = {}
for x in makefile_paths:
shortname = x[len(self.codes_dir) + 1:].lower()
starttime = datetime.datetime.now()
            # For binary builds we do not want to distribute mesa,
            # as it would grow the download size from about 100 MB
            # to over 1 GB.
            #
            # Could we remove some of the data files from mesa?
            #
if not self.inplace and shortname == 'mesa':
self.announce("[{1:%H:%M:%S}] skipping {0}".format(shortname, starttime), level=log.INFO)
continue
self.announce("[{1:%H:%M:%S}] building {0}".format(shortname, starttime), level=log.INFO)
returncode, outputlog = self.run_make_on_directory(shortname, x, 'all', environment)
endtime = datetime.datetime.now()
if returncode > 0:
self.announce(
"[{2:%H:%M:%S}] building {0}, failed, see {1!r} for error log".format(shortname, buildlog, endtime),
level=log.DEBUG
)
if self.is_download_needed(outputlog):
is_download_needed.append(shortname)
elif self.is_cuda_needed(outputlog):
is_cuda_needed.append(shortname)
elif self.are_python_imports_needed(outputlog):
are_python_imports_needed.append(shortname)
else:
not_build.append(shortname)
if self.is_mpi_enabled():
continue
else:
build.append(shortname)
is_built = True
self.announce(
"[{1:%H:%M:%S}] building {0}, succeeded".format(shortname, endtime),
level=log.DEBUG
)
if not self.variant:
continue
special_targets = self.get_special_targets(shortname, x, environment)
for target, target_name in special_targets:
starttime = datetime.datetime.now()
self.announce(
"[{2:%H:%M:%S}] building {0} - {1}".format(shortname, target_name, starttime),
level=log.DEBUG
)
returncode, outputlog = self.run_make_on_directory(shortname, x, target, environment)
endtime = datetime.datetime.now()
if returncode > 0:
specials_list = not_build_special.setdefault(shortname,[])
specials_list.append(target_name)
self.announce(
"[{3:%H:%M:%S}] building {0} - {1}, failed, see {2!r} for error log".format(shortname, target_name, buildlog, endtime),
level=log.DEBUG
)
else:
build_to_special_targets.setdefault(shortname, list()).append(target_name)
self.announce(
"[{2:%H:%M:%S}] building {0} - {1}, succeeded".format(shortname, target_name, endtime), level=log.DEBUG
)
# if supportrc["framework_install"]:
# self.copy_config_to_build_dir()
if not self.codes_dir == self.codes_src_dir:
# self.copy_worker_codes_to_build_dir()
self.copy_worker_codes()
with open(buildlog, "a") as output:
output.write('*'*80)
output.write('\n')
output.write('Building finished\n')
output.write('*'*80)
output.write('\n')
self.announce("Environment variables")
self.announce("="*80)
sorted_keys = sorted(self.environment.keys())
for x in sorted_keys:
self.announce("%s\t%s" % (x, self.environment[x]))
if not self.is_mpi_enabled():
all_build = set(build)
not_build_copy = []
for x in not_build:
if x in build_to_special_targets:
if x not in all_build:
build.append(x)
all_build.add(x)
else:
not_build_copy.append(x)
not_build = not_build_copy
if (
not_build
or not_build_special
or is_download_needed
or is_cuda_needed
or are_python_imports_needed
):
if not_build:
level = log.WARN
else:
level = log.INFO
if not_build:
                self.announce(
                    "Community codes not built (because of errors or missing libraries):",
                    level=level
                )
self.announce("="*80, level=level)
for x in not_build:
self.announce(' * {0}'.format(x), level=level)
if not_build_special:
self.announce("Optional builds skipped, need special libraries:", level=level)
for x in sorted(not_build_special.keys()):
self.announce(' * {0} - {1}'.format(x, ', '.join(not_build_special[x])), level=level)
if is_cuda_needed:
self.announce("Optional builds skipped, need CUDA/GPU libraries:", level=level)
for x in is_cuda_needed:
self.announce(' * {0}'.format(x), level=level)
if are_python_imports_needed:
self.announce("Optional builds skipped, need additional python packages:", level=level)
for x in are_python_imports_needed:
self.announce(' * {0}'.format(x), level=level)
if is_download_needed:
self.announce("Optional builds skipped, need separate download", level=level)
for x in is_download_needed:
self.announce(
f' * {x} , make {x}.code DOWNLOAD_CODES=1', level=level
)
self.announce("="*80, level=level)
if build:
level = log.INFO
self.announce("Community codes built", level=level)
self.announce("="*80, level=level)
for x in build:
if x in build_to_special_targets:
y = build_to_special_targets[x]
self.announce('* {0} ({1})'.format(x, ','.join(y)), level=level)
else:
self.announce('* {0}'.format(x), level=level)
self.announce("="*80, level=level)
level = log.INFO
self.announce(
"{0} out of {1} codes built, {2} out of {3} libraries built".format(
len(build),
len(build) + len(not_build),
len(lib_build),
len(lib_build) + len(lib_not_build)
),
level=level
)
self.announce("(not all codes and libraries need to be built)")
if self.config and (not hasattr(self.config, 'java') or not hasattr(self.config.java, 'is_enabled')):
self.announce(
"Your configuration is out of date, please rerun configure",
level=level
)
allow_build_failures=environment.get("AMUSE_ALLOW_BUILD_FAILURES", supportrc["allow_build_failures"])
if allow_build_failures=="none" and len(not_build)>0:
raise Exception("Unexpected build failure(s) detected. Aborting.")
if allow_build_failures=="some" and len(not_build)>0 and len(build)==0:
            raise Exception("No successful builds detected. Aborting.")
if allow_build_failures=="all" and len(not_build)>0 and len(build)==0:
self.announce("Continuing despite apparent build failure", level=level)
class BuildLibraries(BuildCodes):
description = "build just the supporting libraries"
def subdirs_in_path(self,path):
# bit hackish way to filter out non lib stuff
if path not in [self.lib_dir, self.lib_src_dir]:
return
if not os.path.exists(path):
return
names = sorted(os.listdir(path))
for name in names:
if name.startswith('.'):
continue
path_ = os.path.join(path, name)
if os.path.isdir(path_):
yield path_
# the following two are for convenience, not strictly necessary
class BuildLibraries_inplace(BuildLibraries):
description = "build just the supporting libraries, in place"
def initialize_options(self):
BuildLibraries.initialize_options(self)
self.inplace=True
class BuildCodes_inplace(BuildCodes):
description = "build interfaces to codes, in place"
def initialize_options(self):
BuildCodes.initialize_options(self)
self.inplace = True
class ConfigureCodes(CodeCommand):
description = "run configure for amuse"
def run(self):
if os.path.exists('config.mk') or self.config:
self.announce("Already configured, not running configure", level=2)
return
environment = self.build_environment()
self.announce("Running configure for AMUSE", level=2)
        result, content = self.call(['./configure'], env=environment, shell=True)
if not os.path.exists('config.mk'):
self.announce("config.mk not generated; output of configure:", level=2)
self.announce(content, level=2)
raise Exception("configure failed")
with open("config.mk") as infile:
self.announce("configure generated config.mk", level=2)
self.announce("="*80, level=2)
for line in infile:
self.announce(line[:-1], level=2)
self.announce("="*80, level=2)
class CleanCodes(CodeCommand):
description = "clean build products in codes"
def run(self):
self.announce("Cleaning libraries and community codes", level=2)
self.do_clean()
class DistCleanCodes(CodeCommand):
description = "clean for distribution"
def run(self):
self.announce("Cleaning for distribution, libraries and community codes", level=2)
self.do_distclean()
class BuildOneCode(BuildCodes):
description = "build one code"
user_options = list(BuildCodes.user_options)
user_options.append(('code-name=', 'n', "name of the code",), )
def initialize_options(self):
BuildCodes.initialize_options(self)
self.code_name = None
def finalize_options(self):
BuildCodes.finalize_options(self)
if self.code_name is None:
raise Exception("no code was specified")
def subdirs_in_path(self,path):
if not os.path.exists(path):
return
names = os.listdir(path)
for name in names:
if name.startswith('.'):
continue
if not name.lower().startswith(self.code_name.lower()):
continue
path_ = os.path.join(path, name)
if os.path.isdir(path_):
yield path_
def run(self):
if not self.inplace:
self.run_command("build_py")
BuildCodes.run(self)
class Clean(clean):
# make sure sub_commands are independent
sub_commands = list(clean.sub_commands)
def run(self):
for cmd_name in self.get_sub_commands():
self.run_command(cmd_name)
class Install(install):
sub_commands = list(install.sub_commands)
def run(self):
# this ensures sub commands are run first (only run once)
for cmd_name in self.get_sub_commands():
self.run_command(cmd_name)
install.run(self)
class Develop(develop):
sub_commands = list(develop.sub_commands)
def run(self):
# this ensures sub commands are run first (only run once)
for cmd_name in self.get_sub_commands():
self.run_command(cmd_name)
develop.run(self)
class Editable_wheel(editable_wheel):
sub_commands = list(develop.sub_commands)
def run(self):
build.sub_commands.remove(('build_codes', None))
build.sub_commands.append(('build_libraries_in_place', None))
# this ensures sub commands are run first (only run once)
for cmd_name in self.get_sub_commands():
self.run_command(cmd_name)
editable_wheel.run(self)
def setup_commands():
mapping_from_command_name_to_command_class = {
'build_codes': BuildCodes,
'build_code': BuildOneCode,
'clean_codes': CleanCodes,
'dist_clean': DistCleanCodes,
'clean_python': clean,
'clean': Clean,
'install': Install,
'build_libraries': BuildLibraries,
'build_libraries_in_place': BuildLibraries_inplace,
'install_libraries': InstallLibraries,
'develop': Develop,
'develop_build': BuildCodes_inplace,
'editable_wheel': Editable_wheel
}
build.sub_commands.append(('build_codes', None))
Clean.sub_commands.append(('clean_codes', None))
Clean.sub_commands.append(('clean_python', None))
Install.sub_commands.append(('install_libraries', None))
Develop.sub_commands.append(('build_libraries_in_place', None))
if supportrc["framework_install"]:
mapping_from_command_name_to_command_class.update(
{
'configure_codes': ConfigureCodes,
'generate_install_ini': GenerateInstallIni,
'generate_main': generate_main,
}
)
build.sub_commands.insert(0, ('configure_codes', None))
Install.sub_commands.insert(0, ('generate_install_ini', None))
Develop.sub_commands.insert(0, ('configure_codes', None))
return mapping_from_command_name_to_command_class
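
# Example use from a package setup.py (a sketch; the generated setups
# in packages/ below follow this pattern):
#
#     from support.setup_codes import setup_commands
#     setup(..., cmdclass=setup_commands())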
| 49,244
| 36.851653
| 147
|
py
|
amuse
|
amuse-main/support/config.py
|
#
# configuration from config.mk
import os
import warnings
def parse_configmk(filename):
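    """Parse VAR=value lines from an amuse config.mk file into a dict.

    Warns when a value still looks like an unconfigured @PLACEHOLDER@
    left behind by configure.
    """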
f=open(filename,"r")
lines=f.readlines()
f.close()
cfgvars=dict()
if "amuse configuration" not in lines[0]:
raise Exception("file: {0} is not an amuse configuration file".format(filename))
for line in lines:
if "=" in line:
            var, value = line.split("=", 1)
            value = value.strip()
            if value.startswith("@") and value.endswith("@"):
                warnings.warn("possible configuration error: unconfigured variable in {0}".format(filename))
            cfgvars[var.strip()] = value
return cfgvars
try:
configmk=parse_configmk("config.mk")
except IOError:
from .support import get_amuse_root_dir
configmk=parse_configmk(os.path.join(get_amuse_root_dir(),"config.mk"))
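
# The classes below expose the config.mk entries as attributes
# (e.g. config.compilers.cxx, config.mpi.mpicxx), which is how
# setup_codes reads the configure results.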
class interpreters(object):
python = configmk["PYTHON"]
class compilers(object):
cxx = configmk["CXX"]
cc = configmk["CC"]
fc = configmk["FC"]
cxx_flags = configmk["CXXFLAGS"]
cc_flags = configmk["CFLAGS"]
fc_flags = configmk["FCFLAGS"]
ld_flags = configmk["LDFLAGS"]
found_fftw = configmk["FOUND_FFTW"]
fftw_flags = configmk["FFTW_FLAGS"]
fftw_libs = configmk["FFTW_LIBS"]
found_gsl = configmk["FOUND_GSL"]
gsl_flags = configmk["GSL_FLAGS"]
gsl_libs = configmk["GSL_LIBS"]
gfortran_version = configmk["GFORTRAN_VERSION"]
ifort_version = configmk["IFORT_VERSION"]
fc_iso_c_bindings = configmk["FC_ISO_C_AVAILABLE"]=='yes'
cython = configmk["CYTHON"]
pythondev_cflags = configmk["PYTHONDEV_CFLAGS"]
pythondev_ldflags = configmk["PYTHONDEV_LDFLAGS"]
class mpi(object):
is_enabled = configmk["MPI_ENABLED"]=='yes'
mpicxx = configmk["MPICXX"]
mpicc = configmk["MPICC"]
mpif95 = configmk["MPIFC"]
mpifc = configmk["MPIFC"]
mpif90 = configmk["MPIFC"]
mpiexec = configmk["MPIEXEC"]
mpi_cflags = configmk["MPI_CFLAGS"]
mpi_cxxflags = configmk["MPI_CXXFLAGS"]
mpi_fcflags = configmk["MPI_FCFLAGS"]
mpi_clibs = configmk["MPI_CLIBS"]
mpi_cxxlibs = configmk["MPI_CXXLIBS"]
mpi_fclibs = configmk["MPI_FCLIBS"]
class java(object):
is_enabled = configmk["JAVA_ENABLED"]=='yes'
java = configmk["JAVA"]
javac = configmk["JAVAC"]
jar = configmk["JAR"]
version = configmk["JAVA_VERSION"]
class cuda(object):
is_enabled = configmk["CUDA_ENABLED"]=='yes'
compiler = configmk["NVCC"]
compiler_flags = configmk["NVCC_FLAGS"]
toolkit_path = configmk["CUDA_TK"]
sdk_path = "/TOBEFIXED"
cuda_libs = configmk["CUDA_LIBS"]
sapporo_version = configmk["SAPPORO_VERSION"]
class openmp(object):
is_enabled = configmk["OPENMP_ENABLED"]=='yes'
fcflags = configmk["OPENMP_FCFLAGS"]
cflags = configmk["OPENMP_CFLAGS"]
| 2,831
| 29.451613
| 106
|
py
|
amuse
|
amuse-main/support/version.py
|
from setuptools_scm import get_version
version = get_version()
def main():
    print(version)
if __name__ == "__main__":
main()
| 145
| 12.272727
| 38
|
py
|
amuse
|
amuse-main/support/classifiers.py
|
classifiers = [
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: C',
'Programming Language :: C++',
'Programming Language :: Fortran',
'Topic :: Scientific/Engineering :: Astronomy',
]
def main():
for i in classifiers:
print(i)
if __name__ == "__main__":
main()
| 841
| 30.185185
| 57
|
py
|
amuse
|
amuse-main/support/__init__.py
|
import warnings

supportrc = dict(framework_install=True, package_name="amuse", allow_build_failures='some')
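# supportrc holds the runtime switches for this build support code:
# whether the framework itself is being installed, the package name
# used for paths, and the allow_build_failures policy ('some',
# 'none' or 'all').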
def use(arg):
if arg == "package":
supportrc["framework_install"]=True
else:
if arg not in ["system","installed","environment"]:
            warnings.warn("assuming framework already installed")
supportrc["framework_install"]=False
def set_package_name(arg):
supportrc["package_name"]=arg
def set_allow_build_failures(arg):
if arg=="yes" or (arg==True): arg='some'
if arg=="no" or (arg==False): arg='none'
supportrc["allow_build_failures"]=arg
| 586
| 31.611111
| 89
|
py
|
amuse
|
amuse-main/support/generate_main.py
|
__revision__ = "$Id:$"
import sys, os, re, subprocess, stat
from stat import ST_MODE
from distutils import sysconfig
from distutils.core import Command
from distutils.dep_util import newer
from distutils.util import convert_path
from distutils import log
class generate_main(Command):
description = "generate shell script to run amuse"
user_options = [
('amuse-dir=', 'd', "root directory of the amuse project"),
]
    def initialize_options(self):
        self.amuse_dir = None
    def finalize_options(self):
        if self.amuse_dir is None:
            self.amuse_dir = os.path.dirname(os.path.dirname(__file__))
    def get_source_files(self):
        # this command only generates scripts; it has no input sources
        return []
    def run(self):
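        # Write two launcher scripts and mark them executable:
        # amuse.sh runs a given Python program with AMUSE on the
        # PYTHONPATH, iamuse.sh starts an interactive IPython session.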
test_directory = os.path.join(self.amuse_dir, 'test')
src_directory = os.path.join(self.amuse_dir, 'src')
with open('amuse.sh','w') as script_file:
script_file.write('#!/bin/sh')
script_file.write('\n\n')
script_file.write('export PYTHONPATH=${PYTHONPATH}')
for x in [test_directory, src_directory]:
script_file.write(':')
script_file.write(x)
script_file.write('\n')
script_file.write('export AMUSE_DIR=')
script_file.write(self.amuse_dir)
script_file.write('\n')
script_file.write(sys.executable)
script_file.write(' "$@"\n')
os.chmod('amuse.sh', stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
with open('iamuse.sh','w') as script_file:
script_file.write('#!/usr/bin/env python')
script_file.write('\n\n')
script_file.write('import IPython.Shell\n')
script_file.write('import sys\n')
for x in [test_directory, src_directory]:
script_file.write("sys.path.append('{0}')\n".format(x))
script_file.write('amuse_root_dir = "')
script_file.write(self.amuse_dir)
script_file.write('"\n')
script_file.write('IPython.Shell.start().mainloop()\n')
os.chmod('iamuse.sh', stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
| 2,167
| 33.412698
| 74
|
py
|
amuse
|
amuse-main/packages/setup_template.py
|
#!/usr/bin/env python3
import sys
name = sys.argv[1]
setupstring = '''#!/usr/bin/env python3
from support.classifiers import classifiers
from setuptools import setup
import support
support.use("system")
from support.setup_codes import setup_commands
name = 'amuse-{name_lowercase}'
author = 'The AMUSE team'
author_email = 'info@amusecode.org'
license_ = "Apache License 2.0"
url = 'http://www.amusecode.org/'
install_requires = [
'amuse-framework',
]
description = 'The Astrophysical Multipurpose Software Environment - {name}'
with open("README.md", "r") as fh:
long_description = fh.read()
long_description_content_type = "text/markdown"
extensions = []
all_data_files = []
packages = [
'amuse.community.{name_lowercase}',
]
package_data = {{
}}
mapping_from_command_name_to_command_class = setup_commands()
try:
from src.amuse.community.{name_lowercase}.version import version
use_scm_version = False
setup_requires = []
except ImportError:
version = False
setup_requires = ['setuptools_scm']
use_scm_version = {{
"root": "../..",
"relative_to": __file__,
"write_to": "src/amuse/community/{name_lowercase}/version.py",
}}
setup(
name=name,
use_scm_version=use_scm_version,
setup_requires=setup_requires,
version=version,
classifiers=classifiers,
url=url,
author_email=author_email,
author=author,
license=license_,
description=description,
long_description=long_description,
long_description_content_type=long_description_content_type,
install_requires=install_requires,
python_requires=">=3.5",
cmdclass=mapping_from_command_name_to_command_class,
ext_modules=extensions,
package_dir={{
'amuse.community.{name_lowercase}': 'src/amuse/community/{name_lowercase}',
}},
packages=packages,
package_data=package_data,
data_files=all_data_files,
)'''
print(setupstring.format(name=name, name_lowercase=name.lower()))
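
# Usage (a sketch; 'MyCode' is a placeholder name): generate a setup.py
# for a new community code and redirect it into its package directory:
#
#     python setup_template.py MyCode > packages/amuse-mycode/setup.py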
| 1,983
| 24.435897
| 83
|
py
|
amuse
|
amuse-main/packages/amuse-petar/setup.py
|
#!/usr/bin/env python3
from support.classifiers import classifiers
from setuptools import setup
import support
support.use("system")
from support.setup_codes import setup_commands
name = 'amuse-petar'
author = 'The AMUSE team'
author_email = 'info@amusecode.org'
license_ = "Apache License 2.0"
url = 'http://www.amusecode.org/'
install_requires = [
'amuse-framework',
]
description = 'The Astrophysical Multipurpose Software Environment - PeTar'
with open("README.md", "r") as fh:
long_description = fh.read()
long_description_content_type = "text/markdown"
extensions = []
all_data_files = []
packages = [
'amuse.community.petar',
]
package_data = {
}
mapping_from_command_name_to_command_class = setup_commands()
try:
from src.amuse.community.petar.version import version
use_scm_version = False
setup_requires = []
except ImportError:
version = False
setup_requires = ['setuptools_scm']
use_scm_version = {
"root": "../..",
"relative_to": __file__,
"write_to": "src/amuse/community/petar/version.py",
}
setup(
name=name,
use_scm_version=use_scm_version,
setup_requires=setup_requires,
version=version,
classifiers=classifiers,
url=url,
author_email=author_email,
author=author,
license=license_,
description=description,
long_description=long_description,
long_description_content_type=long_description_content_type,
install_requires=install_requires,
python_requires=">=3.5",
cmdclass=mapping_from_command_name_to_command_class,
ext_modules=extensions,
package_dir={
'amuse.community.petar': 'src/amuse/community/petar',
},
packages=packages,
package_data=package_data,
data_files=all_data_files,
)
| 1,770
| 23.260274
| 75
|
py
|
amuse
|
amuse-main/packages/amuse-tests/setup.py
|
#!/usr/bin/env python3
from support.classifiers import classifiers
from setuptools import setup
import support
support.use("system")
from support.setup_codes import setup_commands
name = 'amuse-tests'
author = 'The AMUSE team'
author_email = 'info@amusecode.org'
license_ = "Apache License 2.0"
url = 'http://www.amusecode.org/'
install_requires = [
'amuse-framework',
]
description = 'The Astrophysical Multipurpose Software Environment - tests'
with open("README.md", "r") as fh:
long_description = fh.read()
long_description_content_type = "text/markdown"
extensions = []
all_data_files = []
packages = [
'amuse.test.suite',
'amuse.test.suite.ext_tests',
'amuse.test.suite.core_tests',
'amuse.test.suite.compile_tests',
'amuse.test.suite.codes_tests',
'amuse.test.suite.ticket_tests',
'amuse.test.suite.reports',
]
package_data = {
}
mapping_from_command_name_to_command_class = setup_commands()
try:
from src.amuse.test.suite.version import version
use_scm_version = False
setup_requires = []
except ImportError:
version = False
setup_requires = ['setuptools_scm']
use_scm_version = {
"root": "../..",
"relative_to": __file__,
"write_to": "src/amuse/test/suite/version.py",
}
setup(
name=name,
use_scm_version=use_scm_version,
setup_requires=setup_requires,
version=version,
classifiers=classifiers,
url=url,
author_email=author_email,
author=author,
license=license_,
description=description,
long_description=long_description,
long_description_content_type=long_description_content_type,
install_requires=install_requires,
python_requires=">=3.5",
cmdclass=mapping_from_command_name_to_command_class,
ext_modules=extensions,
package_dir={
'amuse.test.suite': 'src/amuse/test/suite',
},
packages=packages,
package_data=package_data,
data_files=all_data_files,
)
| 1,960
| 23.822785
| 75
|
py
|
amuse
|
amuse-main/packages/amuse-seba/setup.py
|
#!/usr/bin/env python3
from support.classifiers import classifiers
from setuptools import setup
import support
support.use("system")
from support.setup_codes import setup_commands
name = 'amuse-seba'
author = 'The AMUSE team'
author_email = 'info@amusecode.org'
license_ = "Apache License 2.0"
url = 'http://www.amusecode.org/'
install_requires = [
'amuse-framework',
]
description = 'The Astrophysical Multipurpose Software Environment - SeBa'
with open("README.md", "r") as fh:
long_description = fh.read()
long_description_content_type = "text/markdown"
extensions = []
all_data_files = []
packages = [
'amuse.community.seba',
]
package_data = {
}
mapping_from_command_name_to_command_class = setup_commands()
try:
from src.amuse.community.seba.version import version
use_scm_version = False
setup_requires = []
except ImportError:
version = False
setup_requires = ['setuptools_scm']
use_scm_version = {
"root": "../..",
"relative_to": __file__,
"write_to": "src/amuse/community/seba/version.py",
}
setup(
name=name,
use_scm_version=use_scm_version,
setup_requires=setup_requires,
version=version,
classifiers=classifiers,
url=url,
author_email=author_email,
author=author,
license=license_,
description=description,
long_description=long_description,
long_description_content_type=long_description_content_type,
install_requires=install_requires,
python_requires=">=3.5",
cmdclass=mapping_from_command_name_to_command_class,
ext_modules=extensions,
package_dir={
'amuse.community.seba': 'src/amuse/community/seba',
},
packages=packages,
package_data=package_data,
data_files=all_data_files,
)
| 1,763
| 23.164384
| 74
|
py
|
amuse
|
amuse-main/packages/amuse-smalln/setup.py
|
#!/usr/bin/env python3
from support.classifiers import classifiers
from setuptools import setup
import support
support.use("system")
from support.setup_codes import setup_commands
name = 'amuse-smalln'
author = 'The AMUSE team'
author_email = 'info@amusecode.org'
license_ = "Apache License 2.0"
url = 'http://www.amusecode.org/'
install_requires = [
'amuse-framework',
]
description = 'The Astrophysical Multipurpose Software Environment - smalln'
with open("README.md", "r") as fh:
long_description = fh.read()
long_description_content_type = "text/markdown"
extensions = []
all_data_files = []
packages = [
'amuse.community.smalln',
]
package_data = {
}
mapping_from_command_name_to_command_class = setup_commands()
try:
from src.amuse.community.smalln.version import version
use_scm_version = False
setup_requires = []
except ImportError:
version = False
setup_requires = ['setuptools_scm']
use_scm_version = {
"root": "../..",
"relative_to": __file__,
"write_to": "src/amuse/community/smalln/version.py",
}
setup(
name=name,
use_scm_version=use_scm_version,
setup_requires=setup_requires,
version=version,
classifiers=classifiers,
url=url,
author_email=author_email,
author=author,
license=license_,
description=description,
long_description=long_description,
long_description_content_type=long_description_content_type,
install_requires=install_requires,
python_requires=">=3.5",
cmdclass=mapping_from_command_name_to_command_class,
ext_modules=extensions,
package_dir={
'amuse.community.smalln': 'src/amuse/community/smalln',
},
packages=packages,
package_data=package_data,
data_files=all_data_files,
)
| 1,777
| 23.356164
| 76
|
py
|
amuse
|
amuse-main/packages/amuse-tutorial/setup.py
|
#!/usr/bin/env python3
from support.classifiers import classifiers
from support.misc import find_data_files
from setuptools import setup
name = 'amuse-tutorial'
author = 'The AMUSE team'
author_email = 'info@amusecode.org'
license_ = "Apache License 2.0"
url = 'http://www.amusecode.org/'
install_requires = [
'matplotlib>=2.2',
'amuse-framework',
'amuse-bhtree',
'amuse-hermite',
'amuse-seba',
'amuse-sphray',
'notebook',
]
description = 'The Astrophysical Multipurpose Software Environment - tutorial'
with open("README.md", "r") as fh:
long_description = fh.read()
long_description_content_type = "text/markdown"
all_data_files = find_data_files(
'tutorial', 'share/amuse/tutorial', '*', recursive=True
)
try:
from tutorial.version import version
use_scm_version = False
setup_requires = []
except ImportError:
version = False
setup_requires = ['setuptools_scm']
use_scm_version = {
"root": "../..",
"relative_to": __file__,
"write_to": "tutorial/version.py",
}
setup(
name=name,
use_scm_version=use_scm_version,
setup_requires=setup_requires,
version=version,
classifiers=classifiers,
url=url,
author_email=author_email,
author=author,
license=license_,
description=description,
long_description=long_description,
long_description_content_type=long_description_content_type,
install_requires=install_requires,
python_requires=">=3.5",
# cmdclass=mapping_from_command_name_to_command_class,
data_files=all_data_files,
scripts=["bin/amuse-tutorial"],
packages=[],
)
| 1,633
| 24.936508
| 78
|
py
|
amuse
|
amuse-main/packages/amuse-mesa-r15140/setup.py
|
#!/usr/bin/env python3
from support.classifiers import classifiers
from setuptools import setup
import support
support.use("system")
from support.setup_codes import setup_commands
name = 'amuse-mesa-r15140'
author = 'The AMUSE team'
author_email = 'info@amusecode.org'
license_ = "Apache License 2.0"
url = 'http://www.amusecode.org/'
install_requires = [
'amuse-framework',
]
description = 'The Astrophysical Multipurpose Software Environment - MESA (r15140)'
with open("README.md", "r") as fh:
long_description = fh.read()
long_description_content_type = "text/markdown"
extensions = []
all_data_files = []
packages = [
'amuse.community.mesa_r15140',
]
package_data = {
}
mapping_from_command_name_to_command_class = setup_commands()
try:
from src.amuse.community.mesa_r15140.version import version
use_scm_version = False
setup_requires = []
except ImportError:
version = False
setup_requires = ['setuptools_scm']
use_scm_version = {
"root": "../..",
"relative_to": __file__,
"write_to": "src/amuse/community/mesa_r15140/version.py",
}
setup(
name=name,
use_scm_version=use_scm_version,
setup_requires=setup_requires,
version=version,
classifiers=classifiers,
url=url,
author_email=author_email,
author=author,
license=license_,
description=description,
long_description=long_description,
long_description_content_type=long_description_content_type,
install_requires=install_requires,
python_requires=">=3.7",
cmdclass=mapping_from_command_name_to_command_class,
ext_modules=extensions,
package_dir={
'amuse.community.mesa_r15140': 'src/amuse/community/mesa_r15140',
},
packages=packages,
package_data=package_data,
data_files=all_data_files,
)
| 1,814
| 23.863014
| 83
|
py
|
amuse
|
amuse-main/packages/amuse-mercury/setup.py
|
#!/usr/bin/env python3
from support.classifiers import classifiers
from setuptools import setup
import support
support.use("system")
from support.setup_codes import setup_commands
name = 'amuse-mercury'
author = 'The AMUSE team'
author_email = 'info@amusecode.org'
license_ = "Apache License 2.0"
url = 'http://www.amusecode.org/'
install_requires = [
'amuse-framework',
]
description = 'The Astrophysical Multipurpose Software Environment - Mercury'
with open("README.md", "r") as fh:
long_description = fh.read()
long_description_content_type = "text/markdown"
extensions = []
all_data_files = []
packages = [
'amuse.community.mercury',
]
package_data = {
}
mapping_from_command_name_to_command_class = setup_commands()
try:
from src.amuse.community.mercury.version import version
use_scm_version = False
setup_requires = []
except ImportError:
version = False
setup_requires = ['setuptools_scm']
use_scm_version = {
"root": "../..",
"relative_to": __file__,
"write_to": "src/amuse/community/mercury/version.py",
}
setup(
name=name,
use_scm_version=use_scm_version,
setup_requires=setup_requires,
version=version,
classifiers=classifiers,
url=url,
author_email=author_email,
author=author,
license=license_,
description=description,
long_description=long_description,
long_description_content_type=long_description_content_type,
install_requires=install_requires,
python_requires=">=3.5",
cmdclass=mapping_from_command_name_to_command_class,
ext_modules=extensions,
package_dir={
'amuse.community.mercury': 'src/amuse/community/mercury',
},
packages=packages,
package_data=package_data,
data_files=all_data_files,
)
| 1,784
| 23.452055
| 77
|
py
|
amuse
|
amuse-main/packages/amuse-twobody/setup.py
|
#!/usr/bin/env python3
from support.classifiers import classifiers
from setuptools import setup
import support
support.use("system")
from support.setup_codes import setup_commands
name = 'amuse-twobody'
author = 'The AMUSE team'
author_email = 'info@amusecode.org'
license_ = "Apache License 2.0"
url = 'http://www.amusecode.org/'
install_requires = [
'amuse-framework',
]
description = 'The Astrophysical Multipurpose Software Environment - twobody'
with open("README.md", "r") as fh:
long_description = fh.read()
long_description_content_type = "text/markdown"
extensions = []
all_data_files = []
packages = [
'amuse.community.twobody',
]
package_data = {
}
mapping_from_command_name_to_command_class = setup_commands()
try:
from src.amuse.community.twobody.version import version
use_scm_version = False
setup_requires = []
except ImportError:
version = False
setup_requires = ['setuptools_scm']
use_scm_version = {
"root": "../..",
"relative_to": __file__,
"write_to": "src/amuse/community/twobody/version.py",
}
setup(
name=name,
use_scm_version=use_scm_version,
setup_requires=setup_requires,
version=version,
classifiers=classifiers,
url=url,
author_email=author_email,
author=author,
license=license_,
description=description,
long_description=long_description,
long_description_content_type=long_description_content_type,
install_requires=install_requires,
python_requires=">=3.5",
cmdclass=mapping_from_command_name_to_command_class,
ext_modules=extensions,
package_dir={
'amuse.community.twobody': 'src/amuse/community/twobody',
},
packages=packages,
package_data=package_data,
data_files=all_data_files,
)
| 1,784
| 23.452055
| 77
|
py
|
amuse
|
amuse-main/packages/amuse-fastkick/setup.py
|
#!/usr/bin/env python3
from support.classifiers import classifiers
from setuptools import setup
import support
support.use("system")
from support.setup_codes import setup_commands
name = 'amuse-fastkick'
author = 'The AMUSE team'
author_email = 'info@amusecode.org'
license_ = "Apache License 2.0"
url = 'http://www.amusecode.org/'
install_requires = [
'amuse-framework',
]
description = 'The Astrophysical Multipurpose Software Environment - fastkick'
with open("README.md", "r") as fh:
long_description = fh.read()
long_description_content_type = "text/markdown"
extensions = []
all_data_files = []
packages = [
'amuse.community.fastkick',
]
package_data = {
}
mapping_from_command_name_to_command_class = setup_commands()
try:
from src.amuse.community.fastkick.version import version
use_scm_version = False
setup_requires = []
except ImportError:
version = False
setup_requires = ['setuptools_scm']
use_scm_version = {
"root": "../..",
"relative_to": __file__,
"write_to": "src/amuse/community/fastkick/version.py",
}
setup(
name=name,
use_scm_version=use_scm_version,
setup_requires=setup_requires,
version=version,
classifiers=classifiers,
url=url,
author_email=author_email,
author=author,
license=license_,
description=description,
long_description=long_description,
long_description_content_type=long_description_content_type,
install_requires=install_requires,
python_requires=">=3.5",
cmdclass=mapping_from_command_name_to_command_class,
ext_modules=extensions,
package_dir={
'amuse.community.fastkick': 'src/amuse/community/fastkick',
},
packages=packages,
package_data=package_data,
data_files=all_data_files,
)
| 1,791
| 23.547945
| 78
|
py
|
amuse
|
amuse-main/packages/amuse-distributed/setup.py
|
#!/usr/bin/env python3
from support.classifiers import classifiers
from setuptools import setup
import support
support.use("system")
from support.setup_codes import setup_commands
name = 'amuse-distributed'
author = 'The AMUSE team'
author_email = 'info@amusecode.org'
license_ = "Apache License 2.0"
url = 'http://www.amusecode.org/'
install_requires = [
'amuse-framework',
]
description = 'The Astrophysical Multipurpose Software Environment - Distributed'
with open("README.md", "r") as fh:
long_description = fh.read()
long_description_content_type = "text/markdown"
extensions = []
all_data_files = []
packages = [
'amuse.community.distributed',
]
package_data = {
}
mapping_from_command_name_to_command_class = setup_commands()
try:
from src.amuse.community.distributed.version import version
use_scm_version = False
setup_requires = []
except ImportError:
version = False
setup_requires = ['setuptools_scm']
use_scm_version = {
"root": "../..",
"relative_to": __file__,
"write_to": "src/amuse/community/distributed/version.py",
}
setup(
name=name,
use_scm_version=use_scm_version,
setup_requires=setup_requires,
version=version,
classifiers=classifiers,
url=url,
author_email=author_email,
author=author,
license=license_,
description=description,
long_description=long_description,
long_description_content_type=long_description_content_type,
install_requires=install_requires,
python_requires=">=3.5",
cmdclass=mapping_from_command_name_to_command_class,
ext_modules=extensions,
package_dir={
'amuse.community.distributed': 'src/amuse/community/distributed',
},
packages=packages,
package_data=package_data,
data_files=all_data_files,
)
| 1,812
| 23.835616
| 81
|
py
|
amuse
|
amuse-main/packages/amuse-fi/setup.py
|
#!/usr/bin/env python3
from support.classifiers import classifiers
from setuptools import setup
import support
support.use("system")
from support.setup_codes import setup_commands
name = 'amuse-fi'
author = 'The AMUSE team'
author_email = 'info@amusecode.org'
license_ = "Apache License 2.0"
url = 'http://www.amusecode.org/'
install_requires = [
'amuse-framework',
]
description = 'The Astrophysical Multipurpose Software Environment - FI'
with open("README.md", "r") as fh:
long_description = fh.read()
long_description_content_type = "text/markdown"
extensions = []
all_data_files = []
packages = [
'amuse.community.fi',
]
package_data = {
}
mapping_from_command_name_to_command_class = setup_commands()
try:
from src.amuse.community.fi.version import version
use_scm_version = False
setup_requires = []
except ImportError:
version = False
setup_requires = ['setuptools_scm']
use_scm_version = {
"root": "../..",
"relative_to": __file__,
"write_to": "src/amuse/community/fi/version.py",
}
setup(
name=name,
use_scm_version=use_scm_version,
setup_requires=setup_requires,
version=version,
classifiers=classifiers,
url=url,
author_email=author_email,
author=author,
license=license_,
description=description,
long_description=long_description,
long_description_content_type=long_description_content_type,
install_requires=install_requires,
python_requires=">=3.5",
cmdclass=mapping_from_command_name_to_command_class,
ext_modules=extensions,
package_dir={
'amuse.community.fi': 'src/amuse/community/fi',
},
packages=packages,
package_data=package_data,
data_files=all_data_files,
)
| 1,749
| 22.972603
| 72
|
py
|
amuse
|
amuse-main/packages/amuse-athena/setup.py
|
#!/usr/bin/env python3
from support.classifiers import classifiers
from setuptools import setup
import support
support.use("system")
from support.setup_codes import setup_commands
name = 'amuse-athena'
author = 'The AMUSE team'
author_email = 'info@amusecode.org'
license_ = "Apache License 2.0"
url = 'http://www.amusecode.org/'
install_requires = [
'amuse-framework',
]
description = 'The Astrophysical Multipurpose Software Environment - Athena'
with open("README.md", "r") as fh:
long_description = fh.read()
long_description_content_type = "text/markdown"
extensions = []
all_data_files = []
packages = [
'amuse.community.athena',
]
package_data = {
}
mapping_from_command_name_to_command_class = setup_commands()
try:
from src.amuse.community.athena.version import version
use_scm_version = False
setup_requires = []
except ImportError:
version = False
setup_requires = ['setuptools_scm']
use_scm_version = {
"root": "../..",
"relative_to": __file__,
"write_to": "src/amuse/community/athena/version.py",
}
setup(
name=name,
use_scm_version=use_scm_version,
setup_requires=setup_requires,
version=version,
classifiers=classifiers,
url=url,
author_email=author_email,
author=author,
license=license_,
description=description,
long_description=long_description,
long_description_content_type=long_description_content_type,
install_requires=install_requires,
python_requires=">=3.5",
cmdclass=mapping_from_command_name_to_command_class,
ext_modules=extensions,
package_dir={
'amuse.community.athena': 'src/amuse/community/athena',
},
packages=packages,
package_data=package_data,
data_files=all_data_files,
)
| 1,777
| 23.356164
| 76
|
py
|
amuse
|
amuse-main/packages/amuse-secularmultiple/setup.py
|
#!/usr/bin/env python3
from support.classifiers import classifiers
from setuptools import setup
import support
support.use("system")
from support.setup_codes import setup_commands
name = 'amuse-secularmultiple'
author = 'The AMUSE team'
author_email = 'info@amusecode.org'
license_ = "Apache License 2.0"
url = 'http://www.amusecode.org/'
install_requires = [
'amuse-framework',
]
description = 'The Astrophysical Multipurpose Software Environment - SecularMultiple'
with open("README.md", "r") as fh:
long_description = fh.read()
long_description_content_type = "text/markdown"
extensions = []
all_data_files = []
packages = [
'amuse.community.secularmultiple',
]
package_data = {
}
mapping_from_command_name_to_command_class = setup_commands()
try:
from src.amuse.community.secularmultiple.version import version
use_scm_version = False
setup_requires = []
except ImportError:
version = False
setup_requires = ['setuptools_scm']
use_scm_version = {
"root": "../..",
"relative_to": __file__,
"write_to": "src/amuse/community/secularmultiple/version.py",
}
setup(
name=name,
use_scm_version=use_scm_version,
setup_requires=setup_requires,
version=version,
classifiers=classifiers,
url=url,
author_email=author_email,
author=author,
license=license_,
description=description,
long_description=long_description,
long_description_content_type=long_description_content_type,
install_requires=install_requires,
python_requires=">=3.5",
cmdclass=mapping_from_command_name_to_command_class,
ext_modules=extensions,
package_dir={
'amuse.community.secularmultiple': 'src/amuse/community/secularmultiple',
},
packages=packages,
package_data=package_data,
data_files=all_data_files,
)
| 1,840
| 24.219178
| 85
|
py
|
amuse
|
amuse-main/packages/amuse-mobse/setup.py
|
#!/usr/bin/env python3
from support.classifiers import classifiers
from setuptools import setup
import support
support.use("system")
from support.setup_codes import setup_commands
name = 'amuse-mobse'
author = 'The AMUSE team'
author_email = 'info@amusecode.org'
license_ = "Apache License 2.0"
url = 'http://www.amusecode.org/'
install_requires = [
'amuse-framework',
]
description = 'The Astrophysical Multipurpose Software Environment - MOBSE'
with open("README.md", "r") as fh:
long_description = fh.read()
long_description_content_type = "text/markdown"
extensions = []
all_data_files = []
packages = [
'amuse.community.mobse',
]
package_data = {
}
mapping_from_command_name_to_command_class = setup_commands()
try:
from src.amuse.community.mobse.version import version
use_scm_version = False
setup_requires = []
except ImportError:
version = False
setup_requires = ['setuptools_scm']
use_scm_version = {
"root": "../..",
"relative_to": __file__,
"write_to": "src/amuse/community/mobse/version.py",
}
setup(
name=name,
use_scm_version=use_scm_version,
setup_requires=setup_requires,
version=version,
classifiers=classifiers,
url=url,
author_email=author_email,
author=author,
license=license_,
description=description,
long_description=long_description,
long_description_content_type=long_description_content_type,
install_requires=install_requires,
python_requires=">=3.5",
cmdclass=mapping_from_command_name_to_command_class,
ext_modules=extensions,
package_dir={
'amuse.community.mobse': 'src/amuse/community/mobse',
},
packages=packages,
package_data=package_data,
data_files=all_data_files,
)
| 1,770
| 23.260274
| 75
|
py
|
amuse
|
amuse-main/packages/amuse/setup.py
|
from setuptools import setup
from support.classifiers import classifiers
from setuptools_scm import get_version
version = get_version(
root='../..',
relative_to=__file__,
)
name = 'amuse'
author = 'The AMUSE team'
author_email = 'info@amusecode.org'
license_ = "Apache License 2.0"
url = 'http://www.amusecode.org/'
install_requires = [
'matplotlib>=2.2',
'amuse-framework>=%s' % version,
'amuse-athena>=%s' % version,
'amuse-bhtree>=%s' % version,
# Brutus won't build on macOS as mpfr is not found - #209
# 'amuse-brutus>=%s' % version,
'amuse-bse>=%s' % version,
'amuse-capreole>=%s' % version,
'amuse-evtwin>=%s' % version,
'amuse-fastkick>=%s' % version,
'amuse-fi>=%s' % version,
'amuse-fractalcluster>=%s' % version,
'amuse-framework>=%s' % version,
'amuse-gadget2>=%s' % version,
'amuse-galactics>=%s' % version,
'amuse-galaxia>=%s' % version,
'amuse-halogen>=%s' % version,
'amuse-hermite>=%s' % version,
'amuse-hop>=%s' % version,
'amuse-huayno>=%s' % version,
'amuse-kepler>=%s' % version,
'amuse-kepler-orbiters>=%s' % version,
'amuse-mameclot>=%s' % version,
'amuse-mercury>=%s' % version,
'amuse-mmams>=%s' % version,
'amuse-ph4>=%s' % version,
'amuse-phigrape>=%s' % version,
'amuse-seba>=%s' % version,
'amuse-secularmultiple>=%s' % version,
'amuse-simplex>=%s' % version,
'amuse-smalln>=%s' % version,
'amuse-sphray>=%s' % version,
'amuse-sse>=%s' % version,
'amuse-twobody>=%s' % version,
]
description = 'The Astrophysical Multipurpose Software Environment'
with open("README.md", "r") as fh:
long_description = fh.read()
long_description_content_type = "text/markdown"
try:
from src.amuse.version import version
use_scm_version = False
setup_requires = []
except ImportError:
version = False
setup_requires = ['setuptools_scm']
use_scm_version = {
"root": "../..",
"relative_to": __file__,
# "write_to": "src/amuse/version.py",
}
setup(
name=name,
use_scm_version=use_scm_version,
setup_requires=setup_requires,
version=version,
classifiers=classifiers,
url=url,
author_email=author_email,
author=author,
license=license_,
description=description,
long_description=long_description,
long_description_content_type=long_description_content_type,
install_requires=install_requires,
python_requires=">=3.5",
packages=[],
)
| 2,499
| 27.735632
| 67
|
py
|
amuse
|
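Unlike the per-code packages, the 'amuse' meta-package above computes its version eagerly with setuptools_scm.get_version so that it can pin every sibling package to at least its own release. A hypothetical equivalent of the hand-written pin list, for illustration only (the codes list here is abbreviated, not the full set):

codes = [
    "athena", "bhtree", "bse", "fi", "hermite",  # ... abbreviated
]
install_requires = ['matplotlib>=2.2'] + [
    'amuse-%s>=%s' % (code, version) for code in ['framework'] + codes
]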
amuse-main/packages/amuse-kepler-orbiters/setup.py
|
#!/usr/bin/env python3
from support.classifiers import classifiers
from setuptools import setup
import support
support.use("system")
from support.setup_codes import setup_commands
name = 'amuse-kepler-orbiters'
author = 'The AMUSE team'
author_email = 'info@amusecode.org'
license_ = "Apache License 2.0"
url = 'http://www.amusecode.org/'
install_requires = [
'amuse-framework',
]
description = 'The Astrophysical Multipurpose Software Environment - Kepler-orbiters'
with open("README.md", "r") as fh:
long_description = fh.read()
long_description_content_type = "text/markdown"
extensions = []
all_data_files = []
packages = [
'amuse.community.kepler_orbiters',
]
package_data = {
}
mapping_from_command_name_to_command_class = setup_commands()
try:
from src.amuse.community.kepler_orbiters.version import version
use_scm_version = False
setup_requires = []
except ImportError:
version = False
setup_requires = ['setuptools_scm']
use_scm_version = {
"root": "../..",
"relative_to": __file__,
"write_to": "src/amuse/community/kepler_orbiters/version.py",
}
setup(
name=name,
use_scm_version=use_scm_version,
setup_requires=setup_requires,
version=version,
classifiers=classifiers,
url=url,
author_email=author_email,
author=author,
license=license_,
description=description,
long_description=long_description,
long_description_content_type=long_description_content_type,
install_requires=install_requires,
python_requires=">=3.5",
cmdclass=mapping_from_command_name_to_command_class,
ext_modules=extensions,
package_dir={
'amuse.community.kepler_orbiters': 'src/amuse/community/kepler_orbiters',
},
packages=packages,
package_data=package_data,
data_files=all_data_files,
)
| 1,840
| 24.219178
| 85
|
py
|
amuse
|
amuse-main/packages/amuse-ph4/setup.py
|
#!/usr/bin/env python3
from support.classifiers import classifiers
from setuptools import setup
import support
support.use("system")
from support.setup_codes import setup_commands
name = 'amuse-ph4'
author = 'The AMUSE team'
author_email = 'info@amusecode.org'
license_ = "Apache License 2.0"
url = 'http://www.amusecode.org/'
install_requires = [
'amuse-framework',
]
description = 'The Astrophysical Multipurpose Software Environment - ph4'
with open("README.md", "r") as fh:
long_description = fh.read()
long_description_content_type = "text/markdown"
extensions = []
all_data_files = []
packages = [
'amuse.community.ph4',
]
package_data = {
}
mapping_from_command_name_to_command_class = setup_commands()
try:
from src.amuse.community.ph4.version import version
use_scm_version = False
setup_requires = []
except ImportError:
version = False
setup_requires = ['setuptools_scm']
use_scm_version = {
"root": "../..",
"relative_to": __file__,
"write_to": "src/amuse/community/ph4/version.py",
}
setup(
name=name,
use_scm_version=use_scm_version,
setup_requires=setup_requires,
version=version,
classifiers=classifiers,
url=url,
author_email=author_email,
author=author,
license=license_,
description=description,
long_description=long_description,
long_description_content_type=long_description_content_type,
install_requires=install_requires,
python_requires=">=3.5",
cmdclass=mapping_from_command_name_to_command_class,
ext_modules=extensions,
package_dir={
'amuse.community.ph4': 'src/amuse/community/ph4',
},
packages=packages,
package_data=package_data,
data_files=all_data_files,
)
| 1,756
| 23.068493
| 73
|
py
|
amuse
|
amuse-main/packages/amuse-sse/setup.py
|
#!/usr/bin/env python3
from support.classifiers import classifiers
from setuptools import setup
import support
support.use("system")
from support.setup_codes import setup_commands
name = 'amuse-sse'
author = 'The AMUSE team'
author_email = 'info@amusecode.org'
license_ = "Apache License 2.0"
url = 'http://www.amusecode.org/'
install_requires = [
'amuse-framework',
]
description = 'The Astrophysical Multipurpose Software Environment - SSE'
with open("README.md", "r") as fh:
long_description = fh.read()
long_description_content_type = "text/markdown"
extensions = []
all_data_files = []
packages = [
'amuse.community.sse',
]
package_data = {
}
mapping_from_command_name_to_command_class = setup_commands()
try:
from src.amuse.community.sse.version import version
use_scm_version = False
setup_requires = []
except ImportError:
version = False
setup_requires = ['setuptools_scm']
use_scm_version = {
"root": "../..",
"relative_to": __file__,
"write_to": "src/amuse/community/sse/version.py",
}
setup(
name=name,
use_scm_version=use_scm_version,
setup_requires=setup_requires,
version=version,
classifiers=classifiers,
url=url,
author_email=author_email,
author=author,
license=license_,
description=description,
long_description=long_description,
long_description_content_type=long_description_content_type,
install_requires=install_requires,
python_requires=">=3.5",
cmdclass=mapping_from_command_name_to_command_class,
ext_modules=extensions,
package_dir={
'amuse.community.sse': 'src/amuse/community/sse',
},
packages=packages,
package_data=package_data,
data_files=all_data_files,
)
| 1,756
| 23.068493
| 73
|
py
|
amuse
|
amuse-main/packages/amuse-galactics/setup.py
|
#!/usr/bin/env python3
from support.classifiers import classifiers
from setuptools import setup
import support
support.use("system")
from support.setup_codes import setup_commands
name = 'amuse-galactics'
author = 'The AMUSE team'
author_email = 'info@amusecode.org'
license_ = "Apache License 2.0"
url = 'http://www.amusecode.org/'
install_requires = [
'amuse-framework',
]
description = 'The Astrophysical Multipurpose Software Environment - Galactics'
with open("README.md", "r") as fh:
long_description = fh.read()
long_description_content_type = "text/markdown"
extensions = []
all_data_files = []
packages = [
'amuse.community.galactics',
]
package_data = {
}
mapping_from_command_name_to_command_class = setup_commands()
try:
from src.amuse.community.galactics.version import version
use_scm_version = False
setup_requires = []
except ImportError:
version = False
setup_requires = ['setuptools_scm']
use_scm_version = {
"root": "../..",
"relative_to": __file__,
"write_to": "src/amuse/community/galactics/version.py",
}
setup(
name=name,
use_scm_version=use_scm_version,
setup_requires=setup_requires,
version=version,
classifiers=classifiers,
url=url,
author_email=author_email,
author=author,
license=license_,
description=description,
long_description=long_description,
long_description_content_type=long_description_content_type,
install_requires=install_requires,
python_requires=">=3.5",
cmdclass=mapping_from_command_name_to_command_class,
ext_modules=extensions,
package_dir={
'amuse.community.galactics': 'src/amuse/community/galactics',
},
packages=packages,
package_data=package_data,
data_files=all_data_files,
)
| 1,798
| 23.643836
| 79
|
py
|
amuse
|
amuse-main/packages/amuse-mmams/setup.py
|
#!/usr/bin/env python3
from support.classifiers import classifiers
from setuptools import setup
import support
support.use("system")
from support.setup_codes import setup_commands
name = 'amuse-mmams'
author = 'The AMUSE team'
author_email = 'info@amusecode.org'
license_ = "Apache License 2.0"
url = 'http://www.amusecode.org/'
install_requires = [
'amuse-framework',
]
description = 'The Astrophysical Multipurpose Software Environment - MMAMS'
with open("README.md", "r") as fh:
long_description = fh.read()
long_description_content_type = "text/markdown"
extensions = []
all_data_files = []
packages = [
'amuse.community.mmams',
]
package_data = {
}
mapping_from_command_name_to_command_class = setup_commands()
try:
from src.amuse.community.mmams.version import version
use_scm_version = False
setup_requires = []
except ImportError:
version = False
setup_requires = ['setuptools_scm']
use_scm_version = {
"root": "../..",
"relative_to": __file__,
"write_to": "src/amuse/community/mmams/version.py",
}
setup(
name=name,
use_scm_version=use_scm_version,
setup_requires=setup_requires,
version=version,
classifiers=classifiers,
url=url,
author_email=author_email,
author=author,
license=license_,
description=description,
long_description=long_description,
long_description_content_type=long_description_content_type,
install_requires=install_requires,
python_requires=">=3.5",
cmdclass=mapping_from_command_name_to_command_class,
ext_modules=extensions,
package_dir={
'amuse.community.mmams': 'src/amuse/community/mmams',
},
packages=packages,
package_data=package_data,
data_files=all_data_files,
)
| 1,770
| 23.260274
| 75
|
py
|
amuse
|
amuse-main/packages/amuse-phigrape/setup.py
|
#!/usr/bin/env python3
from support.classifiers import classifiers
from setuptools import setup
import support
support.use("system")
from support.setup_codes import setup_commands
name = 'amuse-phigrape'
author = 'The AMUSE team'
author_email = 'info@amusecode.org'
license_ = "Apache License 2.0"
url = 'http://www.amusecode.org/'
install_requires = [
'amuse-framework',
]
description = 'The Astrophysical Multipurpose Software Environment - phiGRAPE'
with open("README.md", "r") as fh:
long_description = fh.read()
long_description_content_type = "text/markdown"
extensions = []
all_data_files = []
packages = [
'amuse.community.phigrape',
]
package_data = {
}
mapping_from_command_name_to_command_class = setup_commands()
try:
from src.amuse.community.phigrape.version import version
use_scm_version = False
setup_requires = []
except ImportError:
version = False
setup_requires = ['setuptools_scm']
use_scm_version = {
"root": "../..",
"relative_to": __file__,
"write_to": "src/amuse/community/phigrape/version.py",
}
setup(
name=name,
use_scm_version=use_scm_version,
setup_requires=setup_requires,
version=version,
classifiers=classifiers,
url=url,
author_email=author_email,
author=author,
license=license_,
description=description,
long_description=long_description,
long_description_content_type=long_description_content_type,
install_requires=install_requires,
python_requires=">=3.5",
cmdclass=mapping_from_command_name_to_command_class,
ext_modules=extensions,
package_dir={
'amuse.community.phigrape': 'src/amuse/community/phigrape',
},
packages=packages,
package_data=package_data,
data_files=all_data_files,
)
| 1,791
| 23.547945
| 78
|
py
|
amuse
|
amuse-main/packages/amuse-sphray/setup.py
|
#!/usr/bin/env python3
from support.classifiers import classifiers
from setuptools import setup
import support
support.use("system")
from support.setup_codes import setup_commands
name = 'amuse-sphray'
author = 'The AMUSE team'
author_email = 'info@amusecode.org'
license_ = "Apache License 2.0"
url = 'http://www.amusecode.org/'
install_requires = [
'amuse-framework',
]
description = 'The Astrophysical Multipurpose Software Environment - SPHRay'
with open("README.md", "r") as fh:
long_description = fh.read()
long_description_content_type = "text/markdown"
extensions = []
all_data_files = []
packages = [
'amuse.community.sphray',
]
package_data = {
}
mapping_from_command_name_to_command_class = setup_commands()
try:
from src.amuse.community.sphray.version import version
use_scm_version = False
setup_requires = []
except ImportError:
version = False
setup_requires = ['setuptools_scm']
use_scm_version = {
"root": "../..",
"relative_to": __file__,
"write_to": "src/amuse/community/sphray/version.py",
}
setup(
name=name,
use_scm_version=use_scm_version,
setup_requires=setup_requires,
version=version,
classifiers=classifiers,
url=url,
author_email=author_email,
author=author,
license=license_,
description=description,
long_description=long_description,
long_description_content_type=long_description_content_type,
install_requires=install_requires,
python_requires=">=3.5",
cmdclass=mapping_from_command_name_to_command_class,
ext_modules=extensions,
package_dir={
'amuse.community.sphray': 'src/amuse/community/sphray',
},
packages=packages,
package_data=package_data,
data_files=all_data_files,
)
| 1,777
| 23.356164
| 76
|
py
|
amuse
|
amuse-main/packages/amuse-brutus/setup.py
|
#!/usr/bin/env python3
from support.classifiers import classifiers
from setuptools import setup
import support
support.use("system")
from support.setup_codes import setup_commands
name = 'amuse-brutus'
author = 'The AMUSE team'
author_email = 'info@amusecode.org'
license_ = "Apache License 2.0"
url = 'http://www.amusecode.org/'
install_requires = [
'amuse-framework',
]
description = 'The Astrophysical Multipurpose Software Environment - Brutus'
with open("README.md", "r") as fh:
long_description = fh.read()
long_description_content_type = "text/markdown"
extensions = []
all_data_files = []
packages = [
'amuse.community.brutus',
]
package_data = {
}
mapping_from_command_name_to_command_class = setup_commands()
try:
from src.amuse.community.brutus.version import version
use_scm_version = False
setup_requires = []
except ImportError:
version = False
setup_requires = ['setuptools_scm']
use_scm_version = {
"root": "../..",
"relative_to": __file__,
"write_to": "src/amuse/community/brutus/version.py",
}
setup(
name=name,
use_scm_version=use_scm_version,
setup_requires=setup_requires,
version=version,
classifiers=classifiers,
url=url,
author_email=author_email,
author=author,
license=license_,
description=description,
long_description=long_description,
long_description_content_type=long_description_content_type,
install_requires=install_requires,
python_requires=">=3.5",
cmdclass=mapping_from_command_name_to_command_class,
ext_modules=extensions,
package_dir={
'amuse.community.brutus': 'src/amuse/community/brutus',
},
packages=packages,
package_data=package_data,
data_files=all_data_files,
)
| 1,777
| 23.356164
| 76
|
py
|
amuse
|
amuse-main/packages/amuse-halogen/setup.py
|
#!/usr/bin/env python3
from support.classifiers import classifiers
from setuptools import setup
import support
support.use("system")
from support.setup_codes import setup_commands
name = 'amuse-halogen'
author = 'The AMUSE team'
author_email = 'info@amusecode.org'
license_ = "Apache License 2.0"
url = 'http://www.amusecode.org/'
install_requires = [
'amuse-framework',
]
description = 'The Astrophysical Multipurpose Software Environment - Halogen'
with open("README.md", "r") as fh:
long_description = fh.read()
long_description_content_type = "text/markdown"
extensions = []
all_data_files = []
packages = [
'amuse.community.halogen',
]
package_data = {
}
mapping_from_command_name_to_command_class = setup_commands()
try:
from src.amuse.community.halogen.version import version
use_scm_version = False
setup_requires = []
except ImportError:
version = False
setup_requires = ['setuptools_scm']
use_scm_version = {
"root": "../..",
"relative_to": __file__,
"write_to": "src/amuse/community/halogen/version.py",
}
setup(
name=name,
use_scm_version=use_scm_version,
setup_requires=setup_requires,
version=version,
classifiers=classifiers,
url=url,
author_email=author_email,
author=author,
license=license_,
description=description,
long_description=long_description,
long_description_content_type=long_description_content_type,
install_requires=install_requires,
python_requires=">=3.5",
cmdclass=mapping_from_command_name_to_command_class,
ext_modules=extensions,
package_dir={
'amuse.community.halogen': 'src/amuse/community/halogen',
},
packages=packages,
package_data=package_data,
data_files=all_data_files,
)
| 1,784
| 23.452055
| 77
|
py
|
amuse
|
amuse-main/packages/amuse-mameclot/setup.py
|
#!/usr/bin/env python3
from support.classifiers import classifiers
from setuptools import setup
import support
support.use("system")
from support.setup_codes import setup_commands
name = 'amuse-mameclot'
author = 'The AMUSE team'
author_email = 'info@amusecode.org'
license_ = "Apache License 2.0"
url = 'http://www.amusecode.org/'
install_requires = [
'amuse-framework',
]
description = 'The Astrophysical Multipurpose Software Environment - Mameclot'
with open("README.md", "r") as fh:
long_description = fh.read()
long_description_content_type = "text/markdown"
extensions = []
all_data_files = []
packages = [
'amuse.community.mameclot',
]
package_data = {
}
mapping_from_command_name_to_command_class = setup_commands()
try:
from src.amuse.community.mameclot.version import version
use_scm_version = False
setup_requires = []
except ImportError:
version = False
setup_requires = ['setuptools_scm']
use_scm_version = {
"root": "../..",
"relative_to": __file__,
"write_to": "src/amuse/community/mameclot/version.py",
}
setup(
name=name,
use_scm_version=use_scm_version,
setup_requires=setup_requires,
version=version,
classifiers=classifiers,
url=url,
author_email=author_email,
author=author,
license=license_,
description=description,
long_description=long_description,
long_description_content_type=long_description_content_type,
install_requires=install_requires,
python_requires=">=3.5",
cmdclass=mapping_from_command_name_to_command_class,
ext_modules=extensions,
package_dir={
'amuse.community.mameclot': 'src/amuse/community/mameclot',
},
packages=packages,
package_data=package_data,
data_files=all_data_files,
)
| 1,791
| 23.547945
| 78
|
py
|
amuse
|
amuse-main/packages/amuse-fractalcluster/setup.py
|
#!/usr/bin/env python3
from support.classifiers import classifiers
from setuptools import setup
import support
support.use("system")
from support.setup_codes import setup_commands
name = 'amuse-fractalcluster'
author = 'The AMUSE team'
author_email = 'info@amusecode.org'
license_ = "Apache License 2.0"
url = 'http://www.amusecode.org/'
install_requires = [
'amuse-framework',
]
description = 'The Astrophysical Multipurpose Software Environment - fractalcluster'
with open("README.md", "r") as fh:
long_description = fh.read()
long_description_content_type = "text/markdown"
extensions = []
all_data_files = []
packages = [
'amuse.community.fractalcluster',
]
package_data = {
}
mapping_from_command_name_to_command_class = setup_commands()
try:
from src.amuse.community.fractalcluster.version import version
use_scm_version = False
setup_requires = []
except ImportError:
version = False
setup_requires = ['setuptools_scm']
use_scm_version = {
"root": "../..",
"relative_to": __file__,
"write_to": "src/amuse/community/fractalcluster/version.py",
}
setup(
name=name,
use_scm_version=use_scm_version,
setup_requires=setup_requires,
version=version,
classifiers=classifiers,
url=url,
author_email=author_email,
author=author,
license=license_,
description=description,
long_description=long_description,
long_description_content_type=long_description_content_type,
install_requires=install_requires,
python_requires=">=3.5",
cmdclass=mapping_from_command_name_to_command_class,
ext_modules=extensions,
package_dir={
'amuse.community.fractalcluster': 'src/amuse/community/fractalcluster',
},
packages=packages,
package_data=package_data,
data_files=all_data_files,
)
| 1,833
| 24.123288
| 84
|
py
|
amuse
|
amuse-main/packages/amuse-galaxia/setup.py
|
#!/usr/bin/env python3
from support.classifiers import classifiers
from setuptools import setup
import support
support.use("system")
from support.setup_codes import setup_commands
name = 'amuse-galaxia'
author = 'The AMUSE team'
author_email = 'info@amusecode.org'
license_ = "Apache License 2.0"
url = 'http://www.amusecode.org/'
install_requires = [
'amuse-framework',
]
description = 'The Astrophysical Multipurpose Software Environment - Galaxia'
with open("README.md", "r") as fh:
long_description = fh.read()
long_description_content_type = "text/markdown"
extensions = []
all_data_files = []
packages = [
'amuse.community.galaxia',
]
package_data = {
}
mapping_from_command_name_to_command_class = setup_commands()
try:
from src.amuse.community.galaxia.version import version
use_scm_version = False
setup_requires = []
except ImportError:
version = False
setup_requires = ['setuptools_scm']
use_scm_version = {
"root": "../..",
"relative_to": __file__,
"write_to": "src/amuse/community/galaxia/version.py",
}
setup(
name=name,
use_scm_version=use_scm_version,
setup_requires=setup_requires,
version=version,
classifiers=classifiers,
url=url,
author_email=author_email,
author=author,
license=license_,
description=description,
long_description=long_description,
long_description_content_type=long_description_content_type,
install_requires=install_requires,
python_requires=">=3.5",
cmdclass=mapping_from_command_name_to_command_class,
ext_modules=extensions,
package_dir={
'amuse.community.galaxia': 'src/amuse/community/galaxia',
},
packages=packages,
package_data=package_data,
data_files=all_data_files,
)
| 1,784
| 23.452055
| 77
|
py
|
amuse
|
amuse-main/packages/amuse-hop/setup.py
|
#!/usr/bin/env python3
from support.classifiers import classifiers
from setuptools import setup
import support
support.use("system")
from support.setup_codes import setup_commands
name = 'amuse-hop'
author = 'The AMUSE team'
author_email = 'info@amusecode.org'
license_ = "Apache License 2.0"
url = 'http://www.amusecode.org/'
install_requires = [
'amuse-framework',
]
description = 'The Astrophysical Multipurpose Software Environment - Hop'
with open("README.md", "r") as fh:
long_description = fh.read()
long_description_content_type = "text/markdown"
extensions = []
all_data_files = []
packages = [
'amuse.community.hop',
]
package_data = {
}
mapping_from_command_name_to_command_class = setup_commands()
try:
from src.amuse.community.hop.version import version
use_scm_version = False
setup_requires = []
except ImportError:
version = False
setup_requires = ['setuptools_scm']
use_scm_version = {
"root": "../..",
"relative_to": __file__,
"write_to": "src/amuse/community/hop/version.py",
}
setup(
name=name,
use_scm_version=use_scm_version,
setup_requires=setup_requires,
version=version,
classifiers=classifiers,
url=url,
author_email=author_email,
author=author,
license=license_,
description=description,
long_description=long_description,
long_description_content_type=long_description_content_type,
install_requires=install_requires,
python_requires=">=3.5",
cmdclass=mapping_from_command_name_to_command_class,
ext_modules=extensions,
package_dir={
'amuse.community.hop': 'src/amuse/community/hop',
},
packages=packages,
package_data=package_data,
data_files=all_data_files,
)
| 1,756
| 23.068493
| 73
|
py
|
amuse
|
amuse-main/packages/amuse-mosse/setup.py
|
#!/usr/bin/env python3
from support.classifiers import classifiers
from setuptools import setup
import support
support.use("system")
from support.setup_codes import setup_commands
name = 'amuse-mosse'
author = 'The AMUSE team'
author_email = 'info@amusecode.org'
license_ = "Apache License 2.0"
url = 'http://www.amusecode.org/'
install_requires = [
'amuse-framework',
]
description = 'The Astrophysical Multipurpose Software Environment - MOSSE'
with open("README.md", "r") as fh:
long_description = fh.read()
long_description_content_type = "text/markdown"
extensions = []
all_data_files = []
packages = [
'amuse.community.mosse',
]
package_data = {
}
mapping_from_command_name_to_command_class = setup_commands()
try:
from src.amuse.community.mosse.version import version
use_scm_version = False
setup_requires = []
except ImportError:
version = False
setup_requires = ['setuptools_scm']
use_scm_version = {
"root": "../..",
"relative_to": __file__,
"write_to": "src/amuse/community/mosse/version.py",
}
setup(
name=name,
use_scm_version=use_scm_version,
setup_requires=setup_requires,
version=version,
classifiers=classifiers,
url=url,
author_email=author_email,
author=author,
license=license_,
description=description,
long_description=long_description,
long_description_content_type=long_description_content_type,
install_requires=install_requires,
python_requires=">=3.5",
cmdclass=mapping_from_command_name_to_command_class,
ext_modules=extensions,
package_dir={
'amuse.community.mosse': 'src/amuse/community/mosse',
},
packages=packages,
package_data=package_data,
data_files=all_data_files,
)
| 1,770
| 23.260274
| 75
|
py
|
amuse
|
amuse-main/packages/amuse-aarsethzare/setup.py
|
#!/usr/bin/env python3
from support.classifiers import classifiers
from setuptools import setup
import support
support.use("system")
from support.setup_codes import setup_commands
name = 'amuse-aarsethzare'
author = 'The AMUSE team'
author_email = 'info@amusecode.org'
license_ = "Apache License 2.0"
url = 'http://www.amusecode.org/'
install_requires = [
'amuse-framework',
]
description = 'The Astrophysical Multipurpose Software Environment - AarsethZare'
with open("README.md", "r") as fh:
long_description = fh.read()
long_description_content_type = "text/markdown"
extensions = []
all_data_files = []
packages = [
'amuse.community.aarsethzare',
]
package_data = {
}
mapping_from_command_name_to_command_class = setup_commands()
try:
from src.amuse.community.aarsethzare.version import version
use_scm_version = False
setup_requires = []
except ImportError:
version = False
setup_requires = ['setuptools_scm']
use_scm_version = {
"root": "../..",
"relative_to": __file__,
"write_to": "src/amuse/community/aarsethzare/version.py",
}
setup(
name=name,
use_scm_version=use_scm_version,
setup_requires=setup_requires,
version=version,
classifiers=classifiers,
url=url,
author_email=author_email,
author=author,
license=license_,
description=description,
long_description=long_description,
long_description_content_type=long_description_content_type,
install_requires=install_requires,
python_requires=">=3.5",
cmdclass=mapping_from_command_name_to_command_class,
ext_modules=extensions,
package_dir={
'amuse.community.aarsethzare': 'src/amuse/community/aarsethzare',
},
packages=packages,
package_data=package_data,
data_files=all_data_files,
)
| 1,812
| 23.835616
| 81
|
py
|
amuse
|
amuse-main/packages/amuse-vader/setup.py
|
#!/usr/bin/env python3
from support.classifiers import classifiers
from setuptools import setup
import support
support.use("system")
from support.setup_codes import setup_commands
name = 'amuse-vader'
author = 'The AMUSE team'
author_email = 'info@amusecode.org'
license_ = "Apache License 2.0"
url = 'http://www.amusecode.org/'
install_requires = [
'amuse-framework',
]
description = 'The Astrophysical Multipurpose Software Environment - VADER'
with open("README.md", "r") as fh:
long_description = fh.read()
long_description_content_type = "text/markdown"
extensions = []
all_data_files = []
packages = [
'amuse.community.vader',
]
package_data = {
}
mapping_from_command_name_to_command_class = setup_commands()
try:
from src.amuse.community.vader.version import version
use_scm_version = False
setup_requires = []
except ImportError:
version = False
setup_requires = ['setuptools_scm']
use_scm_version = {
"root": "../..",
"relative_to": __file__,
"write_to": "src/amuse/community/vader/version.py",
}
setup(
name=name,
use_scm_version=use_scm_version,
setup_requires=setup_requires,
version=version,
classifiers=classifiers,
url=url,
author_email=author_email,
author=author,
license=license_,
description=description,
long_description=long_description,
long_description_content_type=long_description_content_type,
install_requires=install_requires,
python_requires=">=3.5",
cmdclass=mapping_from_command_name_to_command_class,
ext_modules=extensions,
package_dir={
'amuse.community.vader': 'src/amuse/community/vader',
},
packages=packages,
package_data=package_data,
data_files=all_data_files,
)
| 1,770
| 23.260274
| 75
|
py
|
amuse
|
amuse-main/packages/amuse-gadget2/setup.py
|
#!/usr/bin/env python3
from support.classifiers import classifiers
from setuptools import setup
import support
support.use("system")
from support.setup_codes import setup_commands
name = 'amuse-gadget2'
author = 'The AMUSE team'
author_email = 'info@amusecode.org'
license_ = "Apache License 2.0"
url = 'http://www.amusecode.org/'
install_requires = [
'amuse-framework',
]
description = 'The Astrophysical Multipurpose Software Environment - Gadget2'
with open("README.md", "r") as fh:
long_description = fh.read()
long_description_content_type = "text/markdown"
extensions = []
all_data_files = []
packages = [
'amuse.community.gadget2',
]
package_data = {
}
mapping_from_command_name_to_command_class = setup_commands()
try:
from src.amuse.community.gadget2.version import version
use_scm_version = False
setup_requires = []
except ImportError:
version = False
setup_requires = ['setuptools_scm']
use_scm_version = {
"root": "../..",
"relative_to": __file__,
"write_to": "src/amuse/community/gadget2/version.py",
}
setup(
name=name,
use_scm_version=use_scm_version,
setup_requires=setup_requires,
version=version,
classifiers=classifiers,
url=url,
author_email=author_email,
author=author,
license=license_,
description=description,
long_description=long_description,
long_description_content_type=long_description_content_type,
install_requires=install_requires,
python_requires=">=3.5",
cmdclass=mapping_from_command_name_to_command_class,
ext_modules=extensions,
package_dir={
'amuse.community.gadget2': 'src/amuse/community/gadget2',
},
packages=packages,
package_data=package_data,
data_files=all_data_files,
)
| 1,784
| 23.452055
| 77
|
py
|
amuse
|
amuse-main/packages/amuse-huayno/setup.py
|
#!/usr/bin/env python3
from support.classifiers import classifiers
from setuptools import setup
import support
support.use("system")
from support.setup_codes import setup_commands
name = 'amuse-huayno'
author = 'The AMUSE team'
author_email = 'info@amusecode.org'
license_ = "Apache License 2.0"
url = 'http://www.amusecode.org/'
install_requires = [
'amuse-framework',
]
description = 'The Astrophysical Multipurpose Software Environment - Huayno'
with open("README.md", "r") as fh:
long_description = fh.read()
long_description_content_type = "text/markdown"
extensions = []
all_data_files = []
packages = [
'amuse.community.huayno',
]
package_data = {
}
mapping_from_command_name_to_command_class = setup_commands()
try:
from src.amuse.community.huayno.version import version
use_scm_version = False
setup_requires = []
except ImportError:
version = False
setup_requires = ['setuptools_scm']
use_scm_version = {
"root": "../..",
"relative_to": __file__,
"write_to": "src/amuse/community/huayno/version.py",
}
setup(
name=name,
use_scm_version=use_scm_version,
setup_requires=setup_requires,
version=version,
classifiers=classifiers,
url=url,
author_email=author_email,
author=author,
license=license_,
description=description,
long_description=long_description,
long_description_content_type=long_description_content_type,
install_requires=install_requires,
python_requires=">=3.5",
cmdclass=mapping_from_command_name_to_command_class,
ext_modules=extensions,
package_dir={
'amuse.community.huayno': 'src/amuse/community/huayno',
},
packages=packages,
package_data=package_data,
data_files=all_data_files,
)
| 1,777
| 23.356164
| 76
|
py
|
amuse
|
amuse-main/packages/amuse-framework/setup.py
|
from support.classifiers import classifiers
from setuptools import setup, find_packages
from support.setup_codes import setup_commands
name = 'amuse-framework'
author = 'The AMUSE team'
author_email = 'info@amusecode.org'
license_ = "Apache License 2.0"
url = 'http://www.amusecode.org/'
install_requires = [
'setuptools>=41.0.0',
'setuptools_scm',
'pip>=19.0.0',
'wheel>=0.32',
'docutils>=0.6',
'numpy>=1.2.2',
'pytest>=4.0',
'h5py>=1.1.0',
]
description = 'The Astrophysical Multipurpose Software Environment'
with open("README.md", "r") as fh:
long_description = fh.read()
long_description_content_type = "text/markdown"
extensions = []
all_data_files = []
all_data_files.append(('share/amuse', ['./config.mk']))
packages = find_packages('src', exclude=["amuse.community.*"])
packages.append("amuse.community.interface")
package_data = {
'amuse.rfi.tools': ['*.template'],
'amuse': [
'*rc'
]
}
mapping_from_command_name_to_command_class = setup_commands()
try:
from src.amuse.version import version
use_scm_version = False
setup_requires = []
except ImportError:
version = None
setup_requires = ['setuptools_scm']
use_scm_version = {
"root": "../..",
"relative_to": __file__,
"write_to": "src/amuse/version.py",
}
setup(
name=name,
use_scm_version=use_scm_version,
setup_requires=setup_requires,
version=version,
classifiers=classifiers,
url=url,
author_email=author_email,
author=author,
license=license_,
description=description,
long_description=long_description,
long_description_content_type=long_description_content_type,
install_requires=install_requires,
python_requires=">=3.7",
extras_require={
"MPI": ["mpi4py>=1.1.0"]
},
cmdclass=mapping_from_command_name_to_command_class,
ext_modules=extensions,
package_dir={'': 'src'},
packages=packages,
package_data=package_data,
data_files=all_data_files,
scripts=["bin/amusifier", ],
)
| 2,058
| 24.419753
| 67
|
py
|
amuse
|
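amuse-framework is the only package in this dump that declares an extra: mpi4py is installed only when the [MPI] extra is requested (pip install "amuse-framework[MPI]"). A sketch of how consuming code can degrade gracefully when the extra is absent:

try:
    from mpi4py import MPI  # present only when the [MPI] extra was installed
except ImportError:
    MPI = None  # caller must fall back to a non-MPI code path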
amuse-main/packages/amuse-kepler/setup.py
|
#!/usr/bin/env python3
from support.classifiers import classifiers
from setuptools import setup
import support
support.use("system")
from support.setup_codes import setup_commands
name = 'amuse-kepler'
author = 'The AMUSE team'
author_email = 'info@amusecode.org'
license_ = "Apache License 2.0"
url = 'http://www.amusecode.org/'
install_requires = [
'amuse-framework',
]
description = 'The Astrophysical Multipurpose Software Environment - Kepler'
with open("README.md", "r") as fh:
long_description = fh.read()
long_description_content_type = "text/markdown"
extensions = []
all_data_files = []
packages = [
'amuse.community.kepler',
]
package_data = {
}
mapping_from_command_name_to_command_class = setup_commands()
try:
from src.amuse.community.kepler.version import version
use_scm_version = False
setup_requires = []
except ImportError:
version = False
setup_requires = ['setuptools_scm']
use_scm_version = {
"root": "../..",
"relative_to": __file__,
"write_to": "src/amuse/community/kepler/version.py",
}
setup(
name=name,
use_scm_version=use_scm_version,
setup_requires=setup_requires,
version=version,
classifiers=classifiers,
url=url,
author_email=author_email,
author=author,
license=license_,
description=description,
long_description=long_description,
long_description_content_type=long_description_content_type,
install_requires=install_requires,
python_requires=">=3.5",
cmdclass=mapping_from_command_name_to_command_class,
ext_modules=extensions,
package_dir={
'amuse.community.kepler': 'src/amuse/community/kepler',
},
packages=packages,
package_data=package_data,
data_files=all_data_files,
)
| 1,777
| 23.356164
| 76
|
py
|
amuse
|
amuse-main/packages/amuse-simplex/setup.py
|
#!/usr/bin/env python3
from support.classifiers import classifiers
from setuptools import setup
import support
support.use("system")
from support.setup_codes import setup_commands
name = 'amuse-simplex'
author = 'The AMUSE team'
author_email = 'info@amusecode.org'
license_ = "Apache License 2.0"
url = 'http://www.amusecode.org/'
install_requires = [
    'mpi4py>=1.1.0',  # keep this pinned here, since this code requires MPI
'amuse-framework',
]
description = 'The Astrophysical Multipurpose Software Environment - Simplex'
with open("README.md", "r") as fh:
long_description = fh.read()
long_description_content_type = "text/markdown"
extensions = []
all_data_files = []
packages = [
'amuse.community.simplex',
]
package_data = {
}
mapping_from_command_name_to_command_class = setup_commands()
try:
from src.amuse.community.simplex.version import version
use_scm_version = False
setup_requires = []
except ImportError:
version = False
setup_requires = ['setuptools_scm']
use_scm_version = {
"root": "../..",
"relative_to": __file__,
"write_to": "src/amuse/community/simplex/version.py",
}
setup(
name=name,
use_scm_version=use_scm_version,
setup_requires=setup_requires,
version=version,
classifiers=classifiers,
url=url,
author_email=author_email,
author=author,
license=license_,
description=description,
long_description=long_description,
long_description_content_type=long_description_content_type,
install_requires=install_requires,
python_requires=">=3.5",
cmdclass=mapping_from_command_name_to_command_class,
ext_modules=extensions,
package_dir={
'amuse.community.simplex': 'src/amuse/community/simplex',
},
packages=packages,
package_data=package_data,
data_files=all_data_files,
)
| 1,839
| 23.864865
| 77
|
py
|
amuse
|
amuse-main/packages/amuse-phantom/setup.py
|
#!/usr/bin/env python3
from support.classifiers import classifiers
from setuptools import setup
import support
support.use("system")
from support.setup_codes import setup_commands
name = 'amuse-phantom'
author = 'The AMUSE team'
author_email = 'info@amusecode.org'
license_ = "Apache License 2.0"
url = 'http://www.amusecode.org/'
install_requires = [
'amuse-framework',
]
description = 'The Astrophysical Multipurpose Software Environment - Phantom'
with open("README.md", "r") as fh:
long_description = fh.read()
long_description_content_type = "text/markdown"
extensions = []
all_data_files = []
packages = [
'amuse.community.phantom',
]
package_data = {
}
mapping_from_command_name_to_command_class = setup_commands()
try:
from src.amuse.community.phantom.version import version
use_scm_version = False
setup_requires = []
except ImportError:
version = False
setup_requires = ['setuptools_scm']
use_scm_version = {
"root": "../..",
"relative_to": __file__,
"write_to": "src/amuse/community/phantom/version.py",
}
setup(
name=name,
use_scm_version=use_scm_version,
setup_requires=setup_requires,
version=version,
classifiers=classifiers,
url=url,
author_email=author_email,
author=author,
license=license_,
description=description,
long_description=long_description,
long_description_content_type=long_description_content_type,
install_requires=install_requires,
python_requires=">=3.5",
cmdclass=mapping_from_command_name_to_command_class,
ext_modules=extensions,
package_dir={
'amuse.community.phantom': 'src/amuse/community/phantom',
},
packages=packages,
package_data=package_data,
data_files=all_data_files,
)
| 1,784
| 23.452055
| 77
|
py
|
amuse
|
amuse-main/packages/amuse-bhtree/setup.py
|
#!/usr/bin/env python3
from support.classifiers import classifiers
from setuptools import setup
import support
support.use("system")
from support.setup_codes import setup_commands
name = 'amuse-bhtree'
author = 'The AMUSE team'
author_email = 'info@amusecode.org'
license_ = "Apache License 2.0"
url = 'http://www.amusecode.org/'
install_requires = [
'amuse-framework',
]
description = 'The Astrophysical Multipurpose Software Environment - BHTree'
with open("README.md", "r") as fh:
long_description = fh.read()
long_description_content_type = "text/markdown"
extensions = []
all_data_files = []
packages = [
'amuse.community.bhtree',
]
package_data = {
}
mapping_from_command_name_to_command_class = setup_commands()
try:
from src.amuse.community.bhtree.version import version
use_scm_version = False
setup_requires = []
except ImportError:
version = False
setup_requires = ['setuptools_scm']
use_scm_version = {
"root": "../..",
"relative_to": __file__,
"write_to": "src/amuse/community/bhtree/version.py",
}
setup(
name=name,
use_scm_version=use_scm_version,
setup_requires=setup_requires,
version=version,
classifiers=classifiers,
url=url,
author_email=author_email,
author=author,
license=license_,
description=description,
long_description=long_description,
long_description_content_type=long_description_content_type,
install_requires=install_requires,
python_requires=">=3.5",
cmdclass=mapping_from_command_name_to_command_class,
ext_modules=extensions,
package_dir={
'amuse.community.bhtree': 'src/amuse/community/bhtree',
},
packages=packages,
package_data=package_data,
data_files=all_data_files,
)
| 1,777
| 23.356164
| 76
|
py
|
amuse
|
amuse-main/packages/amuse-capreole/setup.py
|
#!/usr/bin/env python3
from support.classifiers import classifiers
from setuptools import setup
import support
support.use("system")
from support.setup_codes import setup_commands
name = 'amuse-capreole'
author = 'The AMUSE team'
author_email = 'info@amusecode.org'
license_ = "Apache License 2.0"
url = 'http://www.amusecode.org/'
install_requires = [
'amuse-framework',
]
description = 'The Astrophysical Multipurpose Software Environment - Capreole'
with open("README.md", "r") as fh:
long_description = fh.read()
long_description_content_type = "text/markdown"
extensions = []
all_data_files = []
packages = [
'amuse.community.capreole',
]
package_data = {
}
mapping_from_command_name_to_command_class = setup_commands()
try:
from src.amuse.community.capreole.version import version
use_scm_version = False
setup_requires = []
except ImportError:
version = False
setup_requires = ['setuptools_scm']
use_scm_version = {
"root": "../..",
"relative_to": __file__,
"write_to": "src/amuse/community/capreole/version.py",
}
setup(
name=name,
use_scm_version=use_scm_version,
setup_requires=setup_requires,
version=version,
classifiers=classifiers,
url=url,
author_email=author_email,
author=author,
license=license_,
description=description,
long_description=long_description,
long_description_content_type=long_description_content_type,
install_requires=install_requires,
python_requires=">=3.5",
cmdclass=mapping_from_command_name_to_command_class,
ext_modules=extensions,
package_dir={
'amuse.community.capreole': 'src/amuse/community/capreole',
},
packages=packages,
package_data=package_data,
data_files=all_data_files,
)
| 1,791
| 23.547945
| 78
|
py
|
amuse
|
amuse-main/packages/amuse-hermite/setup.py
|
#!/usr/bin/env python3
from support.classifiers import classifiers
from setuptools import setup
import support
support.use("system")
from support.setup_codes import setup_commands
name = 'amuse-hermite'
author = 'The AMUSE team'
author_email = 'info@amusecode.org'
license_ = "Apache License 2.0"
url = 'http://www.amusecode.org/'
install_requires = [
'amuse-framework',
]
description = 'The Astrophysical Multipurpose Software Environment - Hermite'
with open("README.md", "r") as fh:
long_description = fh.read()
long_description_content_type = "text/markdown"
extensions = []
all_data_files = []
packages = [
'amuse.community.hermite',
'amuse.community.hermite0',
]
package_data = {
}
mapping_from_command_name_to_command_class = setup_commands()
try:
from src.amuse.community.hermite.version import version
use_scm_version = False
setup_requires = []
except ImportError:
version = False
setup_requires = ['setuptools_scm']
use_scm_version = {
"root": "../..",
"relative_to": __file__,
"write_to": "src/amuse/community/hermite/version.py",
}
setup(
name=name,
use_scm_version=use_scm_version,
setup_requires=setup_requires,
version=version,
classifiers=classifiers,
url=url,
author_email=author_email,
author=author,
license=license_,
description=description,
long_description=long_description,
long_description_content_type=long_description_content_type,
install_requires=install_requires,
python_requires=">=3.5",
cmdclass=mapping_from_command_name_to_command_class,
ext_modules=extensions,
package_dir={
'amuse.community.hermite': 'src/amuse/community/hermite',
'amuse.community.hermite0': 'src/amuse/community/hermite0',
},
packages=packages,
package_data=package_data,
data_files=all_data_files,
)
| 1,884
| 24.133333
| 77
|
py
|
amuse
|
amuse-main/packages/amuse-evtwin/setup.py
|
#!/usr/bin/env python3
from support.classifiers import classifiers
from setuptools import setup
import support
support.use("system")
from support.setup_codes import setup_commands
name = 'amuse-evtwin'
author = 'The AMUSE team'
author_email = 'info@amusecode.org'
license_ = "Apache License 2.0"
url = 'http://www.amusecode.org/'
install_requires = [
'amuse-framework',
]
description = 'The Astrophysical Multipurpose Software Environment - EVTwin'
with open("README.md", "r") as fh:
long_description = fh.read()
long_description_content_type = "text/markdown"
extensions = []
all_data_files = []
packages = [
'amuse.community.evtwin',
]
package_data = {
}
mapping_from_command_name_to_command_class = setup_commands()
try:
from src.amuse.community.evtwin.version import version
use_scm_version = False
setup_requires = []
except ImportError:
version = False
setup_requires = ['setuptools_scm']
use_scm_version = {
"root": "../..",
"relative_to": __file__,
"write_to": "src/amuse/community/evtwin/version.py",
}
setup(
name=name,
use_scm_version=use_scm_version,
setup_requires=setup_requires,
version=version,
classifiers=classifiers,
url=url,
author_email=author_email,
author=author,
license=license_,
description=description,
long_description=long_description,
long_description_content_type=long_description_content_type,
install_requires=install_requires,
python_requires=">=3.5",
cmdclass=mapping_from_command_name_to_command_class,
ext_modules=extensions,
package_dir={
'amuse.community.evtwin': 'src/amuse/community/evtwin',
},
packages=packages,
package_data=package_data,
data_files=all_data_files,
)
| 1,777
| 23.356164
| 76
|
py
|
amuse
|
amuse-main/packages/amuse-mikkola/setup.py
|
#!/usr/bin/env python3
from support.classifiers import classifiers
from setuptools import setup
import support
support.use("system")
from support.setup_codes import setup_commands
name = 'amuse-mikkola'
author = 'The AMUSE team'
author_email = 'info@amusecode.org'
license_ = "Apache License 2.0"
url = 'http://www.amusecode.org/'
install_requires = [
'amuse-framework',
]
description = 'The Astrophysical Multipurpose Software Environment - Mikkola'
with open("README.md", "r") as fh:
long_description = fh.read()
long_description_content_type = "text/markdown"
extensions = []
all_data_files = []
packages = [
'amuse.community.mikkola',
]
package_data = {
}
mapping_from_command_name_to_command_class = setup_commands()
try:
from src.amuse.community.mikkola.version import version
use_scm_version = False
setup_requires = []
except ImportError:
version = False
setup_requires = ['setuptools_scm']
use_scm_version = {
"root": "../..",
"relative_to": __file__,
"write_to": "src/amuse/community/mikkola/version.py",
}
setup(
name=name,
use_scm_version=use_scm_version,
setup_requires=setup_requires,
version=version,
classifiers=classifiers,
url=url,
author_email=author_email,
author=author,
license=license_,
description=description,
long_description=long_description,
long_description_content_type=long_description_content_type,
install_requires=install_requires,
python_requires=">=3.5",
cmdclass=mapping_from_command_name_to_command_class,
ext_modules=extensions,
package_dir={
'amuse.community.mikkola': 'src/amuse/community/mikkola',
},
packages=packages,
package_data=package_data,
data_files=all_data_files,
)
| 1,784
| 23.452055
| 77
|
py
|
amuse
|
amuse-main/packages/amuse-mesa-r2208/setup.py
|
#!/usr/bin/env python3
from support.classifiers import classifiers
from setuptools import setup
import support
support.use("system")
from support.setup_codes import setup_commands
name = 'amuse-mesa-r2208'
author = 'The AMUSE team'
author_email = 'info@amusecode.org'
license_ = "Apache License 2.0"
url = 'http://www.amusecode.org/'
install_requires = [
'amuse-framework',
]
description = 'The Astrophysical Multipurpose Software Environment - MESA (r2208)'
with open("README.md", "r") as fh:
long_description = fh.read()
long_description_content_type = "text/markdown"
extensions = []
all_data_files = []
packages = [
'amuse.community.mesa_r2208',
]
package_data = {
}
mapping_from_command_name_to_command_class = setup_commands()
try:
from src.amuse.community.mesa_r2208.version import version
use_scm_version = False
setup_requires = []
except ImportError:
version = False
setup_requires = ['setuptools_scm']
use_scm_version = {
"root": "../..",
"relative_to": __file__,
"write_to": "src/amuse/community/mesa_r2208/version.py",
}
setup(
name=name,
use_scm_version=use_scm_version,
setup_requires=setup_requires,
version=version,
classifiers=classifiers,
url=url,
author_email=author_email,
author=author,
license=license_,
description=description,
long_description=long_description,
long_description_content_type=long_description_content_type,
install_requires=install_requires,
python_requires=">=3.7",
cmdclass=mapping_from_command_name_to_command_class,
ext_modules=extensions,
package_dir={
'amuse.community.mesa_r2208': 'src/amuse/community/mesa_r2208',
},
packages=packages,
package_data=package_data,
data_files=all_data_files,
)
| 1,807
| 23.767123
| 82
|
py
|
amuse
|
amuse-main/packages/amuse-bse/setup.py
|
#!/usr/bin/env python3
from support.classifiers import classifiers
from setuptools import setup
import support
support.use("system")
from support.setup_codes import setup_commands
name = 'amuse-bse'
author = 'The AMUSE team'
author_email = 'info@amusecode.org'
license_ = "Apache License 2.0"
url = 'http://www.amusecode.org/'
install_requires = [
'amuse-framework',
]
description = 'The Astrophysical Multipurpose Software Environment - BSE'
with open("README.md", "r") as fh:
long_description = fh.read()
long_description_content_type = "text/markdown"
extensions = []
all_data_files = []
packages = [
'amuse.community.bse',
]
package_data = {
}
mapping_from_command_name_to_command_class = setup_commands()
try:
from src.amuse.community.bse.version import version
use_scm_version = False
setup_requires = []
except ImportError:
version = False
setup_requires = ['setuptools_scm']
use_scm_version = {
"root": "../..",
"relative_to": __file__,
"write_to": "src/amuse/community/bse/version.py",
}
setup(
name=name,
use_scm_version=use_scm_version,
setup_requires=setup_requires,
version=version,
classifiers=classifiers,
url=url,
author_email=author_email,
author=author,
license=license_,
description=description,
long_description=long_description,
long_description_content_type=long_description_content_type,
install_requires=install_requires,
python_requires=">=3.5",
cmdclass=mapping_from_command_name_to_command_class,
ext_modules=extensions,
package_dir={
'amuse.community.bse': 'src/amuse/community/bse',
},
packages=packages,
package_data=package_data,
data_files=all_data_files,
)
| 1,756
| 23.068493
| 73
|
py
|
amuse
|
amuse-main/packages/amuse-mesa/setup.py
|
#!/usr/bin/env python3
from support.classifiers import classifiers
from setuptools import setup
import support
support.use("system")
from support.setup_codes import setup_commands
name = 'amuse-mesa'
author = 'The AMUSE team'
author_email = 'info@amusecode.org'
license_ = "Apache License 2.0"
url = 'http://www.amusecode.org/'
install_requires = [
'amuse-framework',
]
description = 'The Astrophysical Multipurpose Software Environment - MESA'
with open("README.md", "r") as fh:
long_description = fh.read()
long_description_content_type = "text/markdown"
extensions = []
all_data_files = []
packages = [
'amuse.community.mesa',
]
package_data = {
}
mapping_from_command_name_to_command_class = setup_commands()
try:
from src.amuse.community.mesa.version import version
use_scm_version = False
setup_requires = []
except ImportError:
version = False
setup_requires = ['setuptools_scm']
use_scm_version = {
"root": "../..",
"relative_to": __file__,
"write_to": "src/amuse/community/mesa/version.py",
}
setup(
name=name,
use_scm_version=use_scm_version,
setup_requires=setup_requires,
version=version,
classifiers=classifiers,
url=url,
author_email=author_email,
author=author,
license=license_,
description=description,
long_description=long_description,
long_description_content_type=long_description_content_type,
install_requires=install_requires,
python_requires=">=3.7",
cmdclass=mapping_from_command_name_to_command_class,
ext_modules=extensions,
package_dir={
'amuse.community.mesa': 'src/amuse/community/mesa',
},
packages=packages,
package_data=package_data,
data_files=all_data_files,
)
| 1,763
| 23.164384
| 74
|
py
|
amuse
|
amuse-main/lib/simple_hash/test.py
|
import subprocess
import random
import sys
class HashTableWrapper:
def __init__(self, pathToExe):
self.p = subprocess.Popen(pathToExe, stdin=subprocess.PIPE, stdout=subprocess.PIPE,bufsize=0)
# self.p.stdin = DebugPrintFilter(self.p.stdin)
def __setitem__(self, key, value):
# print 'insert %d %d' % (key, value)
self.p.stdin.write(b'insert %d %d\n' % (key, value))
def __getitem__(self, key):
# print 'lookup %d' % key
self.p.stdin.write(b'lookup %d\n' % key)
return eval(self.p.stdout.readline())
def increment(self, key):
# print 'increment %d' % key
self.p.stdin.write(b'increment %d\n' % key)
def __delitem__(self, key):
# print 'delete %d' % key
self.p.stdin.write(b'delete %d\n' % key)
def clear(self):
# print "clear"
self.p.stdin.write(b'clear\n')
def compact(self):
# print "compact"
self.p.stdin.write(b'compact\n')
def run(self, test, *args):
r = test(self, *args)
self.p.stdin.close()
return r, eval(self.p.stdout.read())
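# HashTableWrapper speaks a line-based pipe protocol to the external
# executable: 'insert K V', 'lookup K', 'increment K', 'delete K', 'clear'
# and 'compact'. Only 'lookup' answers (one eval()'d line); closing stdin
# in run() makes the program print its final state for comparison.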
class DictionaryWrapper:
def __init__(self):
self.d = {}
def __setitem__(self, key, value):
self.d[key] = value
def __getitem__(self, key):
return self.d.get(key)
def increment(self, key):
self.d[key] = self.d.get(key, 0) + 1
def __delitem__(self, key):
if key in self.d:
del self.d[key]
def clear(self):
self.d.clear()
def compact(self):
pass
def run(self, test, *args):
return test(self, *args), self.d
class DebugPrintFilter:
def __init__(self, pipe):
self.pipe = pipe
def write(self, text):
sys.stdout.write(text)
self.pipe.write(text)
def close(self):
self.pipe.close()
def RandomizedTest(w, seed, keys, loops):
random.seed(seed + 1)
r = []
for i in range(loops):
for j in range(random.randint(0, len(keys))):
w[random.choice(keys)] = random.randint(0, 0xffffffff-1)
for j in range(random.randint(0, len(keys))):
w.increment(random.choice(keys))
for j in range(random.randint(0, len(keys))):
del w[random.choice(keys)]
for j in range(random.randint(0, len(keys))):
r.append(w[random.choice(keys)])
if random.randint(0, 3) == 0:
w.clear()
if random.randint(0, 1) == 0:
w.compact()
return r
if __name__ == '__main__':
pathToExe = sys.argv[1]
seed = int(sys.argv[2])
keySets = [
[0],
list(range(4)),
list(range(10)),
list(range(32)),
list(range(100)),
[0] + [random.randint(1, 0xffffffff) for i in range(4)],
[0] + [random.randint(1, 0xffffffff) for i in range(10)],
[0] + [random.randint(1, 0xffffffff) for i in range(32)],
[0] + [random.randint(1, 0xffffffff) for i in range(100)],
[random.randint(0, 0xffffffff) for i in range(20000)],
# [0] + [random.randint(1, 10) for i in xrange(4)],
# [0] + [random.randint(1, 100) for i in xrange(10)],
# [0] + [random.randint(1, 1000) for i in xrange(32)],
# [0] + [random.randint(1, 10000) for i in xrange(100)],
# [random.randint(0, 1000) for i in xrange(10000)],
]
for keys in keySets:
r1 = HashTableWrapper(pathToExe).run(RandomizedTest, seed, keys, 4)
r2 = DictionaryWrapper().run(RandomizedTest, seed, keys, 4)
print(len(keys), r1 == r2)
if r1 != r2:
print("test fails")
sys.exit(1)
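# Usage sketch: python test.py <path-to-hash-table-executable> <seed>
# Each key set is replayed against both the external table and a reference
# dict; a mismatch in lookup results or final contents fails the test.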
| 3,620
| 33.160377
| 101
|
py
|
amuse
|
amuse-main/lib/sapporo_2/download.py
|
#!/usr/bin/env python
import subprocess
import os
import urllib.request
import urllib.parse
import urllib.error
from optparse import OptionParser
class GetCodeFromHttp(object):
url = "https://github.com/treecode/sapporo2/tarball/master"
alternative_url = \
"http://amuse.strw.leidenuniv.nl/codes/sapporo2-598e88c.tgz"
filename = "master.tgz"
def directory(self):
return os.path.abspath(os.path.dirname(__file__))
def src_directory(self):
return os.path.join(self.directory(), 'src')
def unpack_downloaded_file(self, filename):
print("unpacking", filename)
arguments = ['tar', '-xf']
arguments.append(filename)
subprocess.call(
arguments,
cwd=os.path.join(self.src_directory())
)
for x in os.listdir(os.path.join(self.src_directory())):
if x.startswith('treecode-sapporo2-'):
subprocess.call(
['mv', x, 'sapporo2-master'],
cwd=os.path.join(self.src_directory())
)
break
print("done")
def start(self):
if os.path.exists('src'):
counter = 0
while os.path.exists('src.{0}'.format(counter)):
counter += 1
if counter > 100:
print("too many backup directories")
break
os.rename('src', 'src.{0}'.format(counter))
os.mkdir('src')
url = self.url
filename = self.filename
filepath = os.path.join(self.src_directory(), filename)
print("downloading sapporo2 from", url, "to", filename)
urllib.request.urlretrieve(url, filepath)
print("downloading finished")
self.unpack_downloaded_file(filename)
def main(must_download_from_github=False):
if must_download_from_github:
print(
"download using git is not supported yet, will download tarball "
"instead"
)
instance = GetCodeFromHttp()
else:
instance = GetCodeFromHttp()
instance.start()
def new_option_parser():
result = OptionParser()
result.add_option(
"-g", "--github",
default=False,
dest="must_download_from_github",
help="if given will download the code from the github repository "
"using git",
action="store_true"
)
return result
if __name__ == "__main__":
options, arguments = new_option_parser().parse_args()
main(**options.__dict__)
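# Usage sketch: run "./download.py" from lib/sapporo_2 to fetch the sapporo2
# tarball into src/ and unpack it; "--github" currently falls back to the
# same tarball download, since a git checkout is not supported yet.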
| 2,548
| 27.010989
| 77
|
py
|
amuse
|
amuse-main/doc/conf.py
|
# -*- coding: utf-8 -*-
#
# AMUSE documentation build configuration file, created by
# sphinx-quickstart on Tue Sep 29 13:22:44 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
import datetime
is_running_in_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('../test'))
sys.path.append(os.path.abspath('../src'))
sys.path.append(os.path.abspath('sphinxext'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.todo',
#'sphinx.ext.imgmath',
'sphinx.ext.ifconfig',
'sphinx.ext.graphviz',
'sphinx.ext.inheritance_diagram',
'io_directive',
'autodoc_parameters',
'sphinx_rtd_theme',
]
if not is_running_in_rtd:
extensions.extend([
'matplotlib.sphinxext.plot_directive',
'rst2pdf.pdfbuilder',
])
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = ['.rst']
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'AMUSE'
copyright = u'2009 - %i The AMUSE Team'%datetime.date.today().year
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
#version = 'dev'
version = 'dev'
# The full version, including alpha/beta/rc tags.
#release = 'latest'
release = 'latest'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'default'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
if is_running_in_rtd:
html_theme = 'default'
else:
# html_theme = 'basic'
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
if not is_running_in_rtd:
html_additional_pages = {'gallery':'gallery.html'}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'AMUSEdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
# ('index', 'AMUSE.tex', u'AMUSE Documentation',
# u'The AMUSE Team', 'manual'),
('reference/index', 'reference-doc.tex', u'Reference Documentation',
u'The AMUSE Team', 'manual'),
('design/index', 'design-doc.tex', u'Design Documentation',
u'The AMUSE Team', 'manual'),
('tutorial/index', 'tutorial.tex', u'Tutorials',
u'The AMUSE Team', 'howto'),
('install/index', 'installation.tex', u'Installing AMUSE',
u'The AMUSE Team', 'howto'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
pdf_documents = [
# ('index', 'AMUSE.tex', u'AMUSE Documentation',
# u'The AMUSE Team', 'manual'),
('reference/index', 'reference-doc', u'Reference Documentation',
u'The AMUSE Team', 'manual'),
('design/index', 'design-doc', u'Design Documentation',
u'The AMUSE Team', 'manual'),
('tutorial/index', 'tutorial', u'Tutorials',
u'The AMUSE Team', 'howto'),
('install/index', 'installation', u'Installing AMUSE',
u'The AMUSE Team', 'howto'),
]
# html_baseurl = "https://amusecode.github.io/documentation/"
| 8,229
| 30.899225
| 80
|
py
|
amuse
|
amuse-main/doc/install/install.py
|
#!/usr/bin/env python3
import sys
import os.path
import os
import urllib.request
import subprocess
import shutil
import ssl
try:
ssl._create_default_https_context = ssl._create_unverified_context
except:
pass
IS_ON_OSX = sys.platform == 'darwin'
PYTHON = sys.executable
def late(function):
class LateProperty(object):
def __init__(self, initializer):
self.initializer = initializer
def __get__(self, instance, owner):
if instance is None:
return self
value = self.initializer(instance)
setattr(instance,self.initializer.__name__,value)
return value
return LateProperty(function)
class InstallPrerequisites(object):
@late
def prefix(self):
if 'VIRTUAL_ENV' in os.environ:
return os.environ['VIRTUAL_ENV']
path = os.path.split(sys.executable)[0]
if 'Framework' in path:
return path[:path.index('Framework')]
else:
return path[:path.index('bin')-1]
@late
def applications(self):
return [
(
'numpy' , #name to refer by
[], #names of prerequisites (unused)
'1.17.4' , #version string
'numpy-', '.tar.gz', #pre- and postfix for filename
'https://github.com/numpy/numpy/releases/download/v1.17.4/', #download url, filename is appended
self.numpy_build #method to use for building
),
(
'nose',
[],
'1.3.0',
'nose-' , '.tar.gz',
'https://pypi.python.org/packages/source/n/nose/',
self.python_build
),
(
'cython',
[],
'0.29.14',
'Cython-' , '.tar.gz',
'https://pypi.io/packages/source/c/cython/',
self.python_build
) ,
(
'hdf' ,
[],
'1.10.6',
'hdf5-' , '.tar.gz' ,
'https://support.hdfgroup.org/ftp/HDF5/releases/hdf5-1.10/hdf5-1.10.6/src/',
self.hdf5_build
) ,
(
'h5py',
['hdf'],
'2.10.0',
'h5py-' , '.tar.gz',
'https://pypi.python.org/packages/source/h/h5py/', self.h5py_build
) ,
(
'netcdf-c' ,
['hdf'],
'4.7.4',
'v' , '.tar.gz' ,
'https://github.com/Unidata/netcdf-c/archive/',
self.netcdf_build
) ,
(
'netcdf-fortran' ,
['netcdf-c'],
'4.5.3',
'v' , '.tar.gz' ,
'https://github.com/Unidata/netcdf-fortran/archive/',
self.netcdf_build
) ,
(
'netcdf4-python' ,
['netcdf-c'],
'1.5.4rel',
'v' , '.tar.gz' ,
'https://github.com/Unidata/netcdf4-python/archive/',
self.python_build
) ,
(
'f90nml',
[],
'1.1.2',
'v' , '.tar.gz',
'https://github.com/marshallward/f90nml/archive/',
self.python_build
),
(
'docutils',
[],
'0.15.2',
'docutils-','.tar.gz',
'https://pypi.python.org/packages/source/d/docutils/',
self.python_build
) ,
(
'mpich',
[],
'3.3.2',
'mpich-', '.tar.gz',
'https://www.mpich.org/static/tarballs/3.3.2/',
self.mpich2_build
) ,
(
'mpi4py',
['mpich2'],
'3.0.3',
'mpi4py-', '.tar.gz',
'https://bitbucket.org/mpi4py/mpi4py/downloads/',
self.python_build
) ,
(
'fftw3' , #name to refer by
[], #names of prerequisites (unused)
'3.3.3' , #version string
'fftw-', '.tar.gz', #pre- and postfix for filename
'http://www.fftw.org/', #download url, filename is appended
self.fftw_build #method to use for building
) ,
(
'gsl' , #name to refer by
[], #names of prerequisites (unused)
'1.16' , #version string
'gsl-', '.tar.gz', #pre- and postfix for filename
'https://ftp.gnu.org/gnu/gsl/', #download url, filename is appended
self.fftw_build #method to use for building - same as for FFTW should work
) ,
(
'cmake' , #name to refer by
[], #names of prerequisites (unused)
'3.19.4' , #version string
'cmake-', '.tar.gz', #pre- and postfix for filename
'https://www.cmake.org/files/v3.19/', #download url, filename is appended
self.cmake_build #method to use for building
) ,
(
'gmp', #name to refer by
[], #names of prerequisites (unused)
'6.1.2' , #version string
'gmp-', '.tar.bz2', #pre- and postfix for filename
'https://gmplib.org/download/gmp/', #download url, filename is appended
self.gmp_build #method to use for building
) ,
(
'mpfr' , #name to refer by
['gmp'], #names of prerequisites
'4.0.2' , #version string
'mpfr-', '.tar.gz', #pre- and postfix for filename
'https://www.mpfr.org/mpfr-4.0.2/', #download url, filename is appended
self.mpfr_build #method to use for building
) ,
]
@late
def temp_dir(self):
return os.path.join(self.prefix,'install','_temp')
@late
def fortran90_compiler(self):
if 'FC' in os.environ:
return os.environ['FC']
else:
return self.fortran_compiler
@late
def fortran77_compiler(self):
if 'F77' in os.environ:
return os.environ['F77']
else:
return None
@late
def fortran_compiler(self):
if 'FC' in os.environ:
return os.environ['FC']
elif 'FORTRAN' in os.environ:
return os.environ['FORTRAN']
elif 'F77' in os.environ:
return os.environ['F77']
elif 'FORT' in os.environ:
return os.environ['FORT']
else:
return None
@late
def use_hydra_process_manager(self):
return False
@late
def use_gforker_process_manager(self):
return False
def setup_temp_dir(self):
if not os.path.exists(self.temp_dir):
os.makedirs(self.temp_dir)
def run_application(self, args, cwd, env = None):
print("starting " , ' '.join(args))
process = subprocess.Popen(args, cwd=cwd, env = env)
returncode = process.wait()
if returncode != 0:
commandline = ' '.join(args)
raise Exception("Error when running <" + commandline + ">")
print("finished " , ' '.join(args))
def h5py_build(self, path):
self.run_application([PYTHON,'setup.py','configure','--hdf5='+self.prefix], cwd=path)
self.run_application([PYTHON,'setup.py','build'],cwd=path)
self.run_application([PYTHON,'setup.py','install', '--prefix='+self.prefix], cwd=path)
def setuptools_install(self, path):
self.run_application(['sh',], cwd=path)
def hdf5_build(self, path):
commands = []
commands.append([
'./configure',
'--prefix='+self.prefix,
'--enable-shared',
'--disable-hl',
'--enable-build-mode=production',
'--with-pthread=/usr',
'--enable-threadsafe'
])
commands.append(['make'])
commands.append(['make', 'install'])
for x in commands:
self.run_application(x, path)
def python_build(self, path):
self.run_application([PYTHON,'setup.py','build'], cwd=path)
self.run_application([PYTHON,'setup.py','install', '--prefix='+self.prefix], cwd=path)
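# Building numpy with BLAS/LAPACK/ATLAS set to 'None' disables linking
# against system linear-algebra libraries, so the bootstrap build stays
# self-contained (numpy falls back to its bundled lapack_lite).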
def numpy_build(self, path):
env = os.environ.copy()
env['BLAS'] = 'None'
env['LAPACK'] = 'None'
env['ATLAS'] = 'None'
self.run_application([PYTHON,'setup.py','build'], cwd=path, env=env)
self.run_application([PYTHON,'setup.py','install', '--prefix='+self.prefix], cwd=path, env=env)
def mercurial_build(self, path):
self.run_application(['make','install','PREFIX='+self.prefix], cwd=path)
def openmpi_build(self, path):
commands = []
commands.append([
'./configure',
'--prefix='+self.prefix,
#'--enable-mpi-threads',
'--enable-cxx-exceptions',
'--enable-debug',
'--enable-orterun-prefix-by-default',
])
commands.append(['make'])
commands.append(['make', 'install'])
for x in commands:
self.run_application(x, path)
def mpich2_build(self, path):
commands = []
command = [
'./configure',
'--prefix='+self.prefix,
'--enable-shared',
'--enable-sharedlibs=gcc',
'--enable-fc',
'--with-python='+sys.executable,
'--with-device=ch3:sock',
]
if self.use_hydra_process_manager:
command.append('--with-pm=hydra:gforker')
elif self.use_gforker_process_manager:
command.append('--with-pm=gforker:hydra')
else:
command.append('--with-pm=gforker:hydra')
if self.fortran90_compiler is not None:
command.append('FC=' + self.fortran90_compiler)
commands.append(command)
commands.append(['make'])
commands.append(['make', 'install'])
for x in commands:
self.run_application(x, path)
self.check_mpich2_install(commands, path)
def fftw_build(self, path):
commands = []
command = [
'./configure',
'--prefix='+self.prefix,
'--enable-shared',
'--enable-threads'
]
commands.append(command)
commands.append(['make'])
commands.append(['make', 'install'])
for x in commands:
self.run_application(x, path)
def basic_build(self, path):
commands = []
command = [
'./configure',
'--prefix='+self.prefix,
'--enable-shared'
]
commands.append(command)
commands.append(['make'])
commands.append(['make', 'install'])
for x in commands:
self.run_application(x, path)
def netcdf_build(self, path):
env = os.environ.copy()
env['LDFLAGS'] = '-L{0}/lib64 '.format(self.prefix) + env.get('LDFLAGS','')
env['CPPFLAGS'] = '-I{0}/include '.format(self.prefix) + env.get('CPPFLAGS','')
env['CFLAGS'] = '-I{0}/include '.format(self.prefix) + env.get('CFLAGS','')
commands = []
command = [
'./configure',
'--prefix='+self.prefix,
#~ '--enable-shared'
]
commands.append(command)
commands.append(['make'])
commands.append(['make', 'install'])
for x in commands:
self.run_application(x, path, env=env)
def cmake_build(self, path):
commands = []
command = [
'./configure',
'--prefix='+self.prefix,
]
commands.append(command)
commands.append(['make'])
commands.append(['make', 'install'])
for x in commands:
self.run_application(x, path)
def gmp_build(self, path):
commands = []
command = [
'./configure',
'--prefix='+self.prefix,
'--enable-shared'
]
commands.append(command)
commands.append(['make'])
commands.append(['make', 'check'])
commands.append(['make', 'install'])
for x in commands:
self.run_application(x, path)
def mpfr_build(self, path):
commands = []
command = [
'./configure',
'--prefix='+self.prefix,
'--with-gmp='+self.prefix,
'--enable-shared',
'--enable-thread-safe'
]
commands.append(command)
commands.append(['make'])
commands.append(['make', 'check'])
commands.append(['make', 'install'])
for x in commands:
self.run_application(x, path)
def check_mpich2_install(self, commands, path):
bin_directory = os.path.join(self.prefix, 'bin')
mpif90_filename = os.path.join(bin_directory, 'mpif90')
if not os.path.exists(mpif90_filename):
print("-----------------------------------------------------------------")
print("MPICH build incomplete, no fortran 90 support")
print("-----------------------------------------------------------------")
print("The 'mpif90' command was not build")
print("This is usually caused by an incompatible C and fortran compiler")
print("Please set the F90, F77 and CC environment variables")
print()
print("After changing the environment variables,")
print("you can restart the install with:")
print()
print(" ./install.py install mpich2 mpi4py")
print()
print("You can rerun the build by hand, using:")
print()
print(" cd", path)
for command in commands:
print()
if len(command) < 3:
print(' ', ' '.join(command))
else:
print(' \\\n '.join(command))
sys.exit(1)
def openssl_build(self, path):
commands = []
commands.append([
'./config','--prefix='+self.prefix,
' --openssldir='+self.prefix+'/openssl',
'--shared'
])
commands.append(['make'])
commands.append(['make', 'install'])
for x in commands:
self.run_application(x, path)
def download_apps(self, names, skip):
for (name, dependencies, version, prefix, suffix, url_prefix, function) in self.applications:
if names and name not in names:
continue
if skip and name in skip:
continue
app_file = prefix + version + suffix
app_dir = prefix + version
url = url_prefix + app_file
temp_app_file = os.path.join(self.temp_dir, app_file)
if not os.path.exists(temp_app_file):
print("Downloading ", app_file, url)
urllib.request.urlretrieve(url, os.path.join(self.temp_dir, app_file))
print("...Finished")
def list_apps(self, names, skip):
for (name, dependencies, version, prefix, suffix, url_prefix, function) in self.applications:
if skip and name in skip:
continue
print(name, " - dowloaded from", url_prefix)
def list_download_urls(self, names, skip):
for (name, dependencies, version, prefix, suffix, url_prefix, function) in self.applications:
if skip and name in skip:
continue
app_file = prefix + version + suffix
app_dir = prefix + version
url = url_prefix + app_file
print(url)
def unpack_apps(self, names, skip):
for (name, dependencies, version, prefix, suffix, url_prefix, function) in self.applications:
if names and name not in names:
continue
if skip and name in skip:
continue
app_file = prefix + version + suffix
app_dir = prefix + version
url = url_prefix + app_file
temp_app_file = os.path.join(self.temp_dir, app_file)
temp_app_dir = os.path.join(self.temp_dir , app_dir)
if os.path.exists(temp_app_dir):
shutil.rmtree(temp_app_dir)
print("Unpacking ", app_file)
try:
self.run_application(['tar','-xf',app_file], cwd=self.temp_dir)
except:
print("----------------------------------------------------------")
print("Could not unpack source file of", name)
print("----------------------------------------------------------")
print()
print("Download location may have changed")
print("Please download the source file yourself, ")
print("or contact the AMUSE development team.")
print("https://github.com/amusecode/amuse/issues")
print()
print("To download the file you can update the URL in")
print("one of the following lines and run the command.")
print()
print("curl ", url, "-o", temp_app_file)
print()
print("or")
print()
print("wget ", url, "-O", temp_app_file)
print()
print("Note: The name of the output file must not be changed (after the -o or -O parameter)")
sys.exit(1)
print("...Finished")
def extract_path(self, app_file):
proc=subprocess.Popen(["tar","tf",app_file], stdout=subprocess.PIPE)
out,err=proc.communicate()
out=out.decode().split("\n")
return os.path.normpath(out[0]).split(os.sep)[0]
def build_apps(self, names, skip):
for (name, dependencies, version, prefix, suffix, url_prefix, function) in self.applications:
if names and name not in names:
continue
if skip and name in skip:
continue
app_file = prefix + version + suffix
app_dir = prefix + version
temp_app_dir = self.extract_path(os.path.join(self.temp_dir , app_file) )
temp_app_dir = os.path.join(self.temp_dir, temp_app_dir)
if not os.path.exists(temp_app_dir):
if prefix.endswith('-'):
app_dir = prefix[:-1]
else:
app_dir = prefix
temp_app_dir = os.path.join(self.temp_dir , app_dir)
if not os.path.exists(temp_app_dir):
app_file = prefix + version + suffix
if app_file.endswith('.tar.gz'):
app_dir = app_file[:-len('.tar.gz')]
elif app_file.endswith('.tar.bz2'):
app_dir = app_file[:-len('.tar.bz2')]
else:
app_dir, ignore = os.path.splitext(app_file)
temp_app_dir = os.path.join(self.temp_dir , app_dir)
if not os.path.exists(temp_app_dir):
print("Package was not correctly unpacked: ", app_file)
return
print("Building ", app_file)
function(temp_app_dir)
print("...Finished")
class InstallPrerequisitesOnOSX(InstallPrerequisites):
def mpich2_build(self, path):
commands = []
command = [
'./configure',
'--prefix='+self.prefix,
'--enable-fc',
'--enable-shared',
'--with-python='+sys.executable,
'--enable-threads',
'--enable-sharedlibs=osx-gcc',
'--with-device=ch3:sock',
]
if self.use_hydra_process_manager:
command.append('--with-pm=hydra:gforker')
elif self.use_gforker_process_manager:
command.append('--with-pm=gforker:hydra')
else:
command.append('--with-pm=hydra:gforker')
commands.append(command)
commands.append(['make'])
commands.append(['make', 'install'])
for x in commands:
self.run_application(x, path)
self.check_mpich2_install(commands, path)
def mpfr_build(self, path):
commands = []
command = [
'./configure',
'--prefix='+self.prefix,
'--with-gmp='+self.prefix,
'--enable-shared'
]
commands.append(command)
commands.append(['make'])
commands.append(['make', 'check'])
commands.append(['make', 'install'])
for x in commands:
self.run_application(x, path)
class InstallMatplotlib(InstallPrerequisites):
@late
def applications(self):
return (
(
'freetype' , #name to refer by
[], #names of prerequisites (unused)
'2.4.9' , #version string
'freetype-', '.tar.gz', #pre- and postfix for filename
'https://download.savannah.gnu.org/releases/freetype/', #download url, filename is appended
self.basic_build #method to use for building - same as for FFTW should work
) ,
(
'zlib' , #name to refer by
[], #names of prerequisites (unused)
'1.2.11' , #version string
'zlib-', '.tar.gz', #pre- and postfix for filename
'https://downloads.sourceforge.net/project/libpng/zlib/1.2.11/', #download url, filename is appended
self.basic_build #method to use for building - same as for FFTW should work
) ,
(
'png' , #name to refer by
[], #names of prerequisites (unused)
'1.6.37' , #version string
'libpng-', '.tar.gz', #pre- and postfix for filename
'https://downloads.sourceforge.net/project/libpng/libpng16/1.6.37/', #download url, filename is appended
self.basic_build #method to use for building - same as for FFTW should work
),
(
'matplotlib' , #name to refer by
[], #names of prerequisites (unused)
'2.2.2' , #version string
'matplotlib-', '.tar.gz', #pre- and postfix for filename
'https://pypi.python.org/packages/source/m/matplotlib/', #download url, filename is appended
self.matplotlib_build #method to use for building - same as for FFTW should work
),
)
def basic_build(self, path):
commands = []
command = [
'./configure',
'--prefix='+self.prefix,
'--enable-shared'
]
commands.append(command)
commands.append(['make'])
commands.append(['make', 'install'])
for x in commands:
self.run_application(x, path)
def matplotlib_build(self, path):
env = os.environ.copy()
env['CFLAGS'] ="-I{0}/include -I{0}/include/freetype2".format(self.prefix)
env['LDFLAGS'] = "-L{0}/lib".format(self.prefix)
self.run_application([PYTHON,'setup.py','build'], cwd=path, env = env)
self.run_application([PYTHON,'setup.py','install', '--prefix='+self.prefix], cwd=path, env = env)
if IS_ON_OSX:
INSTALL = InstallPrerequisitesOnOSX()
else:
INSTALL = InstallPrerequisites()
def download(names, skip):
INSTALL.download_apps(names, skip)
def install(names, skip):
INSTALL.download_apps(names, skip)
INSTALL.unpack_apps(names, skip)
INSTALL.build_apps(names, skip)
def listpackages(names, skip):
INSTALL.list_apps(names, skip)
def list_download_urls(names, skip):
INSTALL.list_download_urls(names, skip)
_commands = {
'download' : download,
'install' : install,
'list' : listpackages,
'url' : list_download_urls,
}
if __name__ == '__main__':
print("")
if INSTALL.fortran90_compiler is None or INSTALL.fortran77_compiler is None:
print("""No fortran 90 compiler environment variable set.
A FORTRAN 90 compiler is needed for MPI and several modules,
please set FC and F77 first by (bash, replace gfortran with your preferred
compiler):
export FC=gfortran
export F77=gfortran
or (csh):
setenv FC gfortran
setenv F77 gfortran
""")
sys.exit(1)
else:
print("Fortran 90 compiler used will be: ", INSTALL.fortran90_compiler)
print("Fortran 77 compiler used will be: ", INSTALL.fortran77_compiler)
print("")
do = []
names = []
flag = False
skip = []
for x in sys.argv:
if x in _commands.keys():
do.append(x)
flag = True
else:
if x == '--matplotlib':
INSTALL = InstallMatplotlib()
print("----------------------------------------------------------")
print("This feature is optional and experimental!")
print("----------------------------------------------------------")
print()
print("Will download and install matplotlib")
print("plus it most common prerequisites (zlib, png and freetype)")
elif x == '--hydra':
INSTALL.use_hydra_process_manager = True
elif x == '--gforker':
INSTALL.use_gforker_process_manager = True
elif x.startswith('--prefix='):
INSTALL.prefix = x[len('--prefix='):]
elif flag:
if x.startswith('no-'):
skip.append(x[3:])
else:
names.append(x)
print("Files are installed in: ", INSTALL.prefix)
print("Files are downloaded to: ", INSTALL.temp_dir)
INSTALL.setup_temp_dir()
for x in do:
_commands[x](names, skip)
if len(do) == 0:
print("Usage: install.py download|install|list [package names]")
print("")
print("download download the packages to the download directory")
print("install unpack and install the packages to the prefix directory")
print("")
print("you can also install download or install individual packages")
print("please specify a list of packages to install")
print("")
print("to install all prerequisites do:")
print("")
print("./install.py install")
print("")
print("to get a list of all packages:")
print("")
print("./install.py list")
print("")
print("hydra is a modern process manager with more options and")
print("faster distributed process start-up")
print("to install mpich2 with the hydra process manager do:")
print("")
print("./install.py --hydra install")
print("")
print("the gforker process manager is easier to run (no daemon)")
print("but only works on the local machine")
print("to install mpich2 with the gforker process manager do:")
print("")
print("./install.py --gforker install")
print("")
sys.exit(1)
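# Typical invocations (a sketch; package names as shown by "list"):
#   ./install.py list                 # show all packages and their sources
#   ./install.py download             # only fetch the tarballs
#   ./install.py install              # download, unpack and build everything
#   ./install.py install hdf h5py     # build a subset of packages
#   ./install.py install no-mpich     # install everything except mpich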
| 28,457
| 34.931818
| 120
|
py
|
amuse
|
amuse-main/doc/tutorial/nearestneighbor/nn1.py
|
from amuse.community import *
class NearestNeighborInterface(CodeInterface):
include_headers = ['worker_code.h']
def __init__(self, **keyword_arguments):
CodeInterface.__init__(self, name_of_the_worker="nearestneighbor_worker", **keyword_arguments)
@legacy_function
def new_particle():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('index_of_the_particle', dtype='int32', direction=function.OUT)
function.addParameter('x', dtype='float64', direction=function.IN)
function.addParameter('y', dtype='float64', direction=function.IN)
function.addParameter('z', dtype='float64', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def delete_particle():
function = LegacyFunctionSpecification()
function.addParameter('index_of_the_particle', dtype='int32', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_state():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('index_of_the_particle', dtype='int32', direction=function.IN)
function.addParameter('x', dtype='float64', direction=function.OUT)
function.addParameter('y', dtype='float64', direction=function.OUT)
function.addParameter('z', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_state():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('index_of_the_particle', dtype='int32', direction=function.IN)
function.addParameter('x', dtype='float64', direction=function.IN)
function.addParameter('y', dtype='float64', direction=function.IN)
function.addParameter('z', dtype='float64', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def find_nearest_neighbors():
function = LegacyFunctionSpecification()
function.result_type = 'int32'
return function
@legacy_function
def get_close_neighbors():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('index_of_the_particle', dtype='int32', direction=function.IN)
function.addParameter('index_of_first_neighbor', dtype='float64', direction=function.OUT)
function.addParameter('index_of_second_neighbor', dtype='float64', direction=function.OUT)
function.addParameter('index_of_third_neighbor', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def get_nearest_neighbor():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('index_of_the_particle', dtype='int32', direction=function.IN)
function.addParameter('index_of_the_neighbor', dtype='float64', direction=function.OUT)
function.addParameter('distance', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def get_number_of_particles():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('value', dtype='int32', direction=function.OUT)
function.result_type = 'int32'
return function
class NearestNeighbor(InCodeComponentImplementation):
def __init__(self):
InCodeComponentImplementation.__init__(self, NearestNeighborInterface())
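# Usage sketch (assumes the "nearestneighbor_worker" binary has been built):
# constructing NearestNeighbor() starts the worker process and exposes the
# legacy functions above (new_particle, find_nearest_neighbors,
# get_nearest_neighbor, ...) as methods that forward to the compiled code.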
| 3,843
| 41.241758
| 102
|
py
|
amuse
|
amuse-main/doc/tutorial/nearestneighbor/plummer2.py
|
from interface import NearestNeighbor
from amuse.lab import *
from amuse.io import text
if __name__ == '__main__':
number_of_particles = 1000
particles = new_plummer_sphere(number_of_particles)
code = NearestNeighbor()
code.particles.add_particles(particles)
code.run()
local_particles = code.particles.copy()
delta = local_particles.neighbor1.as_set().position - local_particles.position
local_particles.dx = delta[...,0]
local_particles.dy = delta[...,1]
local_particles.dz = delta[...,2]
output = text.TableFormattedText("output.txt", set = local_particles)
output.attribute_names = ['x','y','z', 'dx', 'dy','dz']
output.store()
| 679
| 26.2
| 82
|
py
|
amuse
|
amuse-main/doc/tutorial/nearestneighbor/plummer1.py
|
from interface import NearestNeighbor
from amuse.units import units, nbody_system
from amuse.ext import plummer
from amuse.io import text
number_of_particles = 1000
mass_per_particle = 1 | units.MSun
convert_nbody = nbody_system.nbody_to_si(number_of_particles * mass_per_particle, 1.0 | units.parsec)
uc = plummer.MakePlummerModel(number_of_particles, convert_nbody)
particles = uc.result
nn = NearestNeighbor()
nn.particles.add_particles(particles)
print "number of particles:", len(nn.particles)
nn.find_nearest_neighbors()
local_particles = nn.particles.copy()
for p in local_particles:
delta = p.neighbor1.position - p.position
p.distance_to_neighbor = delta.length()
p.dx = delta.x
p.dy = delta.y
p.dz = delta.z
output = text.TableFormattedText("output.txt", set = local_particles)
output.attribute_names = ['x','y','z', 'dx', 'dy','dz']
output.attribute_types = [units.parsec] * 6
output.store()
| 938
| 25.828571
| 101
|
py
|
amuse
|
amuse-main/doc/tutorial/nearestneighbor/nn2.py
|
from amuse.community import *
class NearestNeighborInterface(CodeInterface):
use_modules = ['NN']
def __init__(self, **keyword_arguments):
CodeInterface.__init__(self, name_of_the_worker="nearestneighbor_worker", **keyword_arguments)
@legacy_function
def commit_parameters():
function = LegacyFunctionSpecification()
function.result_type = 'int32'
return function
@legacy_function
def set_maximum_number_of_particles():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_maximum_number_of_particles():
function = LegacyFunctionSpecification()
function.addParameter('value', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def new_particle():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('index_of_the_particle', dtype='int32', direction=function.OUT)
function.addParameter('x', dtype='float64', direction=function.IN)
function.addParameter('y', dtype='float64', direction=function.IN)
function.addParameter('z', dtype='float64', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def delete_particle():
function = LegacyFunctionSpecification()
function.addParameter('index_of_the_particle', dtype='int32', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def get_state():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('index_of_the_particle', dtype='int32', direction=function.IN)
function.addParameter('x', dtype='float64', direction=function.OUT)
function.addParameter('y', dtype='float64', direction=function.OUT)
function.addParameter('z', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def set_state():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('index_of_the_particle', dtype='int32', direction=function.IN)
function.addParameter('x', dtype='float64', direction=function.IN)
function.addParameter('y', dtype='float64', direction=function.IN)
function.addParameter('z', dtype='float64', direction=function.IN)
function.result_type = 'int32'
return function
@legacy_function
def run():
function = LegacyFunctionSpecification()
function.result_type = 'int32'
return function
@legacy_function
def get_close_neighbors():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('index_of_the_particle', dtype='int32', direction=function.IN)
function.addParameter('index_of_first_neighbor', dtype='float64', direction=function.OUT)
function.addParameter('index_of_second_neighbor', dtype='float64', direction=function.OUT)
function.addParameter('index_of_third_neighbor', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def get_nearest_neighbor():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('index_of_the_particle', dtype='int32', direction=function.IN)
function.addParameter('index_of_the_neighbor', dtype='float64', direction=function.OUT)
function.addParameter('distance', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
return function
@legacy_function
def get_number_of_particles():
function = LegacyFunctionSpecification()
function.can_handle_array = True
function.addParameter('value', dtype='int32', direction=function.OUT)
function.result_type = 'int32'
return function
class NearestNeighbor(InCodeComponentImplementation):
def __init__(self):
InCodeComponentImplementation.__init__(self, NearestNeighborInterface())
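# Note: this variant targets a Fortran worker (use_modules = ['NN']) and adds
# an explicit particle capacity (set/get_maximum_number_of_particles) plus
# commit_parameters(), which the header-based interface in nn1.py lacks.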
| 4,501
| 39.558559
| 102
|
py
|